code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
'''
Tests for fileinput module.
Nick Mathewson
'''
import os
import sys
import re
import fileinput
import collections
import builtins
import unittest
try:
import bz2
except ImportError:
bz2 = None
try:
import gzip
except ImportError:
gzip = None
from io import BytesIO, StringIO
from fileinput import FileInput, hook_encoded
from test.support import verbose, TESTFN, run_unittest, check_warnings
from test.support import unlink as safe_unlink
from unittest import mock
# The fileinput module has 2 interfaces: the FileInput class which does
# all the work, and a few functions (input, etc.) that use a global _state
# variable.
# Write lines (a list of lines) to temp file number i, and return the
# temp file's name.
def writeTmp(i, lines, mode='w'): # opening in text mode is the default
    """Write *lines* (a list of strings) to temp file number *i* and
    return the temp file's name.

    Fixed: the file is now opened via a context manager so the handle is
    closed even if one of the writes raises (the original leaked the
    handle on error).
    """
    name = TESTFN + str(i)
    with open(name, mode) as f:
        for line in lines:
            f.write(line)
    return name
def remove_tempfiles(*names):
    """Best-effort removal of every truthy name in *names*."""
    for name in filter(None, names):
        safe_unlink(name)
class BufferSizesTests(unittest.TestCase):
    # Exercises FileInput over four files of known sizes, once with the
    # default buffer size and once with a tiny one.  NOTE(review): the
    # bufsize argument looks like a legacy/no-op parameter here — confirm
    # against the fileinput version under test.

    def test_buffer_sizes(self):
        # First, run the tests with default and teeny buffer size.
        for round, bs in (0, 0), (1, 30):
            t1 = t2 = t3 = t4 = None
            try:
                t1 = writeTmp(1, ["Line %s of file 1\n" % (i+1) for i in range(15)])
                t2 = writeTmp(2, ["Line %s of file 2\n" % (i+1) for i in range(10)])
                t3 = writeTmp(3, ["Line %s of file 3\n" % (i+1) for i in range(5)])
                t4 = writeTmp(4, ["Line %s of file 4\n" % (i+1) for i in range(1)])
                self.buffer_size_test(t1, t2, t3, t4, bs, round)
            finally:
                remove_tempfiles(t1, t2, t3, t4)

    def buffer_size_test(self, t1, t2, t3, t4, bs=0, round=0):
        pat = re.compile(r'LINE (\d+) OF FILE (\d+)')
        start = 1 + round*6
        if verbose:
            print('%s. Simple iteration (bs=%s)' % (start+0, bs))
        # Simple iteration: 15 + 10 + 5 + 1 = 31 lines over the four files.
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        lines = list(fi)
        fi.close()
        self.assertEqual(len(lines), 31)
        self.assertEqual(lines[4], 'Line 5 of file 1\n')
        self.assertEqual(lines[30], 'Line 1 of file 4\n')
        self.assertEqual(fi.lineno(), 31)
        self.assertEqual(fi.filename(), t4)
        if verbose:
            print('%s. Status variables (bs=%s)' % (start+1, bs))
        # Status variables: read up to a known line, then check the counters.
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        s = "x"
        while s and s != 'Line 6 of file 2\n':
            s = fi.readline()
        self.assertEqual(fi.filename(), t2)
        self.assertEqual(fi.lineno(), 21)
        self.assertEqual(fi.filelineno(), 6)
        self.assertFalse(fi.isfirstline())
        self.assertFalse(fi.isstdin())
        if verbose:
            print('%s. Nextfile (bs=%s)' % (start+2, bs))
        # nextfile() skips the rest of file 2 but the cumulative lineno
        # keeps counting from where it was.
        fi.nextfile()
        self.assertEqual(fi.readline(), 'Line 1 of file 3\n')
        self.assertEqual(fi.lineno(), 22)
        fi.close()
        if verbose:
            print('%s. Stdin (bs=%s)' % (start+3, bs))
        # A '-' entry in the file list reads from sys.stdin, which is
        # temporarily replaced with a StringIO here.
        fi = FileInput(files=(t1, t2, t3, t4, '-'), bufsize=bs)
        savestdin = sys.stdin
        try:
            sys.stdin = StringIO("Line 1 of stdin\nLine 2 of stdin\n")
            lines = list(fi)
            self.assertEqual(len(lines), 33)
            self.assertEqual(lines[32], 'Line 2 of stdin\n')
            self.assertEqual(fi.filename(), '<stdin>')
            fi.nextfile()
        finally:
            sys.stdin = savestdin
        if verbose:
            print('%s. Boundary conditions (bs=%s)' % (start+4, bs))
        # Boundary conditions: before any read, lineno() is 0 and
        # filename() is None; nextfile() before reading changes nothing.
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        self.assertEqual(fi.lineno(), 0)
        self.assertEqual(fi.filename(), None)
        fi.nextfile()
        self.assertEqual(fi.lineno(), 0)
        self.assertEqual(fi.filename(), None)
        if verbose:
            print('%s. Inplace (bs=%s)' % (start+5, bs))
        # Inplace editing: with inplace=1, print() output replaces the input
        # files' contents (uppercased here); verify by re-reading the files.
        savestdout = sys.stdout
        try:
            fi = FileInput(files=(t1, t2, t3, t4), inplace=1, bufsize=bs)
            for line in fi:
                line = line[:-1].upper()
                print(line)
            fi.close()
        finally:
            sys.stdout = savestdout
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        for line in fi:
            # Every rewritten line is uppercase and must match the pattern,
            # with its number agreeing with the per-file line counter.
            self.assertEqual(line[-1], '\n')
            m = pat.match(line[:-1])
            self.assertNotEqual(m, None)
            self.assertEqual(int(m.group(1)), fi.filelineno())
        fi.close()
class UnconditionallyRaise:
    """A callable that notes it was invoked and then raises the configured
    exception type.  Used to monkeypatch os.* functions in tests."""

    def __init__(self, exception_type):
        self.exception_type = exception_type
        self.invoked = False

    def __call__(self, *args, **kwargs):
        # Flip the flag before raising so sanity checks can verify the
        # patched call site was actually reached.
        self.invoked = True
        raise self.exception_type()
class FileInputTests(unittest.TestCase):
    """Tests for the FileInput class itself (as opposed to the module-level
    convenience functions)."""

    def test_zero_byte_files(self):
        # Empty files are skipped transparently; once the final (empty)
        # file is reached, filelineno() resets to 0.
        t1 = t2 = t3 = t4 = None
        try:
            t1 = writeTmp(1, [""])
            t2 = writeTmp(2, [""])
            t3 = writeTmp(3, ["The only line there is.\n"])
            t4 = writeTmp(4, [""])
            fi = FileInput(files=(t1, t2, t3, t4))
            line = fi.readline()
            self.assertEqual(line, 'The only line there is.\n')
            self.assertEqual(fi.lineno(), 1)
            self.assertEqual(fi.filelineno(), 1)
            self.assertEqual(fi.filename(), t3)
            line = fi.readline()
            self.assertFalse(line)
            self.assertEqual(fi.lineno(), 1)
            self.assertEqual(fi.filelineno(), 0)
            self.assertEqual(fi.filename(), t4)
            fi.close()
        finally:
            remove_tempfiles(t1, t2, t3, t4)

    def test_files_that_dont_end_with_newline(self):
        # A file whose last line lacks '\n' yields that partial line as-is,
        # followed immediately by the next file's first line.
        t1 = t2 = None
        try:
            t1 = writeTmp(1, ["A\nB\nC"])
            t2 = writeTmp(2, ["D\nE\nF"])
            fi = FileInput(files=(t1, t2))
            lines = list(fi)
            self.assertEqual(lines, ["A\n", "B\n", "C", "D\n", "E\n", "F"])
            self.assertEqual(fi.filelineno(), 3)
            self.assertEqual(fi.lineno(), 6)
        finally:
            remove_tempfiles(t1, t2)

##     def test_unicode_filenames(self):
##         # XXX A unicode string is always returned by writeTmp.
##         #     So is this needed?
##         try:
##             t1 = writeTmp(1, ["A\nB"])
##             encoding = sys.getfilesystemencoding()
##             if encoding is None:
##                 encoding = 'ascii'
##             fi = FileInput(files=str(t1, encoding))
##             lines = list(fi)
##             self.assertEqual(lines, ["A\n", "B"])
##         finally:
##             remove_tempfiles(t1)

    def test_fileno(self):
        # fileno() is -1 whenever no underlying file is currently open.
        t1 = t2 = None
        try:
            t1 = writeTmp(1, ["A\nB"])
            t2 = writeTmp(2, ["C\nD"])
            fi = FileInput(files=(t1, t2))
            self.assertEqual(fi.fileno(), -1)
            line = next(fi)  # fixed formatting: was "line =next( fi)"
            self.assertNotEqual(fi.fileno(), -1)
            fi.nextfile()
            self.assertEqual(fi.fileno(), -1)
            lines = list(fi)  # renamed from "line": it holds the remaining lines
            self.assertEqual(fi.fileno(), -1)
        finally:
            remove_tempfiles(t1, t2)

    def test_opening_mode(self):
        try:
            # invalid mode, should raise ValueError
            fi = FileInput(mode="w")
            self.fail("FileInput should reject invalid mode argument")
        except ValueError:
            pass
        t1 = None
        try:
            # try opening in universal newline mode
            t1 = writeTmp(1, [b"A\nB\r\nC\rD"], mode="wb")
            with check_warnings(('', DeprecationWarning)):
                fi = FileInput(files=t1, mode="U")
            with check_warnings(('', DeprecationWarning)):
                lines = list(fi)
            self.assertEqual(lines, ["A\n", "B\n", "C\n", "D"])
        finally:
            remove_tempfiles(t1)

    def test_stdin_binary_mode(self):
        # In binary mode, FileInput must read from sys.stdin.buffer.
        with mock.patch('sys.stdin') as m_stdin:
            m_stdin.buffer = BytesIO(b'spam, bacon, sausage, and spam')
            fi = FileInput(files=['-'], mode='rb')
            lines = list(fi)
            self.assertEqual(lines, [b'spam, bacon, sausage, and spam'])

    def test_file_opening_hook(self):
        try:
            # cannot use openhook and inplace mode
            fi = FileInput(inplace=1, openhook=lambda f, m: None)
            self.fail("FileInput should raise if both inplace "
                      "and openhook arguments are given")
        except ValueError:
            pass
        try:
            fi = FileInput(openhook=1)
            self.fail("FileInput should check openhook for being callable")
        except ValueError:
            pass

        class CustomOpenHook:
            def __init__(self):
                self.invoked = False
            def __call__(self, *args):
                self.invoked = True
                return open(*args)

        t = writeTmp(1, ["\n"])
        self.addCleanup(remove_tempfiles, t)
        custom_open_hook = CustomOpenHook()
        with FileInput([t], openhook=custom_open_hook) as fi:
            fi.readline()
        self.assertTrue(custom_open_hook.invoked, "openhook not invoked")

    def test_readline(self):
        with open(TESTFN, 'wb') as f:
            f.write(b'A\nB\r\nC\r')
            # Fill TextIOWrapper buffer.
            f.write(b'123456789\n' * 1000)
            # Issue #20501: readline() shouldn't read whole file.
            f.write(b'\x80')
        self.addCleanup(safe_unlink, TESTFN)

        with FileInput(files=TESTFN,
                       openhook=hook_encoded('ascii'), bufsize=8) as fi:
            try:
                self.assertEqual(fi.readline(), 'A\n')
                self.assertEqual(fi.readline(), 'B\n')
                self.assertEqual(fi.readline(), 'C\n')
            except UnicodeDecodeError:
                self.fail('Read to end of file')
            with self.assertRaises(UnicodeDecodeError):
                # Read to the end of file.
                list(fi)

    def test_context_manager(self):
        # Leaving the with-block closes the FileInput (files list reset).
        try:
            t1 = writeTmp(1, ["A\nB\nC"])
            t2 = writeTmp(2, ["D\nE\nF"])
            with FileInput(files=(t1, t2)) as fi:
                lines = list(fi)
            self.assertEqual(lines, ["A\n", "B\n", "C", "D\n", "E\n", "F"])
            self.assertEqual(fi.filelineno(), 3)
            self.assertEqual(fi.lineno(), 6)
            self.assertEqual(fi._files, ())
        finally:
            remove_tempfiles(t1, t2)

    def test_close_on_exception(self):
        # The context manager must close the instance even when the body
        # raises.
        try:
            t1 = writeTmp(1, [""])
            with FileInput(files=t1) as fi:
                raise OSError
        except OSError:
            self.assertEqual(fi._files, ())
        finally:
            remove_tempfiles(t1)

    def test_empty_files_list_specified_to_constructor(self):
        # An empty files list falls back to stdin ('-').
        with FileInput(files=[]) as fi:
            self.assertEqual(fi._files, ('-',))

    def test__getitem__(self):
        """Tests invoking FileInput.__getitem__() with the current
        line number"""
        t = writeTmp(1, ["line1\n", "line2\n"])
        self.addCleanup(remove_tempfiles, t)
        with FileInput(files=[t]) as fi:
            retval1 = fi[0]
            self.assertEqual(retval1, "line1\n")
            retval2 = fi[1]
            self.assertEqual(retval2, "line2\n")

    def test__getitem__invalid_key(self):
        """Tests invoking FileInput.__getitem__() with an index unequal to
        the line number"""
        t = writeTmp(1, ["line1\n", "line2\n"])
        self.addCleanup(remove_tempfiles, t)
        with FileInput(files=[t]) as fi:
            with self.assertRaises(RuntimeError) as cm:
                fi[1]
        self.assertEqual(cm.exception.args, ("accessing lines out of order",))

    def test__getitem__eof(self):
        """Tests invoking FileInput.__getitem__() with the line number but at
        end-of-input"""
        t = writeTmp(1, [])
        self.addCleanup(remove_tempfiles, t)
        with FileInput(files=[t]) as fi:
            with self.assertRaises(IndexError) as cm:
                fi[0]
        self.assertEqual(cm.exception.args, ("end of input reached",))

    def test_nextfile_oserror_deleting_backup(self):
        """Tests invoking FileInput.nextfile() when the attempt to delete
        the backup file would raise OSError.  This error is expected to be
        silently ignored"""
        os_unlink_orig = os.unlink
        os_unlink_replacement = UnconditionallyRaise(OSError)
        try:
            t = writeTmp(1, ["\n"])
            self.addCleanup(remove_tempfiles, t)
            with FileInput(files=[t], inplace=True) as fi:
                next(fi) # make sure the file is opened
                os.unlink = os_unlink_replacement
                fi.nextfile()
        finally:
            os.unlink = os_unlink_orig

        # sanity check to make sure that our test scenario was actually hit
        self.assertTrue(os_unlink_replacement.invoked,
                        "os.unlink() was not invoked")

    def test_readline_os_fstat_raises_OSError(self):
        """Tests invoking FileInput.readline() when os.fstat() raises OSError.
        This exception should be silently discarded."""
        os_fstat_orig = os.fstat
        os_fstat_replacement = UnconditionallyRaise(OSError)
        try:
            t = writeTmp(1, ["\n"])
            self.addCleanup(remove_tempfiles, t)
            with FileInput(files=[t], inplace=True) as fi:
                os.fstat = os_fstat_replacement
                fi.readline()
        finally:
            os.fstat = os_fstat_orig

        # sanity check to make sure that our test scenario was actually hit
        self.assertTrue(os_fstat_replacement.invoked,
                        "os.fstat() was not invoked")

    @unittest.skipIf(not hasattr(os, "chmod"), "os.chmod does not exist")
    def test_readline_os_chmod_raises_OSError(self):
        """Tests invoking FileInput.readline() when os.chmod() raises OSError.
        This exception should be silently discarded."""
        os_chmod_orig = os.chmod
        os_chmod_replacement = UnconditionallyRaise(OSError)
        try:
            t = writeTmp(1, ["\n"])
            self.addCleanup(remove_tempfiles, t)
            with FileInput(files=[t], inplace=True) as fi:
                os.chmod = os_chmod_replacement
                fi.readline()
        finally:
            os.chmod = os_chmod_orig

        # sanity check to make sure that our test scenario was actually hit
        # (fixed copy/paste bug: the message used to say "os.fstat()")
        self.assertTrue(os_chmod_replacement.invoked,
                        "os.chmod() was not invoked")

    def test_fileno_when_ValueError_raised(self):
        # If the underlying file's fileno() raises ValueError (e.g. the
        # file was closed), FileInput.fileno() must return -1.
        class FilenoRaisesValueError(UnconditionallyRaise):
            def __init__(self):
                UnconditionallyRaise.__init__(self, ValueError)
            def fileno(self):
                self.__call__()

        unconditionally_raise_ValueError = FilenoRaisesValueError()
        t = writeTmp(1, ["\n"])
        self.addCleanup(remove_tempfiles, t)
        with FileInput(files=[t]) as fi:
            file_backup = fi._file
            try:
                fi._file = unconditionally_raise_ValueError
                result = fi.fileno()
            finally:
                fi._file = file_backup # make sure the file gets cleaned up

        # sanity check to make sure that our test scenario was actually hit
        self.assertTrue(unconditionally_raise_ValueError.invoked,
                        "_file.fileno() was not invoked")

        self.assertEqual(result, -1, "fileno() should return -1")
class MockFileInput:
    """A class that mocks out fileinput.FileInput for use during unit tests"""

    def __init__(self, files=None, inplace=False, backup="", bufsize=0,
                 mode="r", openhook=None):
        self.files = files
        self.inplace = inplace
        self.backup = backup
        self.bufsize = bufsize
        self.mode = mode
        self.openhook = openhook
        self._file = None
        # How many times each mocked method has been called.
        self.invocation_counts = collections.defaultdict(int)
        # Canned value each mocked method should return, keyed by name.
        self.return_values = {}

    def _record(self, method_name):
        """Bump the call counter for *method_name* and hand back its
        configured return value."""
        self.invocation_counts[method_name] += 1
        return self.return_values[method_name]

    def close(self):
        # close() has no configured return value; just count the call.
        self.invocation_counts["close"] += 1

    def nextfile(self):
        return self._record("nextfile")

    def filename(self):
        return self._record("filename")

    def lineno(self):
        return self._record("lineno")

    def filelineno(self):
        return self._record("filelineno")

    def fileno(self):
        return self._record("fileno")

    def isfirstline(self):
        return self._record("isfirstline")

    def isstdin(self):
        return self._record("isstdin")
class BaseFileInputGlobalMethodsTest(unittest.TestCase):
    """Base class for unit tests for the global function of
    the fileinput module."""

    def setUp(self):
        # Swap in MockFileInput so the module-level helpers are exercised
        # without touching the real FileInput implementation; save the
        # originals for tearDown.
        self._orig_state = fileinput._state
        self._orig_FileInput = fileinput.FileInput
        fileinput.FileInput = MockFileInput

    def tearDown(self):
        # Restore the real class and whatever global state existed before.
        fileinput.FileInput = self._orig_FileInput
        fileinput._state = self._orig_state

    def assertExactlyOneInvocation(self, mock_file_input, method_name):
        # assert that the method with the given name was invoked once
        actual_count = mock_file_input.invocation_counts[method_name]
        self.assertEqual(actual_count, 1, method_name)
        # assert that no other unexpected methods were invoked
        actual_total_count = len(mock_file_input.invocation_counts)
        self.assertEqual(actual_total_count, 1)
class Test_fileinput_input(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.input()"""

    def test_state_is_not_None_and_state_file_is_not_None(self):
        """Tests invoking fileinput.input() when fileinput._state is not None
        and its _file attribute is also not None.  Expect RuntimeError to
        be raised with a meaningful error message and for fileinput._state
        to *not* be modified."""
        instance = MockFileInput()
        instance._file = object()
        fileinput._state = instance
        with self.assertRaises(RuntimeError) as cm:
            fileinput.input()
        self.assertEqual(("input() already active",), cm.exception.args)
        self.assertIs(instance, fileinput._state, "fileinput._state")

    def test_state_is_not_None_and_state_file_is_None(self):
        """Tests invoking fileinput.input() when fileinput._state is not None
        but its _file attribute *is* None.  Expect it to create and return
        a new fileinput.FileInput object with all method parameters passed
        explicitly to the __init__() method; also ensure that
        fileinput._state is set to the returned instance."""
        instance = MockFileInput()
        instance._file = None
        fileinput._state = instance
        self.do_test_call_input()

    def test_state_is_None(self):
        """Tests invoking fileinput.input() when fileinput._state is None
        Expect it to create and return a new fileinput.FileInput object
        with all method parameters passed explicitly to the __init__()
        method; also ensure that fileinput._state is set to the returned
        instance."""
        fileinput._state = None
        self.do_test_call_input()

    def do_test_call_input(self):
        """Tests that fileinput.input() creates a new fileinput.FileInput
        object, passing the given parameters unmodified to
        fileinput.FileInput.__init__().  Note that this test depends on the
        monkey patching of fileinput.FileInput done by setUp()."""
        # Unique sentinel objects: identity checks below prove pass-through.
        files = object()
        inplace = object()
        backup = object()
        bufsize = object()
        mode = object()
        openhook = object()

        # call fileinput.input() with different values for each argument
        result = fileinput.input(files=files, inplace=inplace, backup=backup,
            bufsize=bufsize,
            mode=mode, openhook=openhook)

        # ensure fileinput._state was set to the returned object
        self.assertIs(result, fileinput._state, "fileinput._state")

        # ensure the parameters to fileinput.input() were passed directly
        # to FileInput.__init__()
        self.assertIs(files, result.files, "files")
        self.assertIs(inplace, result.inplace, "inplace")
        self.assertIs(backup, result.backup, "backup")
        self.assertIs(bufsize, result.bufsize, "bufsize")
        self.assertIs(mode, result.mode, "mode")
        self.assertIs(openhook, result.openhook, "openhook")
class Test_fileinput_close(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.close()"""

    def test_state_is_None(self):
        """Tests that fileinput.close() does nothing if fileinput._state
        is None"""
        fileinput._state = None
        fileinput.close()
        self.assertIsNone(fileinput._state)

    def test_state_is_not_None(self):
        """Tests that fileinput.close() invokes close() on fileinput._state
        and sets _state=None"""
        instance = MockFileInput()
        fileinput._state = instance
        fileinput.close()
        # close() must be the only method the global helper touched.
        self.assertExactlyOneInvocation(instance, "close")
        self.assertIsNone(fileinput._state)
class Test_fileinput_nextfile(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.nextfile()"""

    def test_state_is_None(self):
        """Tests fileinput.nextfile() when fileinput._state is None.
        Ensure that it raises RuntimeError with a meaningful error message
        and does not modify fileinput._state"""
        fileinput._state = None
        with self.assertRaises(RuntimeError) as cm:
            fileinput.nextfile()
        self.assertEqual(("no active input()",), cm.exception.args)
        self.assertIsNone(fileinput._state)

    def test_state_is_not_None(self):
        """Tests fileinput.nextfile() when fileinput._state is not None.
        Ensure that it invokes fileinput._state.nextfile() exactly once,
        returns whatever it returns, and does not modify fileinput._state
        to point to a different object."""
        # Sentinel return value; identity check proves pure delegation.
        nextfile_retval = object()
        instance = MockFileInput()
        instance.return_values["nextfile"] = nextfile_retval
        fileinput._state = instance
        retval = fileinput.nextfile()
        self.assertExactlyOneInvocation(instance, "nextfile")
        self.assertIs(retval, nextfile_retval)
        self.assertIs(fileinput._state, instance)
class Test_fileinput_filename(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.filename()"""

    def test_state_is_None(self):
        """Tests fileinput.filename() when fileinput._state is None.
        Ensure that it raises RuntimeError with a meaningful error message
        and does not modify fileinput._state"""
        fileinput._state = None
        with self.assertRaises(RuntimeError) as cm:
            fileinput.filename()
        self.assertEqual(("no active input()",), cm.exception.args)
        self.assertIsNone(fileinput._state)

    def test_state_is_not_None(self):
        """Tests fileinput.filename() when fileinput._state is not None.
        Ensure that it invokes fileinput._state.filename() exactly once,
        returns whatever it returns, and does not modify fileinput._state
        to point to a different object."""
        # Sentinel return value; identity check proves pure delegation.
        filename_retval = object()
        instance = MockFileInput()
        instance.return_values["filename"] = filename_retval
        fileinput._state = instance
        retval = fileinput.filename()
        self.assertExactlyOneInvocation(instance, "filename")
        self.assertIs(retval, filename_retval)
        self.assertIs(fileinput._state, instance)
class Test_fileinput_lineno(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.lineno()"""

    def test_state_is_None(self):
        """Tests fileinput.lineno() when fileinput._state is None.
        Ensure that it raises RuntimeError with a meaningful error message
        and does not modify fileinput._state"""
        fileinput._state = None
        with self.assertRaises(RuntimeError) as cm:
            fileinput.lineno()
        self.assertEqual(("no active input()",), cm.exception.args)
        self.assertIsNone(fileinput._state)

    def test_state_is_not_None(self):
        """Tests fileinput.lineno() when fileinput._state is not None.
        Ensure that it invokes fileinput._state.lineno() exactly once,
        returns whatever it returns, and does not modify fileinput._state
        to point to a different object."""
        # Sentinel return value; identity check proves pure delegation.
        lineno_retval = object()
        instance = MockFileInput()
        instance.return_values["lineno"] = lineno_retval
        fileinput._state = instance
        retval = fileinput.lineno()
        self.assertExactlyOneInvocation(instance, "lineno")
        self.assertIs(retval, lineno_retval)
        self.assertIs(fileinput._state, instance)
class Test_fileinput_filelineno(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.filelineno()"""

    def test_state_is_None(self):
        """Tests fileinput.filelineno() when fileinput._state is None.
        Ensure that it raises RuntimeError with a meaningful error message
        and does not modify fileinput._state"""
        fileinput._state = None
        with self.assertRaises(RuntimeError) as cm:
            fileinput.filelineno()
        self.assertEqual(("no active input()",), cm.exception.args)
        self.assertIsNone(fileinput._state)

    def test_state_is_not_None(self):
        """Tests fileinput.filelineno() when fileinput._state is not None.
        Ensure that it invokes fileinput._state.filelineno() exactly once,
        returns whatever it returns, and does not modify fileinput._state
        to point to a different object."""
        # Sentinel return value; identity check proves pure delegation.
        filelineno_retval = object()
        instance = MockFileInput()
        instance.return_values["filelineno"] = filelineno_retval
        fileinput._state = instance
        retval = fileinput.filelineno()
        self.assertExactlyOneInvocation(instance, "filelineno")
        self.assertIs(retval, filelineno_retval)
        self.assertIs(fileinput._state, instance)
class Test_fileinput_fileno(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.fileno()"""

    def test_state_is_None(self):
        """Tests fileinput.fileno() when fileinput._state is None.
        Ensure that it raises RuntimeError with a meaningful error message
        and does not modify fileinput._state"""
        fileinput._state = None
        with self.assertRaises(RuntimeError) as cm:
            fileinput.fileno()
        self.assertEqual(("no active input()",), cm.exception.args)
        self.assertIsNone(fileinput._state)

    def test_state_is_not_None(self):
        """Tests fileinput.fileno() when fileinput._state is not None.
        Ensure that it invokes fileinput._state.fileno() exactly once,
        returns whatever it returns, and does not modify fileinput._state
        to point to a different object."""
        # Sentinel return value; identity check proves pure delegation.
        fileno_retval = object()
        instance = MockFileInput()
        instance.return_values["fileno"] = fileno_retval
        # Removed dead code ("instance.fileno_retval = fileno_retval"): the
        # mock only reads return_values, and no sibling test class sets such
        # an attribute.
        fileinput._state = instance
        retval = fileinput.fileno()
        self.assertExactlyOneInvocation(instance, "fileno")
        self.assertIs(retval, fileno_retval)
        self.assertIs(fileinput._state, instance)
class Test_fileinput_isfirstline(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.isfirstline()"""

    def test_state_is_None(self):
        """Tests fileinput.isfirstline() when fileinput._state is None.
        Ensure that it raises RuntimeError with a meaningful error message
        and does not modify fileinput._state"""
        fileinput._state = None
        with self.assertRaises(RuntimeError) as cm:
            fileinput.isfirstline()
        self.assertEqual(("no active input()",), cm.exception.args)
        self.assertIsNone(fileinput._state)

    def test_state_is_not_None(self):
        """Tests fileinput.isfirstline() when fileinput._state is not None.
        Ensure that it invokes fileinput._state.isfirstline() exactly once,
        returns whatever it returns, and does not modify fileinput._state
        to point to a different object."""
        # Sentinel return value; identity check proves pure delegation.
        isfirstline_retval = object()
        instance = MockFileInput()
        instance.return_values["isfirstline"] = isfirstline_retval
        fileinput._state = instance
        retval = fileinput.isfirstline()
        self.assertExactlyOneInvocation(instance, "isfirstline")
        self.assertIs(retval, isfirstline_retval)
        self.assertIs(fileinput._state, instance)
class Test_fileinput_isstdin(BaseFileInputGlobalMethodsTest):
    """Unit tests for fileinput.isstdin()"""

    def test_state_is_None(self):
        """Tests fileinput.isstdin() when fileinput._state is None.
        Ensure that it raises RuntimeError with a meaningful error message
        and does not modify fileinput._state"""
        fileinput._state = None
        with self.assertRaises(RuntimeError) as cm:
            fileinput.isstdin()
        self.assertEqual(("no active input()",), cm.exception.args)
        self.assertIsNone(fileinput._state)

    def test_state_is_not_None(self):
        """Tests fileinput.isstdin() when fileinput._state is not None.
        Ensure that it invokes fileinput._state.isstdin() exactly once,
        returns whatever it returns, and does not modify fileinput._state
        to point to a different object."""
        # Sentinel return value; identity check proves pure delegation.
        isstdin_retval = object()
        instance = MockFileInput()
        instance.return_values["isstdin"] = isstdin_retval
        fileinput._state = instance
        retval = fileinput.isstdin()
        self.assertExactlyOneInvocation(instance, "isstdin")
        self.assertIs(retval, isstdin_retval)
        self.assertIs(fileinput._state, instance)
class InvocationRecorder:
    """Callable that counts invocations and remembers the last arguments.

    Fixed: ``last_invocation`` is now initialized to None so that reading
    it before the first call is no longer an AttributeError (the original
    only created the attribute inside __call__).
    """

    def __init__(self):
        self.invocation_count = 0
        self.last_invocation = None  # (args, kwargs) tuple after first call

    def __call__(self, *args, **kwargs):
        self.invocation_count += 1
        self.last_invocation = (args, kwargs)
class Test_hook_compressed(unittest.TestCase):
    """Unit tests for fileinput.hook_compressed()"""
    # The "mode" arguments below are fake integers: only pass-through to
    # the underlying opener is verified, never an actual open.

    def setUp(self):
        self.fake_open = InvocationRecorder()

    def test_empty_string(self):
        self.do_test_use_builtin_open("", 1)

    def test_no_ext(self):
        self.do_test_use_builtin_open("abcd", 2)

    @unittest.skipUnless(gzip, "Requires gzip and zlib")
    def test_gz_ext_fake(self):
        # A ".gz" extension must route through gzip.open.
        original_open = gzip.open
        gzip.open = self.fake_open
        try:
            result = fileinput.hook_compressed("test.gz", 3)
        finally:
            gzip.open = original_open

        self.assertEqual(self.fake_open.invocation_count, 1)
        self.assertEqual(self.fake_open.last_invocation, (("test.gz", 3), {}))

    @unittest.skipUnless(bz2, "Requires bz2")
    def test_bz2_ext_fake(self):
        # A ".bz2" extension must route through bz2.BZ2File.
        original_open = bz2.BZ2File
        bz2.BZ2File = self.fake_open
        try:
            result = fileinput.hook_compressed("test.bz2", 4)
        finally:
            bz2.BZ2File = original_open

        self.assertEqual(self.fake_open.invocation_count, 1)
        self.assertEqual(self.fake_open.last_invocation, (("test.bz2", 4), {}))

    def test_blah_ext(self):
        self.do_test_use_builtin_open("abcd.blah", 5)

    def test_gz_ext_builtin(self):
        # Extension matching is case-sensitive: ".Gz" is not ".gz".
        self.do_test_use_builtin_open("abcd.Gz", 6)

    def test_bz2_ext_builtin(self):
        self.do_test_use_builtin_open("abcd.Bz2", 7)

    def do_test_use_builtin_open(self, filename, mode):
        # Any non-compressed extension must fall back to builtins.open.
        original_open = self.replace_builtin_open(self.fake_open)
        try:
            result = fileinput.hook_compressed(filename, mode)
        finally:
            self.replace_builtin_open(original_open)

        self.assertEqual(self.fake_open.invocation_count, 1)
        self.assertEqual(self.fake_open.last_invocation,
                         ((filename, mode), {}))

    @staticmethod
    def replace_builtin_open(new_open_func):
        # Swap builtins.open and return the previous one for restoration.
        original_open = builtins.open
        builtins.open = new_open_func
        return original_open
class Test_hook_encoded(unittest.TestCase):
    """Unit tests for fileinput.hook_encoded()"""

    def test(self):
        # hook_encoded(encoding) must return an opener that forwards the
        # filename/mode and passes encoding as a keyword to open().
        encoding = object()
        result = fileinput.hook_encoded(encoding)

        fake_open = InvocationRecorder()
        original_open = builtins.open
        builtins.open = fake_open
        try:
            filename = object()
            mode = object()
            open_result = result(filename, mode)
        finally:
            builtins.open = original_open

        self.assertEqual(fake_open.invocation_count, 1)
        args, kwargs = fake_open.last_invocation
        self.assertIs(args[0], filename)
        self.assertIs(args[1], mode)
        self.assertIs(kwargs.pop('encoding'), encoding)
        self.assertFalse(kwargs)

    def test_modes(self):
        with open(TESTFN, 'wb') as f:
            # UTF-7 is a convenient, seldom used encoding
            f.write(b'A\nB\r\nC\rD+IKw-')
        self.addCleanup(safe_unlink, TESTFN)

        def check(mode, expected_lines):
            with FileInput(files=TESTFN, mode=mode,
                           openhook=hook_encoded('utf-7')) as fi:
                lines = list(fi)
            self.assertEqual(lines, expected_lines)

        # 'U'/'rU' are deprecated aliases for universal-newline text mode;
        # binary mode is rejected because an encoding makes no sense there.
        check('r', ['A\n', 'B\n', 'C\n', 'D\u20ac'])
        with self.assertWarns(DeprecationWarning):
            check('rU', ['A\n', 'B\n', 'C\n', 'D\u20ac'])
        with self.assertWarns(DeprecationWarning):
            check('U', ['A\n', 'B\n', 'C\n', 'D\u20ac'])
        with self.assertRaises(ValueError):
            check('rb', ['A\n', 'B\r\n', 'C\r', 'D\u20ac'])
# Fixed: removed dataset-extraction residue ("| unknown | codeparrot/...")
# that had been fused onto this line.
if __name__ == "__main__":
    unittest.main()
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import copy
import pendulum
import pytest
from sqlalchemy import select
from sqlalchemy.orm import Session
from airflow._shared.timezones import timezone
from airflow.models.asset import AssetAliasModel, AssetEvent, AssetModel
from airflow.models.dagbag import DBDagBag
from airflow.providers.standard.operators.empty import EmptyOperator
from airflow.providers.standard.operators.trigger_dagrun import TriggerDagRunOperator
from airflow.providers.standard.sensors.external_task import ExternalTaskSensor
from airflow.sdk import Metadata, task
from airflow.sdk.definitions.asset import Asset, AssetAlias, Dataset
from tests_common.test_utils.asserts import assert_queries_count
from tests_common.test_utils.db import clear_db_assets, clear_db_runs
# Every test in this module requires database access.
pytestmark = pytest.mark.db_test

# DAG ids created by the fixtures below and referenced by the tests.
DAG_ID = "dag_with_multiple_versions"
DAG_ID_EXTERNAL_TRIGGER = "external_trigger"
DAG_ID_RESOLVED_ASSET_ALIAS = "dag_with_resolved_asset_alias"
DAG_ID_LINEAR_DEPTH = "linear_depth_dag"
DAG_ID_NONLINEAR_DEPTH = "nonlinear_depth_dag"
def _empty_task_node(task_id: str) -> dict:
    """Expected response node for an EmptyOperator task with the given id."""
    return {
        "children": None,
        "id": task_id,
        "is_mapped": None,
        "label": task_id,
        "tooltip": None,
        "setup_teardown_type": None,
        "type": "task",
        "operator": "EmptyOperator",
        "asset_condition_type": None,
    }


# Expected graph responses for the three serialized versions of the DAG:
# the latest version has task1..task3, the second version drops task3, and
# the first version additionally drops task2.
LATEST_VERSION_DAG_RESPONSE: dict = {
    "edges": [],
    "nodes": [_empty_task_node(t) for t in ("task1", "task2", "task3")],
}
SECOND_VERSION_DAG_RESPONSE: dict = {
    "edges": [],
    "nodes": [_empty_task_node(t) for t in ("task1", "task2")],
}
FIRST_VERSION_DAG_RESPONSE: dict = {
    "edges": [],
    "nodes": [_empty_task_node("task1")],
}
@pytest.fixture(autouse=True, scope="module")
def examples_dag_bag() -> DBDagBag:
    # Module-scoped, autouse: every test gets a database-backed DagBag.
    return DBDagBag()
@pytest.fixture(autouse=True)
def clean():
    # Clear DAG runs and asset tables both before and after each test so
    # tests neither see nor leave stale database state.
    clear_db_runs()
    clear_db_assets()
    yield
    clear_db_runs()
    clear_db_assets()
# Assets consumed/produced by the DAGs built in make_dags below.
@pytest.fixture
def asset1() -> Asset:
    return Asset(uri="s3://bucket/next-run-asset/1", name="asset1")


@pytest.fixture
def asset2() -> Asset:
    return Asset(uri="s3://bucket/next-run-asset/2", name="asset2")


@pytest.fixture
def asset3() -> Dataset:
    # Dataset is the legacy alias for Asset — kept here presumably to cover
    # the backward-compat path; TODO confirm against the endpoint tests.
    return Dataset(uri="s3://dataset-bucket/example.csv")
@pytest.fixture
def make_dags(dag_maker, session, time_machine, asset1: Asset, asset2: Asset, asset3: Dataset) -> None:
    """Create and serialize every DAG the structure-endpoint tests query.

    NOTE(review): ``time_machine`` appears unused in this body — confirm it is
    needed (e.g. for fixture side effects) or drop it.
    """
    # DAG that externally triggers DAG_ID via TriggerDagRunOperator.
    with dag_maker(
        dag_id=DAG_ID_EXTERNAL_TRIGGER,
        serialized=True,
        session=session,
        start_date=pendulum.DateTime(2023, 2, 1, 0, 0, 0, tzinfo=pendulum.UTC),
    ):
        TriggerDagRunOperator(task_id="trigger_dag_run_operator", trigger_dag_id=DAG_ID)
    dag_maker.sync_dagbag_to_db()
    # Main DAG: scheduled on (asset1 AND asset2 AND alias); task_1 emits asset3.
    with dag_maker(
        dag_id=DAG_ID,
        serialized=True,
        session=session,
        start_date=pendulum.DateTime(2023, 2, 1, 0, 0, 0, tzinfo=pendulum.UTC),
        schedule=(asset1 & asset2 & AssetAlias("example-alias")),
    ):
        (
            EmptyOperator(task_id="task_1", outlets=[asset3])
            >> ExternalTaskSensor(task_id="external_task_sensor", external_dag_id=DAG_ID)
            >> EmptyOperator(task_id="task_2")
        )
    dag_maker.sync_dagbag_to_db()
    # DAG whose task_1 writes through an asset alias; the alias is resolved to
    # a concrete AssetModel manually below.
    with dag_maker(
        dag_id=DAG_ID_RESOLVED_ASSET_ALIAS,
        serialized=True,
        session=session,
        start_date=pendulum.DateTime(2023, 2, 1, 0, 0, 0, tzinfo=pendulum.UTC),
    ):

        @task(outlets=[AssetAlias("example-alias-resolved")])
        def task_1(**context):
            yield Metadata(
                asset=Asset("resolved_example_asset_alias"),
                extra={"k": "v"},  # extra has to be provided, can be {}
                alias=AssetAlias("example-alias-resolved"),
            )

        task_1() >> EmptyOperator(task_id="task_2")

    dr = dag_maker.create_dagrun()
    # Resolve the alias by hand: attach a concrete asset plus an asset event
    # attributed to task_1 of the run created above.
    asset_alias = session.scalar(
        select(AssetAliasModel).where(AssetAliasModel.name == "example-alias-resolved")
    )
    asset_model = AssetModel(name="resolved_example_asset_alias")
    session.add(asset_model)
    session.flush()  # populate asset_model.id before it is referenced below
    asset_alias.assets.append(asset_model)
    asset_alias.asset_events.append(
        AssetEvent(
            id=1,
            timestamp=timezone.parse("2021-01-01T00:00:00"),
            asset_id=asset_model.id,
            source_dag_id=DAG_ID_RESOLVED_ASSET_ALIAS,
            source_task_id="task_1",
            source_run_id=dr.run_id,
            source_map_index=-1,
        )
    )
    session.commit()
    dag_maker.sync_dagbag_to_db()
    # Linear DAG with 5 tasks for depth testing
    with dag_maker(
        dag_id=DAG_ID_LINEAR_DEPTH,
        serialized=True,
        session=session,
        start_date=pendulum.DateTime(2023, 2, 1, 0, 0, 0, tzinfo=pendulum.UTC),
    ):
        task_a = EmptyOperator(task_id="task_a")
        task_b = EmptyOperator(task_id="task_b")
        task_c = EmptyOperator(task_id="task_c")
        task_d = EmptyOperator(task_id="task_d")
        task_e = EmptyOperator(task_id="task_e")
        # Linear chain: task_a >> task_b >> task_c >> task_d >> task_e
        task_a >> task_b >> task_c >> task_d >> task_e
    dag_maker.sync_dagbag_to_db()
    # Non-linear DAG for depth testing with branching and merging
    with dag_maker(
        dag_id=DAG_ID_NONLINEAR_DEPTH,
        serialized=True,
        session=session,
        start_date=pendulum.DateTime(2023, 2, 1, 0, 0, 0, tzinfo=pendulum.UTC),
    ):
        start = EmptyOperator(task_id="start")
        branch_a = EmptyOperator(task_id="branch_a")
        branch_b = EmptyOperator(task_id="branch_b")
        intermediate = EmptyOperator(task_id="intermediate")
        merge = EmptyOperator(task_id="merge")
        end = EmptyOperator(task_id="end")
        # Non-linear structure
        start >> [branch_a, branch_b]
        branch_a >> intermediate >> merge
        branch_b >> merge
        merge >> end
    dag_maker.sync_dagbag_to_db()
def _fetch_asset_id(asset: Asset, session: Session) -> str:
    """Return the database id of ``asset`` (matched by name and uri) as a string."""
    lookup = select(AssetModel.id).where(
        AssetModel.name == asset.name,
        AssetModel.uri == asset.uri,
    )
    return str(session.scalar(lookup))
@pytest.fixture
def asset1_id(make_dags, asset1, session: Session) -> str:
    # Depends on make_dags so the asset rows exist before the lookup.
    return _fetch_asset_id(asset1, session)


@pytest.fixture
def asset2_id(make_dags, asset2, session) -> str:
    return _fetch_asset_id(asset2, session)


@pytest.fixture
def asset3_id(make_dags, asset3, session) -> str:
    return _fetch_asset_id(asset3, session)
class TestStructureDataEndpoint:
    @pytest.mark.parametrize(
        ("params", "expected", "expected_queries_count"),
        [
            # Full graph of DAG_ID.
            (
                {"dag_id": DAG_ID},
                {
                    "edges": [
                        {
                            "is_setup_teardown": None,
                            "is_source_asset": None,
                            "label": None,
                            "source_id": "external_task_sensor",
                            "target_id": "task_2",
                        },
                        {
                            "is_setup_teardown": None,
                            "is_source_asset": None,
                            "label": None,
                            "source_id": "task_1",
                            "target_id": "external_task_sensor",
                        },
                    ],
                    "nodes": [
                        {
                            "asset_condition_type": None,
                            "children": None,
                            "id": "task_1",
                            "is_mapped": None,
                            "label": "task_1",
                            "tooltip": None,
                            "setup_teardown_type": None,
                            "type": "task",
                            "operator": "EmptyOperator",
                        },
                        {
                            "asset_condition_type": None,
                            "children": None,
                            "id": "external_task_sensor",
                            "is_mapped": None,
                            "label": "external_task_sensor",
                            "tooltip": None,
                            "setup_teardown_type": None,
                            "type": "task",
                            "operator": "ExternalTaskSensor",
                        },
                        {
                            "asset_condition_type": None,
                            "children": None,
                            "id": "task_2",
                            "is_mapped": None,
                            "label": "task_2",
                            "tooltip": None,
                            "setup_teardown_type": None,
                            "type": "task",
                            "operator": "EmptyOperator",
                        },
                    ],
                },
                6,
            ),
            # Unknown root task id yields an empty graph, not an error.
            (
                {
                    "dag_id": DAG_ID,
                    "root": "unknown_task",
                },
                {"edges": [], "nodes": []},
                6,
            ),
            # Root with both upstream/downstream filtering disabled: root only.
            (
                {
                    "dag_id": DAG_ID,
                    "root": "task_1",
                    "filter_upstream": False,
                    "filter_downstream": False,
                },
                {
                    "edges": [],
                    "nodes": [
                        {
                            "asset_condition_type": None,
                            "children": None,
                            "id": "task_1",
                            "is_mapped": None,
                            "label": "task_1",
                            "operator": "EmptyOperator",
                            "setup_teardown_type": None,
                            "tooltip": None,
                            "type": "task",
                        },
                    ],
                },
                6,
            ),
            # external_dependencies=True adds the trigger node for DAG_ID.
            (
                {"dag_id": DAG_ID_EXTERNAL_TRIGGER, "external_dependencies": True},
                {
                    "edges": [
                        {
                            "is_source_asset": None,
                            "is_setup_teardown": None,
                            "label": None,
                            "source_id": "trigger_dag_run_operator",
                            "target_id": "trigger:external_trigger:dag_with_multiple_versions:trigger_dag_run_operator",
                        }
                    ],
                    "nodes": [
                        {
                            "asset_condition_type": None,
                            "children": None,
                            "id": "trigger_dag_run_operator",
                            "is_mapped": None,
                            "label": "trigger_dag_run_operator",
                            "tooltip": None,
                            "setup_teardown_type": None,
                            "type": "task",
                            "operator": "TriggerDagRunOperator",
                        },
                        {
                            "asset_condition_type": None,
                            "children": None,
                            "id": "trigger:external_trigger:dag_with_multiple_versions:trigger_dag_run_operator",
                            "is_mapped": None,
                            "label": "trigger_dag_run_operator",
                            "tooltip": None,
                            "setup_teardown_type": None,
                            "type": "trigger",
                            "operator": None,
                        },
                    ],
                },
                13,
            ),
        ],
    )
    @pytest.mark.usefixtures("make_dags")
    def test_should_return_200(self, test_client, params, expected, expected_queries_count):
        """Happy-path structure queries, each within a fixed query-count budget."""
        with assert_queries_count(expected_queries_count):
            response = test_client.get("/structure/structure_data", params=params)
        assert response.status_code == 200
        assert response.json() == expected
    @pytest.mark.usefixtures("make_dags")
    def test_should_return_200_with_asset(self, test_client, asset1_id, asset2_id, asset3_id):
        """Asset-scheduled DAG graph: expects the and-gate condition node, both
        input assets, the schedule alias, sensor/trigger external nodes, and the
        asset3 outlet, wired by the edges below."""
        params = {
            "dag_id": DAG_ID,
            "external_dependencies": True,
        }
        expected = {
            "edges": [
                {
                    "is_setup_teardown": None,
                    "label": None,
                    "source_id": "external_task_sensor",
                    "target_id": "task_2",
                    "is_source_asset": None,
                },
                {
                    "is_setup_teardown": None,
                    "label": None,
                    "source_id": "task_1",
                    "target_id": "external_task_sensor",
                    "is_source_asset": None,
                },
                {
                    "is_setup_teardown": None,
                    "label": None,
                    "source_id": "and-gate-0",
                    "target_id": "task_1",
                    "is_source_asset": True,
                },
                {
                    "is_setup_teardown": None,
                    "label": None,
                    "source_id": asset1_id,
                    "target_id": "and-gate-0",
                    "is_source_asset": None,
                },
                {
                    "is_setup_teardown": None,
                    "label": None,
                    "source_id": asset2_id,
                    "target_id": "and-gate-0",
                    "is_source_asset": None,
                },
                {
                    "is_setup_teardown": None,
                    "label": None,
                    "source_id": "example-alias",
                    "target_id": "and-gate-0",
                    "is_source_asset": None,
                },
                {
                    "is_setup_teardown": None,
                    "label": None,
                    "source_id": "sensor:dag_with_multiple_versions:dag_with_multiple_versions:external_task_sensor",
                    "target_id": "task_1",
                    "is_source_asset": None,
                },
                {
                    "is_setup_teardown": None,
                    "label": None,
                    "source_id": "trigger:external_trigger:dag_with_multiple_versions:trigger_dag_run_operator",
                    "target_id": "task_1",
                    "is_source_asset": None,
                },
                {
                    "is_setup_teardown": None,
                    "label": None,
                    "source_id": "task_1",
                    "target_id": f"asset:{asset3_id}",
                    "is_source_asset": None,
                },
            ],
            "nodes": [
                {
                    "children": None,
                    "id": "task_1",
                    "is_mapped": None,
                    "label": "task_1",
                    "tooltip": None,
                    "setup_teardown_type": None,
                    "type": "task",
                    "operator": "EmptyOperator",
                    "asset_condition_type": None,
                },
                {
                    "children": None,
                    "id": "external_task_sensor",
                    "is_mapped": None,
                    "label": "external_task_sensor",
                    "tooltip": None,
                    "setup_teardown_type": None,
                    "type": "task",
                    "operator": "ExternalTaskSensor",
                    "asset_condition_type": None,
                },
                {
                    "children": None,
                    "id": "task_2",
                    "is_mapped": None,
                    "label": "task_2",
                    "tooltip": None,
                    "setup_teardown_type": None,
                    "type": "task",
                    "operator": "EmptyOperator",
                    "asset_condition_type": None,
                },
                {
                    "children": None,
                    "id": f"asset:{asset3_id}",
                    "is_mapped": None,
                    "label": "s3://dataset-bucket/example.csv",
                    "tooltip": None,
                    "setup_teardown_type": None,
                    "type": "asset",
                    "operator": None,
                    "asset_condition_type": None,
                },
                {
                    "children": None,
                    "id": "sensor:dag_with_multiple_versions:dag_with_multiple_versions:external_task_sensor",
                    "is_mapped": None,
                    "label": "external_task_sensor",
                    "tooltip": None,
                    "setup_teardown_type": None,
                    "type": "sensor",
                    "operator": None,
                    "asset_condition_type": None,
                },
                {
                    "children": None,
                    "id": "trigger:external_trigger:dag_with_multiple_versions:trigger_dag_run_operator",
                    "is_mapped": None,
                    "label": "trigger_dag_run_operator",
                    "tooltip": None,
                    "setup_teardown_type": None,
                    "type": "trigger",
                    "operator": None,
                    "asset_condition_type": None,
                },
                {
                    "children": None,
                    "id": "and-gate-0",
                    "is_mapped": None,
                    "label": "and-gate-0",
                    "tooltip": None,
                    "setup_teardown_type": None,
                    "type": "asset-condition",
                    "operator": None,
                    "asset_condition_type": "and-gate",
                },
                {
                    "children": None,
                    "id": asset1_id,
                    "is_mapped": None,
                    "label": "asset1",
                    "tooltip": None,
                    "setup_teardown_type": None,
                    "type": "asset",
                    "operator": None,
                    "asset_condition_type": None,
                },
                {
                    "children": None,
                    "id": asset2_id,
                    "is_mapped": None,
                    "label": "asset2",
                    "tooltip": None,
                    "setup_teardown_type": None,
                    "type": "asset",
                    "operator": None,
                    "asset_condition_type": None,
                },
                {
                    "children": None,
                    "id": "example-alias",
                    "is_mapped": None,
                    "label": "example-alias",
                    "tooltip": None,
                    "setup_teardown_type": None,
                    "type": "asset-alias",
                    "operator": None,
                    "asset_condition_type": None,
                },
            ],
        }
        with assert_queries_count(13):
            response = test_client.get("/structure/structure_data", params=params)
        assert response.status_code == 200
        assert response.json() == expected
    @pytest.mark.usefixtures("make_dags")
    def test_should_return_200_with_resolved_asset_alias_attached_to_the_corrrect_producing_task(
        self, test_client, session
    ):
        # NOTE(review): "corrrect" in the test name is a typo — consider renaming.
        # The alias resolved in make_dags must appear as an asset node downstream
        # of task_1 (the producing task), not of task_2.
        resolved_asset = session.scalar(
            select(AssetModel).where(AssetModel.name == "resolved_example_asset_alias")
        )
        params = {
            "dag_id": DAG_ID_RESOLVED_ASSET_ALIAS,
            "external_dependencies": True,
        }
        expected = {
            "edges": [
                {
                    "source_id": "task_1",
                    "target_id": "task_2",
                    "is_setup_teardown": None,
                    "label": None,
                    "is_source_asset": None,
                },
                {
                    "source_id": "task_1",
                    "target_id": f"asset:{resolved_asset.id}",
                    "is_setup_teardown": None,
                    "label": None,
                    "is_source_asset": None,
                },
            ],
            "nodes": [
                {
                    "id": "task_1",
                    "label": "task_1",
                    "type": "task",
                    "children": None,
                    "is_mapped": None,
                    "tooltip": None,
                    "setup_teardown_type": None,
                    "operator": "@task",
                    "asset_condition_type": None,
                },
                {
                    "id": "task_2",
                    "label": "task_2",
                    "type": "task",
                    "children": None,
                    "is_mapped": None,
                    "tooltip": None,
                    "setup_teardown_type": None,
                    "operator": "EmptyOperator",
                    "asset_condition_type": None,
                },
                {
                    "id": f"asset:{resolved_asset.id}",
                    "label": "resolved_example_asset_alias",
                    "type": "asset",
                    "children": None,
                    "is_mapped": None,
                    "tooltip": None,
                    "setup_teardown_type": None,
                    "operator": None,
                    "asset_condition_type": None,
                },
            ],
        }
        response = test_client.get("/structure/structure_data", params=params)
        assert response.status_code == 200
        assert response.json() == expected
    @pytest.mark.parametrize(
        ("params", "expected"),
        [
            # Omitting version_number returns the latest serialized version.
            pytest.param(
                {"dag_id": DAG_ID},
                LATEST_VERSION_DAG_RESPONSE,
                id="get_default_version",
            ),
            pytest.param(
                {"dag_id": DAG_ID, "version_number": 1},
                FIRST_VERSION_DAG_RESPONSE,
                id="get_oldest_version",
            ),
            pytest.param(
                {"dag_id": DAG_ID, "version_number": 2},
                SECOND_VERSION_DAG_RESPONSE,
                id="get_specific_version",
            ),
            # Version 3 is the newest, so it matches the latest response.
            pytest.param(
                {"dag_id": DAG_ID, "version_number": 3},
                LATEST_VERSION_DAG_RESPONSE,
                id="get_latest_version",
            ),
        ],
    )
    @pytest.mark.usefixtures("make_dag_with_multiple_versions")
    def test_should_return_200_with_multiple_versions(self, test_client, params, expected):
        response = test_client.get("/structure/structure_data", params=params)
        assert response.status_code == 200
        assert response.json() == expected
    def test_delete_dag_should_response_401(self, unauthenticated_test_client):
        # NOTE(review): name says "delete_dag" but this exercises GET
        # /structure/structure_data — likely a copy-paste; consider renaming.
        response = unauthenticated_test_client.get("/structure/structure_data", params={"dag_id": DAG_ID})
        assert response.status_code == 401
    def test_delete_dag_should_response_403(self, unauthorized_test_client):
        # NOTE(review): misnamed like the 401 test above — this is the
        # structure_data endpoint, not DAG deletion; consider renaming.
        response = unauthorized_test_client.get("/structure/structure_data", params={"dag_id": DAG_ID})
        assert response.status_code == 403
    def test_should_return_404(self, test_client):
        # Unknown dag_id yields 404 with a descriptive detail message.
        response = test_client.get("/structure/structure_data", params={"dag_id": "not_existing"})
        assert response.status_code == 404
        assert response.json()["detail"] == "Dag with id not_existing was not found"
    def test_should_return_404_when_dag_version_not_found(self, test_client):
        # Known DAG, nonexistent version number: still a 404, different detail.
        response = test_client.get(
            "/structure/structure_data", params={"dag_id": DAG_ID, "version_number": 999}
        )
        assert response.status_code == 404
        assert (
            response.json()["detail"]
            == "Dag with id dag_with_multiple_versions and version number 999 was not found"
        )
    def test_mapped_operator_graph_view(self, dag_maker, test_client, session):
        """
        Ensures structure_data endpoint handles MappedOperator without AttributeError.
        """
        from airflow.providers.standard.operators.bash import BashOperator

        with dag_maker(
            dag_id="test_mapped_operator_dag",
            serialized=True,
            session=session,
            start_date=pendulum.DateTime(2023, 2, 1, 0, 0, 0, tzinfo=pendulum.UTC),
        ):
            task1 = EmptyOperator(task_id="task1")
            # partial().expand() produces a MappedOperator, the case under test.
            mapped_task = BashOperator.partial(
                task_id="mapped_bash_task",
                do_xcom_push=False,
            ).expand(bash_command=["echo 1", "echo 2", "echo 3"])
            task2 = EmptyOperator(task_id="task2")
            task1 >> mapped_task >> task2
        dag_maker.sync_dagbag_to_db()
        response = test_client.get("/structure/structure_data", params={"dag_id": "test_mapped_operator_dag"})
        assert response.status_code == 200
        data = response.json()
        # The mapped task must be flagged as mapped and keep its operator name.
        mapped_node = next(node for node in data["nodes"] if node["id"] == "mapped_bash_task")
        assert mapped_node["is_mapped"] is True
        assert mapped_node["operator"] == "BashOperator"
        assert len(data["edges"]) == 2
    def test_mapped_operator_in_task_group(self, dag_maker, test_client, session):
        """
        Test that mapped operators within task groups are handled correctly.
        Specifically tests task_group_to_dict function with MappedOperator instances.
        """
        from airflow.providers.standard.operators.python import PythonOperator
        from airflow.sdk.definitions.taskgroup import TaskGroup

        with dag_maker(
            dag_id="test_mapped_in_group_dag",
            serialized=True,
            session=session,
            start_date=pendulum.DateTime(2023, 2, 1, 0, 0, 0, tzinfo=pendulum.UTC),
        ):
            with TaskGroup(group_id="processing_group"):
                prep = EmptyOperator(task_id="prep")
                mapped = PythonOperator.partial(
                    task_id="process",
                    python_callable=lambda x: print(f"Processing {x}"),
                ).expand(op_args=[[1], [2], [3], [4]])
                prep >> mapped
        dag_maker.sync_dagbag_to_db()
        response = test_client.get("/structure/structure_data", params={"dag_id": "test_mapped_in_group_dag"})
        assert response.status_code == 200
        data = response.json()
        # The group node carries its members as children; the mapped member is
        # addressed by its group-prefixed id.
        group_node = next(node for node in data["nodes"] if node["id"] == "processing_group")
        assert group_node["children"] is not None
        mapped_in_group = next(
            child for child in group_node["children"] if child["id"] == "processing_group.process"
        )
        assert mapped_in_group["is_mapped"] is True
        assert mapped_in_group["operator"] == "PythonOperator"
    @pytest.mark.parametrize(
        ("params", "expected_task_ids", "description"),
        [
            pytest.param(
                {"dag_id": DAG_ID_LINEAR_DEPTH, "root": "task_a", "include_downstream": True, "depth": 1},
                ["task_a", "task_b"],
                "depth=1 downstream from task_a should return task_a and task_b only",
                id="downstream_depth_1",
            ),
            pytest.param(
                {"dag_id": DAG_ID_LINEAR_DEPTH, "root": "task_a", "include_downstream": True, "depth": 2},
                ["task_a", "task_b", "task_c"],
                "depth=2 downstream from task_a should return task_a, task_b, and task_c",
                id="downstream_depth_2",
            ),
            pytest.param(
                {"dag_id": DAG_ID_LINEAR_DEPTH, "root": "task_e", "include_upstream": True, "depth": 1},
                ["task_d", "task_e"],
                "depth=1 upstream from task_e should return task_e and task_d only",
                id="upstream_depth_1",
            ),
            pytest.param(
                {"dag_id": DAG_ID_LINEAR_DEPTH, "root": "task_e", "include_upstream": True, "depth": 2},
                ["task_c", "task_d", "task_e"],
                "depth=2 upstream from task_e should return task_e, task_d, and task_c",
                id="upstream_depth_2",
            ),
            pytest.param(
                {
                    "dag_id": DAG_ID_LINEAR_DEPTH,
                    "root": "task_c",
                    "include_upstream": True,
                    "include_downstream": True,
                    "depth": 1,
                },
                ["task_b", "task_c", "task_d"],
                "depth=1 both directions from task_c should return task_b, task_c, and task_d",
                id="both_directions_depth_1",
            ),
            pytest.param(
                {
                    "dag_id": DAG_ID_NONLINEAR_DEPTH,
                    "root": "start",
                    "include_downstream": True,
                    "depth": 1,
                },
                ["branch_a", "branch_b", "start"],
                "depth=1 downstream from start in nonlinear DAG should return start and both branches",
                id="nonlinear_downstream_depth_1",
            ),
            pytest.param(
                {
                    "dag_id": DAG_ID_NONLINEAR_DEPTH,
                    "root": "merge",
                    "include_upstream": True,
                    "depth": 1,
                },
                ["branch_b", "intermediate", "merge"],
                "depth=1 upstream from merge in nonlinear DAG should return merge, branch_b, and intermediate",
                id="nonlinear_upstream_depth_1",
            ),
        ],
    )
    @pytest.mark.usefixtures("make_dags")
    def test_structure_with_depth(self, test_client, params, expected_task_ids, description):
        """Test that depth parameter limits the number of levels returned in various scenarios."""
        response = test_client.get("/structure/structure_data", params=params)
        assert response.status_code == 200
        data = response.json()
        # expected_task_ids are pre-sorted, so sort the response ids to match.
        task_ids = sorted([node["id"] for node in data["nodes"]])
        assert task_ids == expected_task_ids, description
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect.testing.features;
import static com.google.common.collect.testing.Helpers.copyToSet;
import com.google.common.annotations.GwtCompatible;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.util.Collection;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.SortedSet;
/**
 * Optional features of classes derived from {@code Collection}.
 *
 * @author George van den Driessche
 */
@SuppressWarnings("rawtypes") // maybe avoidable if we rework the whole package?
@GwtCompatible
public enum CollectionFeature implements Feature<Collection> {
  /**
   * The collection must not throw {@code NullPointerException} on calls such as {@code
   * contains(null)} or {@code remove(null)}, but instead must return a simple {@code false}.
   */
  ALLOWS_NULL_QUERIES,

  /**
   * Null may be stored as an element; a collection that stores nulls must also tolerate null
   * queries, hence this implies {@link #ALLOWS_NULL_QUERIES}.
   */
  ALLOWS_NULL_VALUES(ALLOWS_NULL_QUERIES),

  /**
   * Indicates that a collection disallows certain elements (other than {@code null}, whose validity
   * as an element is indicated by the presence or absence of {@link #ALLOWS_NULL_VALUES}). From the
   * documentation for {@link Collection}:
   *
   * <blockquote>
   *
   * "Some collection implementations have restrictions on the elements that they may contain. For
   * example, some implementations prohibit null elements, and some have restrictions on the types
   * of their elements."
   *
   * </blockquote>
   */
  RESTRICTS_ELEMENTS,

  /**
   * Indicates that a collection has a well-defined ordering of its elements. The ordering may
   * depend on the element values, such as a {@link SortedSet}, or on the insertion ordering, such
   * as a {@link LinkedHashSet}. All list tests and sorted-collection tests automatically specify
   * this feature.
   */
  KNOWN_ORDER,

  /**
   * Indicates that a collection has a different {@link Object#toString} representation than most
   * collections. If not specified, the collection tests will examine the value returned by {@link
   * Object#toString}.
   */
  NON_STANDARD_TOSTRING,

  /**
   * Indicates that the constructor or factory method of a collection, usually an immutable set,
   * throws an {@link IllegalArgumentException} when presented with duplicate elements instead of
   * collapsing them to a single element or including duplicate instances in the collection.
   */
  REJECTS_DUPLICATES_AT_CREATION,

  /** The collection supports {@code add}. */
  SUPPORTS_ADD,
  /** The collection supports {@code remove}. */
  SUPPORTS_REMOVE,
  /** The collection's iterator supports {@code remove}. */
  SUPPORTS_ITERATOR_REMOVE,
  /** The collection's iterators fail fast on concurrent modification. */
  FAILS_FAST_ON_CONCURRENT_MODIFICATION,

  /**
   * Features supported by general-purpose collections - everything but {@link #RESTRICTS_ELEMENTS}.
   *
   * @see java.util.Collection the definition of general-purpose collections.
   */
  GENERAL_PURPOSE(SUPPORTS_ADD, SUPPORTS_REMOVE, SUPPORTS_ITERATOR_REMOVE),

  /** Features supported by collections where only removal is allowed. */
  REMOVE_OPERATIONS(SUPPORTS_REMOVE, SUPPORTS_ITERATOR_REMOVE),

  /** The collection is serializable. */
  SERIALIZABLE,
  /** The collection and its views are serializable; implies {@link #SERIALIZABLE}. */
  SERIALIZABLE_INCLUDING_VIEWS(SERIALIZABLE),

  // Markers for view collections; see the sorted-collection testers for usage.
  SUBSET_VIEW,
  DESCENDING_VIEW,

  /**
   * For documenting collections that support no optional features, such as {@link
   * java.util.Collections#emptySet}
   */
  NONE;

  // Closure of features implied by this one (not including this feature itself).
  private final Set<Feature<? super Collection>> implied;

  // Varargs of earlier-declared constants; defensively copied by copyToSet.
  CollectionFeature(Feature<? super Collection>... implied) {
    this.implied = copyToSet(implied);
  }

  /** Returns the (possibly empty) set of features implied by this feature. */
  @Override
  public Set<Feature<? super Collection>> getImpliedFeatures() {
    return implied;
  }

  /** Annotates testers with features that must be present ({@code value}) or absent. */
  @Retention(RetentionPolicy.RUNTIME)
  @Inherited
  @TesterAnnotation
  public @interface Require {
    CollectionFeature[] value() default {};

    CollectionFeature[] absent() default {};
  }
}
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Parser for PPAPI IDL """
#
# IDL Parser
#
# The parser is uses the PLY yacc library to build a set of parsing rules based
# on WebIDL.
#
# WebIDL, and WebIDL regular expressions can be found at:
# http://dev.w3.org/2006/webapi/WebIDL/
# PLY can be found at:
# http://www.dabeaz.com/ply/
#
# The parser generates a tree by recursively matching sets of items against
# defined patterns. When a match is made, that set of items is reduced
# to a new item. The new item can provide a match for parent patterns.
# In this way an AST is built (reduced) depth first.
import getopt
import glob
import os.path
import re
import sys
import time
from idl_ast import IDLAst
from idl_log import ErrOut, InfoOut, WarnOut
from idl_lexer import IDLLexer
from idl_node import IDLAttribute, IDLFile, IDLNode
from idl_option import GetOption, Option, ParseOptions
from idl_lint import Lint
from idl_visitor import IDLVisitor
from ply import lex
from ply import yacc
# Register command-line options; their values are read via GetOption()
# elsewhere in this module.
Option('build_debug', 'Debug tree building.')
Option('parse_debug', 'Debug parse reduction steps.')
Option('token_debug', 'Debug token generation.')
Option('dump_tree', 'Dump the tree.')
Option('srcroot', 'Working directory.', default=os.path.join('..', 'api'))
Option('include_private', 'Include private IDL directory in default API paths.')
#
# ERROR_REMAP
#
# Maps the standard error formula into a more friendly error message.
#
# Keys are the generated parser error strings — presumably produced by the
# error-reporting path of this parser (TODO confirm against p_error/Logger).
ERROR_REMAP = {
  'Unexpected ")" after "(".' : 'Empty argument list.',
  'Unexpected ")" after ",".' : 'Missing argument.',
  'Unexpected "}" after ",".' : 'Trailing comma in block.',
  'Unexpected "}" after "{".' : 'Unexpected empty block.',
  'Unexpected comment after "}".' : 'Unexpected trailing comment.',
  'Unexpected "{" after keyword "enum".' : 'Enum missing name.',
  'Unexpected "{" after keyword "struct".' : 'Struct missing name.',
  'Unexpected "{" after keyword "interface".' : 'Interface missing name.',
}
def DumpReduction(cls, p):
  """Log the items matched by pattern |cls| and the object they reduced to.

  p[0] is the reduced (output) object; p[1:] are the matched input items.
  """
  if p[0] is None:
    InfoOut.Log("OBJ: %s(%d) - None\n" % (cls, len(p)))
    InfoOut.Log(" [%s]\n" % [str(x) for x in p[1:]])
  else:
    matched = "".join(" >%s< " % str(item) for item in p[1:])
    InfoOut.Log("OBJ: %s(%d) - %s : %s\n" % (cls, len(p), str(p[0]), matched))
# CopyToList
#
# Takes an input item, list, or None, and returns a new list of that set.
def CopyToList(item):
  """Normalize |item| into a new, modifiable list.

  Falsy values (None, [], '') become an empty list; a list is shallow-copied;
  any other value is wrapped as a single-element list.
  """
  # 'Empty' / None / empty-sequence input yields an empty list.
  if not item:
    return []
  # Wrap a bare (non-list) item. isinstance replaces the old
  # `type(item) is not type([])` check and also accepts list subclasses.
  if not isinstance(item, list):
    return [item]
  # Already a list: return a copy the caller may freely modify.
  return list(item)
# ListFromConcat
#
# Generate a new List by joining of two sets of inputs which can be an
# individual item, a list of items, or None.
def ListFromConcat(*items):
  """Flatten |items| (single values, lists, or None) into one new list."""
  result = []
  for entry in items:
    result.extend(CopyToList(entry))
  return result
# TokenTypeName
#
# Generate a string which has the type and value of the token.
def TokenTypeName(t):
  """Describe token |t| (its .type and .value) for human-readable messages."""
  kind = t.type
  if kind == 'SYMBOL':
    return 'symbol %s' % t.value
  if kind in ('HEX', 'INT', 'OCT', 'FLOAT'):
    return 'value %s' % t.value
  if kind == 'STRING':
    return 'string "%s"' % t.value
  if kind == 'COMMENT':
    return 'comment'
  # Punctuation tokens have type == value; everything else is a keyword.
  if kind == t.value:
    return '"%s"' % t.value
  return 'keyword "%s"' % t.value
#
# IDL Parser
#
# The Parser inherits the from the Lexer to provide PLY with the tokenizing
# definitions. Parsing patterns are encoded as function where p_<name> is
# is called any time a patern matching the function documentation is found.
# Paterns are expressed in the form of:
# """ <new item> : <item> ....
# | <item> ...."""
#
# Where new item is the result of a match against one or more sets of items
# separated by the "|".
#
# The function is called with an object 'p' where p[0] is the output object
# and p[n] is the set of inputs for positive values of 'n'. Len(p) can be
# used to distinguish between multiple item sets in the pattern.
#
# For more details on parsing refer to the PLY documentation at
# http://www.dabeaz.com/ply/
#
#
# The parser uses the following conventions:
# a <type>_block defines a block of <type> definitions in the form of:
# [comment] [ext_attr_block] <type> <name> '{' <type>_list '}' ';'
# A block is reduced by returning an object of <type> with a name of <name>
# which in turn has <type>_list as children.
#
# A [comment] is a optional C style comment block enclosed in /* ... */ which
# is appended to the adjacent node as a child.
#
# A [ext_attr_block] is an optional list of Extended Attributes which is
# appended to the adjacent node as a child.
#
# a <type>_list defines a list of <type> items which will be passed as a
# list of children to the parent pattern. A list is in the form of:
# [comment] [ext_attr_block] <...DEF...> ';' <type>_list | (empty)
# or
# [comment] [ext_attr_block] <...DEF...> <type>_cont
#
# In the first form, the list is reduced recursively, where the right side
# <type>_list is first reduced then joined with pattern currently being
# matched. The list is terminated with the (empty) pattern is matched.
#
# In the second form the list is reduced recursively, where the right side
# <type>_cont is first reduced then joined with the pattern currently being
# matched. The type_<cont> is in the form of:
# ',' <type>_list | (empty)
# The <type>_cont form is used to consume the ',' which only occurs when
# there is more than one object in the list. The <type>_cont also provides
# the terminating (empty) definition.
#
class IDLParser(IDLLexer):
# TOP
#
# This pattern defines the top of the parse tree. The parse tree is in the
# the form of:
#
# top
# *modifiers
# *comments
# *ext_attr_block
# ext_attr_list
# attr_arg_list
# *integer, value
# *param_list
# *typeref
#
# top_list
# describe_block
# describe_list
# enum_block
# enum_item
# interface_block
# member
# label_block
# label_item
# struct_block
# member
# typedef_decl
# typedef_data
# typedef_func
#
# (* sub matches found at multiple levels and are not truly children of top)
#
# We force all input files to start with two comments. The first comment is a
# Copyright notice followed by a set of file wide Extended Attributes, followed
# by the file comment and finally by file level patterns.
#
# Find the Copyright, File comment, and optional file wide attributes. We
# use a match with COMMENT instead of comments to force the token to be
# present. The extended attributes and the top_list become siblings which
# in turn are children of the file object created from the results of top.
  def p_top(self, p):
    """top : COMMENT COMMENT ext_attr_block top_list"""
    # PLY: the docstring above IS the grammar rule — do not edit it.
    # First comment is the copyright block; second is the file-level doc.
    Copyright = self.BuildComment('Copyright', p, 1)
    Filedoc = self.BuildComment('Comment', p, 2)
    p[0] = ListFromConcat(Copyright, Filedoc, p[3], p[4])
    if self.parse_debug: DumpReduction('top', p)
  def p_top_short(self, p):
    """top : COMMENT ext_attr_block top_list"""
    # Variant for files with no file-level comment: synthesize an empty
    # Comment node so consumers always see Copyright followed by a file doc.
    Copyright = self.BuildComment('Copyright', p, 1)
    Filedoc = IDLNode('Comment', self.lexobj.filename, p.lineno(2)-1,
                      p.lexpos(2)-1, [self.BuildAttribute('NAME', ''),
                                      self.BuildAttribute('FORM', 'cc')])
    p[0] = ListFromConcat(Copyright, Filedoc, p[2], p[3])
    if self.parse_debug: DumpReduction('top', p)
# Build a list of top level items.
  def p_top_list(self, p):
    """top_list : callback_decl top_list
                | describe_block top_list
                | dictionary_block top_list
                | enum_block top_list
                | inline top_list
                | interface_block top_list
                | label_block top_list
                | namespace top_list
                | struct_block top_list
                | typedef_decl top_list
                | bad_decl top_list
                | """
    # Right-recursive list of file-level declarations; the empty alternative
    # terminates the recursion (p[0] stays None for it).
    if len(p) > 2:
      p[0] = ListFromConcat(p[1], p[2])
      if self.parse_debug: DumpReduction('top_list', p)
  # Recover from error and continue parsing at the next top match.
  def p_top_error(self, p):
    """top_list : error top_list"""
    # Drop the erroneous item; keep whatever the rest of the list produced.
    p[0] = p[2]
  # Recover from error and continue parsing at the next top match.
  def p_bad_decl(self, p):
    """bad_decl : modifiers SYMBOL error '}' ';'"""
    # Consume the broken declaration up to its closing '};' and yield nothing.
    p[0] = []
  #
  # Modifier List
  #
  #
  def p_modifiers(self, p):
    """modifiers : comments ext_attr_block"""
    # Combine optional leading comments and extended attributes into one list.
    p[0] = ListFromConcat(p[1], p[2])
    if self.parse_debug: DumpReduction('modifiers', p)
  #
  # Comments
  #
  # Comments are optional list of C style comment objects. Comments are returned
  # as a list or None.
  #
  def p_comments(self, p):
    """comments : COMMENT comments
                | """
    # Zero or more comment tokens; each becomes a Comment node.
    if len(p) > 1:
      child = self.BuildComment('Comment', p, 1)
      p[0] = ListFromConcat(child, p[2])
      if self.parse_debug: DumpReduction('comments', p)
    else:
      if self.parse_debug: DumpReduction('no comments', p)
  #
  # Namespace
  #
  # A namespace provides a named scope to an enclosed top_list.
  #
  def p_namespace(self, p):
    """namespace : modifiers NAMESPACE namespace_name '{' top_list '}' ';'"""
    # The namespace's modifiers and its body declarations become its children.
    children = ListFromConcat(p[1], p[5])
    p[0] = self.BuildNamed('Namespace', p, 3, children)
  # We allow namespace names of the form foo.bar.baz.
  def p_namespace_name(self, p):
    """namespace_name : SYMBOL
                      | SYMBOL '.' namespace_name"""
    # Rejoin the dotted segments into one string, e.g. "foo.bar.baz".
    p[0] = "".join(p[1:])
  #
  # Dictionary
  #
  # A dictionary is a named list of optional and required members.
  #
  def p_dictionary_block(self, p):
    """dictionary_block : modifiers DICTIONARY SYMBOL '{' struct_list '}' ';'"""
    # Reuses struct_list for the member list; p[3] supplies the name.
    p[0] = self.BuildNamed('Dictionary', p, 3, ListFromConcat(p[1], p[5]))
  #
  # Callback
  #
  # A callback is essentially a single function declaration (outside of an
  # Interface).
  #
  def p_callback_decl(self, p):
    """callback_decl : modifiers CALLBACK SYMBOL '=' SYMBOL param_list ';'"""
    # NOTE(review): the return-type symbol p[5] is not captured in children —
    # confirm this is intentional.
    children = ListFromConcat(p[1], p[6])
    p[0] = self.BuildNamed('Callback', p, 3, children)
  #
  # Inline
  #
  # Inline blocks define option code to be emitted based on language tag,
  # in the form of:
  # #inline <LANGUAGE>
  # <CODE>
  # #endinl
  #
  def p_inline(self, p):
    """inline : modifiers INLINE"""
    # The INLINE token's value is the entire raw block; the second word on its
    # first line is the language tag (assumes the lexer guarantees it exists).
    words = p[2].split()
    name = self.BuildAttribute('NAME', words[1])
    # VALUE is the body: everything between the #inline and #endinl lines.
    lines = p[2].split('\n')
    value = self.BuildAttribute('VALUE', '\n'.join(lines[1:-1]) + '\n')
    children = ListFromConcat(name, value, p[1])
    p[0] = self.BuildProduction('Inline', p, 2, children)
    if self.parse_debug: DumpReduction('inline', p)
  # Extended Attributes
  #
  # Extended Attributes denote properties which will be applied to a node in the
  # AST. A list of extended attributes are denoted by a brackets '[' ... ']'
  # enclosing a comma separated list of extended attributes in the form of:
  #
  # Name
  # Name=HEX | INT | OCT | FLOAT
  # Name="STRING"
  # Name=Function(arg ...)
  # TODO(noelallen) -Not currently supported:
  # ** Name(arg ...) ...
  # ** Name=Scope::Value
  #
  # Extended Attributes are returned as a list or None.
  def p_ext_attr_block(self, p):
    """ext_attr_block : '[' ext_attr_list ']'
                      | """
    # Optional bracketed attribute list; absent block leaves p[0] as None.
    if len(p) > 1:
      p[0] = p[2]
      if self.parse_debug: DumpReduction('ext_attr_block', p)
    else:
      if self.parse_debug: DumpReduction('no ext_attr_block', p)
def p_ext_attr_list(self, p):
  """ext_attr_list : SYMBOL '=' SYMBOL ext_attr_cont
                   | SYMBOL '=' value ext_attr_cont
                   | SYMBOL '=' SYMBOL param_list ext_attr_cont
                   | SYMBOL ext_attr_cont"""
  # Dispatch on the production length (PLY includes the result slot p[0]).
  # If there are 4 tokens plus a return slot, this must be in the form
  # SYMBOL = SYMBOL|value ext_attr_cont
  if len(p) == 5:
    p[0] = ListFromConcat(self.BuildAttribute(p[1], p[3]), p[4])
  # If there are 5 tokens plus a return slot, this must be in the form
  # SYMBOL = SYMBOL (param_list) ext_attr_cont
  elif len(p) == 6:
    member = self.BuildNamed('Member', p, 3, [p[4]])
    p[0] = ListFromConcat(self.BuildAttribute(p[1], member), p[5])
  # Otherwise, this must be: SYMBOL ext_attr_cont
  else:
    # A bare name is treated as a boolean attribute set to 'True'.
    p[0] = ListFromConcat(self.BuildAttribute(p[1], 'True'), p[2])
  if self.parse_debug: DumpReduction('ext_attribute_list', p)
def p_ext_attr_list_values(self, p):
  """ext_attr_list : SYMBOL '=' '(' values ')' ext_attr_cont
                   | SYMBOL '=' '(' symbols ')' ext_attr_cont"""
  # Name = (list) form: the parenthesized list becomes the attribute value.
  p[0] = ListFromConcat(self.BuildAttribute(p[1], p[4]), p[6])

def p_values(self, p):
  """values : value values_cont"""
  p[0] = ListFromConcat(p[1], p[2])

def p_symbols(self, p):
  """symbols : SYMBOL symbols_cont"""
  p[0] = ListFromConcat(p[1], p[2])

def p_symbols_cont(self, p):
  """symbols_cont : ',' SYMBOL symbols_cont
                  | """
  # The empty alternative leaves p[0] as None, terminating the list.
  if len(p) > 1: p[0] = ListFromConcat(p[2], p[3])

def p_values_cont(self, p):
  """values_cont : ',' value values_cont
                 | """
  if len(p) > 1: p[0] = ListFromConcat(p[2], p[3])
def p_ext_attr_cont(self, p):
  """ext_attr_cont : ',' ext_attr_list
                   |"""
  if len(p) > 1: p[0] = p[2]
  if self.parse_debug: DumpReduction('ext_attribute_cont', p)

def p_ext_attr_func(self, p):
  """ext_attr_list : SYMBOL '(' attr_arg_list ')' ext_attr_cont"""
  # Function-style attribute: the key is 'Name()' and the value is the
  # argument list.
  p[0] = ListFromConcat(self.BuildAttribute(p[1] + '()', p[3]), p[5])
  if self.parse_debug: DumpReduction('attr_arg_func', p)

def p_ext_attr_arg_list(self, p):
  """attr_arg_list : SYMBOL attr_arg_cont
                   | value attr_arg_cont"""
  p[0] = ListFromConcat(p[1], p[2])

def p_attr_arg_cont(self, p):
  """attr_arg_cont : ',' attr_arg_list
                   | """
  if self.parse_debug: DumpReduction('attr_arg_cont', p)
  if len(p) > 1: p[0] = p[2]

def p_attr_arg_error(self, p):
  """attr_arg_cont : error attr_arg_cont"""
  # Error recovery: skip the bad argument but keep the rest of the list.
  p[0] = p[2]
  if self.parse_debug: DumpReduction('attr_arg_error', p)
#
# Describe
#
# A describe block is defined at the top level. It provides a mechanism for
# attributing a group of ext_attr to a describe_list. Members of the
# describe list are language specific 'Type' declarations
#
def p_describe_block(self, p):
  """describe_block : modifiers DESCRIBE '{' describe_list '}' ';'"""
  children = ListFromConcat(p[1], p[4])
  p[0] = self.BuildProduction('Describe', p, 2, children)
  if self.parse_debug: DumpReduction('describe_block', p)

# Recover from describe error and continue parsing at the next top match.
def p_describe_error(self, p):
  """describe_list : error describe_list"""
  p[0] = []

def p_describe_list(self, p):
  """describe_list : modifiers SYMBOL ';' describe_list
                   | modifiers ENUM ';' describe_list
                   | modifiers STRUCT ';' describe_list
                   | modifiers TYPEDEF ';' describe_list
                   | """
  # The empty alternative terminates the list with None.
  if len(p) > 1:
    Type = self.BuildNamed('Type', p, 2, p[1])
    p[0] = ListFromConcat(Type, p[4])
#
# Constant Values (integer, value)
#
# Constant values can be found at various levels. A Constant value is returns
# as the string value after validated against a FLOAT, HEX, INT, OCT or
# STRING pattern as appropriate.
#
def p_value(self, p):
  """value : FLOAT
           | HEX
           | INT
           | OCT
           | STRING"""
  # Values are passed through as their original string form.
  p[0] = p[1]
  if self.parse_debug: DumpReduction('value', p)

def p_value_lshift(self, p):
  """value : integer LSHIFT INT"""
  # Shift expressions are kept symbolic, e.g. "1 << 4".
  p[0] = "%s << %s" % (p[1], p[3])
  if self.parse_debug: DumpReduction('value', p)

# Integers are numbers which may not be floats used in cases like array sizes.
def p_integer(self, p):
  """integer : HEX
             | INT
             | OCT"""
  p[0] = p[1]
  if self.parse_debug: DumpReduction('integer', p)
#
# Expression
#
# A simple arithmetic expression.
#
# Operator precedence table consumed by PLY's yacc (lowest first).
precedence = (
    ('left','|','&','^'),
    ('left','LSHIFT','RSHIFT'),
    ('left','+','-'),
    ('left','*','/'),
    ('right','UMINUS','~'),
)

def p_expression_binop(self, p):
  """expression : expression LSHIFT expression
                | expression RSHIFT expression
                | expression '|' expression
                | expression '&' expression
                | expression '^' expression
                | expression '+' expression
                | expression '-' expression
                | expression '*' expression
                | expression '/' expression"""
  # Expressions are not evaluated; they are re-serialized as strings.
  p[0] = "%s %s %s" % (str(p[1]), str(p[2]), str(p[3]))
  if self.parse_debug: DumpReduction('expression_binop', p)

def p_expression_unop(self, p):
  """expression : '-' expression %prec UMINUS
                | '~' expression %prec '~'"""
  p[0] = "%s%s" % (str(p[1]), str(p[2]))
  if self.parse_debug: DumpReduction('expression_unop', p)

def p_expression_term(self, p):
  "expression : '(' expression ')'"
  # Parentheses are preserved in the serialized form.
  p[0] = "%s%s%s" % (str(p[1]), str(p[2]), str(p[3]))
  if self.parse_debug: DumpReduction('expression_term', p)

def p_expression_symbol(self, p):
  "expression : SYMBOL"
  p[0] = p[1]
  if self.parse_debug: DumpReduction('expression_symbol', p)

def p_expression_integer(self, p):
  "expression : integer"
  p[0] = p[1]
  if self.parse_debug: DumpReduction('expression_integer', p)
#
# Array List
#
# Defined a list of array sizes (if any).
#
def p_arrays(self, p):
  """arrays : '[' ']' arrays
            | '[' integer ']' arrays
            | """
  # If there are 3 tokens plus a return slot it is an unsized array
  if len(p) == 4:
    array = self.BuildProduction('Array', p, 1)
    p[0] = ListFromConcat(array, p[3])
  # If there are 4 tokens plus a return slot it is a fixed array
  elif len(p) == 5:
    count = self.BuildAttribute('FIXED', p[2])
    array = self.BuildProduction('Array', p, 2, [count])
    p[0] = ListFromConcat(array, p[4])
  # If there is only a return slot, do not fill it for this terminator.
  elif len(p) == 1: return
  if self.parse_debug: DumpReduction('arrays', p)
# An identifier is a legal value for a parameter or attribute name. Lots of
# existing IDL files use "callback" as a parameter/attribute name, so we allow
# a SYMBOL or the CALLBACK keyword.
def p_identifier(self, p):
  """identifier : SYMBOL
                | CALLBACK"""
  p[0] = p[1]
  # Save the line number of the underlying token (otherwise it gets
  # discarded), since we use it in the productions with an identifier in
  # them.
  p.set_lineno(0, p.lineno(1))
#
# Parameter List
#
# A parameter list is a collection of arguments which are passed to a
# function.
#
def p_param_list(self, p):
  """param_list : '(' param_item param_cont ')'
                | '(' ')' """
  # An empty '()' yields a Callspec with no children.
  if len(p) > 3:
    args = ListFromConcat(p[2], p[3])
  else:
    args = []
  p[0] = self.BuildProduction('Callspec', p, 1, args)
  if self.parse_debug: DumpReduction('param_list', p)

def p_param_item(self, p):
  """param_item : modifiers optional SYMBOL arrays identifier"""
  # p[3] is the parameter's type name; p[5] (the identifier) names the Param.
  typeref = self.BuildAttribute('TYPEREF', p[3])
  children = ListFromConcat(p[1], p[2], typeref, p[4])
  p[0] = self.BuildNamed('Param', p, 5, children)
  if self.parse_debug: DumpReduction('param_item', p)

def p_optional(self, p):
  """optional : OPTIONAL
              | """
  if len(p) == 2:
    p[0] = self.BuildAttribute('OPTIONAL', True)

def p_param_cont(self, p):
  """param_cont : ',' param_item param_cont
                | """
  if len(p) > 1:
    p[0] = ListFromConcat(p[2], p[3])
    if self.parse_debug: DumpReduction('param_cont', p)

def p_param_error(self, p):
  """param_cont : error param_cont"""
  # Error recovery: drop the bad parameter, keep the rest.
  p[0] = p[2]
#
# Typedef
#
# A typedef creates a new referencable type. The typedef can specify an array
# definition as well as a function declaration.
#
def p_typedef_data(self, p):
  """typedef_decl : modifiers TYPEDEF SYMBOL SYMBOL ';' """
  # p[3] is the aliased type; p[4] (name index 4) is the new type name.
  typeref = self.BuildAttribute('TYPEREF', p[3])
  children = ListFromConcat(p[1], typeref)
  p[0] = self.BuildNamed('Typedef', p, 4, children)
  if self.parse_debug: DumpReduction('typedef_data', p)

def p_typedef_array(self, p):
  """typedef_decl : modifiers TYPEDEF SYMBOL arrays SYMBOL ';' """
  # Same as above but carrying the Array children from p[4].
  typeref = self.BuildAttribute('TYPEREF', p[3])
  children = ListFromConcat(p[1], typeref, p[4])
  p[0] = self.BuildNamed('Typedef', p, 5, children)
  if self.parse_debug: DumpReduction('typedef_array', p)

def p_typedef_func(self, p):
  """typedef_decl : modifiers TYPEDEF SYMBOL SYMBOL param_list ';' """
  # Function typedef: p[5] is the Callspec built from the param_list.
  typeref = self.BuildAttribute('TYPEREF', p[3])
  children = ListFromConcat(p[1], typeref, p[5])
  p[0] = self.BuildNamed('Typedef', p, 4, children)
  if self.parse_debug: DumpReduction('typedef_func', p)
#
# Enumeration
#
# An enumeration is a set of named integer constants. An enumeration
# is valid type which can be referenced in other definitions.
#
def p_enum_block(self, p):
  """enum_block : modifiers ENUM SYMBOL '{' enum_list '}' ';'"""
  p[0] = self.BuildNamed('Enum', p, 3, ListFromConcat(p[1], p[5]))
  if self.parse_debug: DumpReduction('enum_block', p)

# Recover from enum error and continue parsing at the next top match.
def p_enum_errorA(self, p):
  """enum_block : modifiers ENUM error '{' enum_list '}' ';'"""
  p[0] = []

def p_enum_errorB(self, p):
  """enum_block : modifiers ENUM error ';'"""
  p[0] = []

def p_enum_list(self, p):
  """enum_list : modifiers SYMBOL '=' expression enum_cont
               | modifiers SYMBOL enum_cont"""
  # The longer form carries an explicit VALUE from the expression.
  if len(p) > 4:
    val = self.BuildAttribute('VALUE', p[4])
    enum = self.BuildNamed('EnumItem', p, 2, ListFromConcat(val, p[1]))
    p[0] = ListFromConcat(enum, p[5])
  else:
    enum = self.BuildNamed('EnumItem', p, 2, p[1])
    p[0] = ListFromConcat(enum, p[3])
  if self.parse_debug: DumpReduction('enum_list', p)

def p_enum_cont(self, p):
  """enum_cont : ',' enum_list
               |"""
  if len(p) > 1: p[0] = p[2]
  if self.parse_debug: DumpReduction('enum_cont', p)

def p_enum_cont_error(self, p):
  """enum_cont : error enum_cont"""
  # Error recovery: drop the bad item, keep the remaining entries.
  p[0] = p[2]
  if self.parse_debug: DumpReduction('enum_error', p)
#
# Label
#
# A label is a special kind of enumeration which allows us to go from a
# set of labels
#
def p_label_block(self, p):
  """label_block : modifiers LABEL SYMBOL '{' label_list '}' ';'"""
  p[0] = self.BuildNamed('Label', p, 3, ListFromConcat(p[1], p[5]))
  if self.parse_debug: DumpReduction('label_block', p)

def p_label_list(self, p):
  """label_list : modifiers SYMBOL '=' FLOAT label_cont"""
  # Each label item maps a name to a FLOAT value.
  val = self.BuildAttribute('VALUE', p[4])
  label = self.BuildNamed('LabelItem', p, 2, ListFromConcat(val, p[1]))
  p[0] = ListFromConcat(label, p[5])
  if self.parse_debug: DumpReduction('label_list', p)

def p_label_cont(self, p):
  """label_cont : ',' label_list
                |"""
  if len(p) > 1: p[0] = p[2]
  if self.parse_debug: DumpReduction('label_cont', p)

def p_label_cont_error(self, p):
  """label_cont : error label_cont"""
  # Error recovery: drop the bad item, keep the remaining entries.
  p[0] = p[2]
  if self.parse_debug: DumpReduction('label_error', p)
#
# Members
#
# A member attribute or function of a struct or interface.
#
def p_member_attribute(self, p):
  """member_attribute : modifiers SYMBOL arrays questionmark identifier"""
  # p[2] is the member's type; p[5] (the identifier) names the Member.
  typeref = self.BuildAttribute('TYPEREF', p[2])
  children = ListFromConcat(p[1], typeref, p[3], p[4])
  p[0] = self.BuildNamed('Member', p, 5, children)
  if self.parse_debug: DumpReduction('attribute', p)

def p_member_function(self, p):
  """member_function : modifiers static SYMBOL SYMBOL param_list"""
  # p[3] is the return type; p[4] is the function name; p[5] the Callspec.
  typeref = self.BuildAttribute('TYPEREF', p[3])
  children = ListFromConcat(p[1], p[2], typeref, p[5])
  p[0] = self.BuildNamed('Member', p, 4, children)
  if self.parse_debug: DumpReduction('function', p)

def p_static(self, p):
  """static : STATIC
            | """
  if len(p) == 2:
    p[0] = self.BuildAttribute('STATIC', True)

def p_questionmark(self, p):
  """questionmark : '?'
                  | """
  # '?' marks an optional member.
  if len(p) == 2:
    p[0] = self.BuildAttribute('OPTIONAL', True)
#
# Interface
#
# An interface is a named collection of functions.
#
def p_interface_block(self, p):
  """interface_block : modifiers INTERFACE SYMBOL '{' interface_list '}' ';'"""
  p[0] = self.BuildNamed('Interface', p, 3, ListFromConcat(p[1], p[5]))
  if self.parse_debug: DumpReduction('interface_block', p)

def p_interface_error(self, p):
  """interface_block : modifiers INTERFACE error '{' interface_list '}' ';'"""
  # Error recovery: discard the whole malformed interface.
  p[0] = []

def p_interface_list(self, p):
  """interface_list : member_function ';' interface_list
                    | """
  if len(p) > 1 :
    p[0] = ListFromConcat(p[1], p[3])
    if self.parse_debug: DumpReduction('interface_list', p)
#
# Struct
#
# A struct is a named collection of members which in turn reference other
# types. The struct is a referencable type.
#
def p_struct_block(self, p):
  """struct_block : modifiers STRUCT SYMBOL '{' struct_list '}' ';'"""
  children = ListFromConcat(p[1], p[5])
  p[0] = self.BuildNamed('Struct', p, 3, children)
  if self.parse_debug: DumpReduction('struct_block', p)

# Recover from struct error and continue parsing at the next top match.
# NOTE(review): the rule below names its LHS 'enum_block' even though it
# recovers from STRUCT errors -- this looks like a copy-paste slip from
# p_enum_errorA ('struct_block' was presumably intended). Changing the
# docstring changes the grammar, so confirm against the full grammar
# before fixing.
def p_struct_error(self, p):
  """enum_block : modifiers STRUCT error '{' struct_list '}' ';'"""
  p[0] = []

def p_struct_list(self, p):
  """struct_list : member_attribute ';' struct_list
                 | member_function ';' struct_list
                 |"""
  if len(p) > 1: p[0] = ListFromConcat(p[1], p[3])
#
# Parser Errors
#
# p_error is called whenever the parser can not find a pattern match for
# a set of items from the current state. The p_error function defined here
# is triggered logging an error, and parsing recover happens as the
# p_<type>_error functions defined above are called. This allows the parser
# to continue so as to capture more than one error per file.
#
def p_error(self, t):
  filename = self.lexobj.filename
  self.parse_errors += 1
  if t:
    lineno = t.lineno
    pos = t.lexpos
    # Describe the failure in terms of the previously shifted symbol when
    # the top of the parse stack is a real token.
    prev = self.yaccobj.symstack[-1]
    if type(prev) == lex.LexToken:
      msg = "Unexpected %s after %s." % (
          TokenTypeName(t), TokenTypeName(prev))
    else:
      msg = "Unexpected %s." % (t.value)
  else:
    # t is None at end-of-file; report relative to the last token seen.
    # (self.last is set by token() for every token returned.)
    lineno = self.last.lineno
    pos = self.last.lexpos
    msg = "Unexpected end of file after %s." % TokenTypeName(self.last)
  # Reset the parser state so the error-recovery productions can resume.
  self.yaccobj.restart()
  # Attempt to remap the error to a friendlier form
  if msg in ERROR_REMAP:
    msg = ERROR_REMAP[msg]
  # Log the error
  ErrOut.LogLine(filename, lineno, pos, msg)
def Warn(self, node, msg):
  """Log a warning at |node|'s source location and count it."""
  self.parse_warnings += 1
  WarnOut.LogLine(node.filename, node.lineno, node.pos, msg)
def __init__(self):
  """Build the yacc parser over this module's p_* rules and read options."""
  IDLLexer.__init__(self)
  self.yaccobj = yacc.yacc(module=self, tabmodule=None, debug=False,
                           optimize=0, write_tables=0)
  self.build_debug = GetOption('build_debug')
  self.parse_debug = GetOption('parse_debug')
  self.token_debug = GetOption('token_debug')
  self.verbose = GetOption('verbose')
  self.parse_errors = 0
  # FIX: also initialize here (not only in ParseData) so Warn() cannot
  # raise AttributeError if it is called before any parse has started.
  self.parse_warnings = 0
#
# Tokenizer
#
# The token function returns the next token provided by IDLLexer for matching
# against the leaf paterns.
#
def token(self):
  """Fetch the next lexer token, remembering it for EOF error reporting."""
  tok = self.lexobj.token()
  if not tok:
    return tok
  # Keep the most recent real token so p_error can point at it on EOF.
  self.last = tok
  if self.token_debug:
    InfoOut.Log("TOKEN %s(%s)" % (tok.type, tok.value))
  return tok
#
# BuildProduction
#
# Production is the set of items sent to a grammar rule resulting in a new
# item being returned.
#
# p - Is the Yacc production object containing the stack of items
# index - Index into the production of the name for the item being produced.
# cls - The type of item being producted
# childlist - The children of the new item
def BuildProduction(self, cls, p, index, childlist=None):
  """Create an IDLNode of type |cls| located at token |index| of |p|."""
  if not childlist: childlist = []
  filename = self.lexobj.filename
  # Source location comes from the token at |index| in this production.
  lineno = p.lineno(index)
  pos = p.lexpos(index)
  out = IDLNode(cls, filename, lineno, pos, childlist)
  if self.build_debug:
    InfoOut.Log("Building %s" % out)
  return out
def BuildNamed(self, cls, p, index, childlist=None):
  """Build an IDLNode with a NAME attribute taken from p[index].

  FIX: copy the child list before appending so the caller's list argument
  is never mutated in place (the original appended to it directly, which
  surprises callers that reuse the list).
  """
  childlist = list(childlist) if childlist else []
  childlist.append(self.BuildAttribute('NAME', p[index]))
  return self.BuildProduction(cls, p, index, childlist)
def BuildComment(self, cls, p, index):
  """Build a Comment node from the raw comment token at p[index].

  Strips '//' (C++ style) or '/* ... */' (C style) markers from every
  line and records the original style as a FORM attribute ('cc' or 'c').
  """
  name = p[index]
  # Remove comment markers
  lines = []
  if name[:2] == '//':
    # For C++ style, remove any leading whitespace and the '//' marker from
    # each line.
    form = 'cc'
    for line in name.split('\n'):
      start = line.find('//')
      lines.append(line[start+2:])
  else:
    # For C style, remove ending '*/''
    form = 'c'
    for line in name[:-2].split('\n'):
      # Remove characters until start marker for this line '*' if found
      # otherwise it should be blank.
      offs = line.find('*')
      if offs >= 0:
        line = line[offs + 1:].rstrip()
      else:
        line = ''
      lines.append(line)
  name = '\n'.join(lines)
  childlist = [self.BuildAttribute('NAME', name),
               self.BuildAttribute('FORM', form)]
  return self.BuildProduction(cls, p, index, childlist)
#
# BuildAttribute
#
# An ExtendedAttribute is a special production that results in a property
# which is applied to the adjacent item. Attributes have no children and
# instead represent key/value pairs.
#
def BuildAttribute(self, key, val):
  """Wrap a key/value pair in an IDLAttribute applied to the adjacent item."""
  return IDLAttribute(key, val)
#
# ParseData
#
# Attempts to parse the current data loaded in the lexer.
#
def ParseData(self, data, filename='<Internal>'):
  """Parse |data|, resetting the error/warning counters first.

  Returns the yacc parse result, or [] if the lexer raised a LexError.
  """
  self.SetData(filename, data)
  try:
    self.parse_errors = 0
    self.parse_warnings = 0
    return self.yaccobj.parse(lexer=self)
  except lex.LexError as le:
    ErrOut.Log(str(le))
    return []
#
# ParseFile
#
# Loads a new file into the lexer and attemps to parse it.
#
def ParseFile(self, filename):
  """Parse one IDL file and return it wrapped in an IDLFile node.

  The node's error count combines parse and lex errors, and its DATETIME
  property records the file's modification time.
  """
  date = time.ctime(os.path.getmtime(filename))
  # FIX: use a context manager so the file handle is closed promptly
  # (the original leaked the handle returned by open()).
  with open(filename) as f:
    data = f.read()
  if self.verbose:
    InfoOut.Log("Parsing %s" % filename)
  try:
    out = self.ParseData(data, filename)
    # If we have a src root specified, remove it from the path
    srcroot = GetOption('srcroot')
    if srcroot and filename.find(srcroot) == 0:
      filename = filename[len(srcroot) + 1:]
    filenode = IDLFile(filename, out, self.parse_errors + self.lex_errors)
    filenode.SetProperty('DATETIME', date)
    return filenode
  except Exception as e:
    ErrOut.LogLine(filename, self.last.lineno, self.last.lexpos,
                   'Internal parsing error - %s.' % str(e))
    raise
#
# Flatten Tree
#
# Flattens the tree of IDLNodes for use in testing.
#
def FlattenTree(node):
  """Flatten a tree of IDLNodes into a list of strings for test comparison.

  A node's own string form is prepended only when at least one of its
  direct children is a Comment; non-comment children are flattened
  recursively.
  """
  flattened = []
  include_self = False
  for child in node.children:
    if child.IsA('Comment'):
      include_self = True
    else:
      flattened.extend(FlattenTree(child))
  return [str(node)] + flattened if include_self else flattened
def TestErrors(filename, filenode):
  """Compare parsed nodes and captured errors against the OK/FAIL comments
  embedded in |filename|. Returns the number of mismatches found."""
  nodelist = filenode.GetChildren()
  lexer = IDLLexer()
  # FIX: close the file handle (the original leaked open(filename)).
  with open(filename) as f:
    data = f.read()
  lexer.SetData(filename, data)
  pass_comments = []
  fail_comments = []
  # Collect OK/FAIL annotations from the file's comments.
  while True:
    tok = lexer.lexobj.token()
    if tok is None:
      break
    if tok.type == 'COMMENT':
      args = tok.value[3:-3].split()
      if args[0] == 'OK':
        pass_comments.append((tok.lineno, ' '.join(args[1:])))
      elif args[0] == 'FAIL':
        fail_comments.append((tok.lineno, ' '.join(args[1:])))
  obj_list = []
  for node in nodelist:
    obj_list.extend(FlattenTree(node))
  errors = 0
  #
  # Check for expected successes
  #
  obj_cnt = len(obj_list)
  pass_cnt = len(pass_comments)
  if obj_cnt != pass_cnt:
    InfoOut.Log("Mismatched pass (%d) vs. nodes built (%d)."
                % (pass_cnt, obj_cnt))
    InfoOut.Log("PASS: %s" % [x[1] for x in pass_comments])
    InfoOut.Log("OBJS: %s" % obj_list)
    errors += 1
  if pass_cnt > obj_cnt:
    pass_cnt = obj_cnt
  for i in range(pass_cnt):
    line, comment = pass_comments[i]
    if obj_list[i] != comment:
      ErrOut.LogLine(filename, line, None, "OBJ %s : EXPECTED %s\n" %
                     (obj_list[i], comment))
      errors += 1
  #
  # Check for expected errors
  #
  err_list = ErrOut.DrainLog()
  err_cnt = len(err_list)
  fail_cnt = len(fail_comments)
  if err_cnt != fail_cnt:
    InfoOut.Log("Mismatched fail (%d) vs. errors seen (%d)."
                % (fail_cnt, err_cnt))
    InfoOut.Log("FAIL: %s" % [x[1] for x in fail_comments])
    InfoOut.Log("ERRS: %s" % err_list)
    errors += 1
  if fail_cnt > err_cnt:
    fail_cnt = err_cnt
  for i in range(fail_cnt):
    line, comment = fail_comments[i]
    err = err_list[i].strip()
    # BUG FIX: compare the *stripped* error text. The original computed
    # 'err' but then compared the unstripped err_list[i], so trailing
    # whitespace caused spurious mismatches.
    if err != comment:
      ErrOut.Log("%s(%d) Error\n\tERROR : %s\n\tEXPECT: %s" % (
          filename, line, err_list[i], comment))
      errors += 1
  # (The dead trailing 'err_list = []' was removed; DrainLog already
  # cleared the captured log.)
  return errors
def TestFile(parser, filename):
  """Run |parser| over |filename| with errors captured, then diff the
  captured errors against the expectations embedded in the file."""
  # Silence the console and buffer errors so they can be inspected.
  ErrOut.SetCapture(True)
  ErrOut.SetConsole(False)
  node = parser.ParseFile(filename)
  # Restore normal error reporting.
  ErrOut.SetCapture(False)
  ErrOut.SetConsole(True)
  # Compare captured errors with the in-file OK/FAIL annotations.
  return TestErrors(filename, node)
def TestErrorFiles(filter):
  """Run the error-expectation test over every IDL file in test_parser/,
  optionally restricted to |filter|. Returns the total error count."""
  basedir = os.path.split(sys.argv[0])[0]
  pattern = os.path.join(basedir, 'test_parser', '*.idl')
  parser = IDLParser()
  total_errs = 0
  for filename in glob.glob(pattern):
    if filter and filename not in filter:
      continue
    errs = TestFile(parser, filename)
    if errs:
      ErrOut.Log("%s test failed with %d error(s)." % (filename, errs))
      total_errs += errs
  if total_errs:
    ErrOut.Log("Failed parsing test.")
  else:
    InfoOut.Log("Passed parsing test.")
  return total_errs
def TestNamespaceFiles(filter):
  """Parse every IDL file in test_namespace/ (optionally restricted to
  |filter|) and report namespace errors. Returns the error count."""
  basedir = os.path.split(sys.argv[0])[0]
  pattern = os.path.join(basedir, 'test_namespace', '*.idl')
  testnames = [name for name in glob.glob(pattern)
               if not filter or name in filter]
  # With nothing to parse, skip the test entirely.
  if not testnames:
    InfoOut.Log('No files to test for namespace.')
    return 0
  # Suppress informational output while building the combined AST.
  InfoOut.SetConsole(False)
  ast = ParseFiles(testnames)
  InfoOut.SetConsole(True)
  errs = ast.GetProperty('ERRORS')
  if errs:
    ErrOut.Log("Failed namespace test.")
  else:
    InfoOut.Log("Passed namespace test.")
  return errs
def FindVersionError(releases, node):
  """Recursively verify that 'REL:' comments on Interface/Struct nodes match
  the computed first_release for each entry in |releases|.

  Returns the number of mismatches found in this subtree.
  """
  err_cnt = 0
  if node.IsA('Interface', 'Struct'):
    comment_list = []
    comment = node.GetOneOf('Comment')
    # FIX: converted Python 2 print statements to print() calls (valid in
    # both Python 2 and 3). These look like debug leftovers and could
    # likely be removed entirely.
    if comment:
      print(comment.GetName())
    if comment and comment.GetName()[:4] == 'REL:':
      # 'REL: M13 M14 ...' -> ['M13', 'M14', ...]
      comment_list = comment.GetName()[5:].strip().split(' ')
      print(comment_list)
      if len(comment_list) != len(releases):
        node.Error("Mismatch size of releases: %s vs %s." % (
            comment_list, releases))
        err_cnt += 1
      else:
        first_list = [node.first_release[rel] for rel in releases]
        if first_list != comment_list:
          node.Error("Mismatch in releases: %s vs %s." % (
              comment_list, first_list))
          err_cnt += 1
  for child in node.GetChildren():
    err_cnt += FindVersionError(releases, child)
  return err_cnt
def TestVersionFiles(filter):
  """Parse every IDL file in test_version/ (optionally restricted to
  |filter|) and check REL: version comments. Returns the error count."""
  basedir = os.path.split(sys.argv[0])[0]
  pattern = os.path.join(basedir, 'test_version', '*.idl')
  testnames = [name for name in glob.glob(pattern)
               if not filter or name in filter]
  # With nothing to parse, skip the test entirely.
  if not testnames:
    InfoOut.Log('No files to test for version.')
    return 0
  ast = ParseFiles(testnames)
  errs = FindVersionError(ast.releases, ast)
  if errs:
    ErrOut.Log("Failed version test.")
  else:
    InfoOut.Log("Passed version test.")
  return errs
default_dirs = ['.', 'trusted', 'dev', 'private']

def ParseFiles(filenames):
  """Parse the given IDL files (or discover them under --srcroot when none
  are given) and return the combined, linted IDLAst."""
  parser = IDLParser()
  filenodes = []
  if not filenames:
    filenames = []
    srcroot = GetOption('srcroot')
    # BUG FIX: copy default_dirs instead of aliasing it. The original
    # 'dirs = default_dirs; dirs += [...]' extended the module-level list
    # in place, so every call leaked another 'private' entry. Also,
    # 'private' is already in default_dirs, so the unconditional append
    # caused those files to be globbed and parsed twice.
    dirs = list(default_dirs)
    if GetOption('include_private') and 'private' not in dirs:
      dirs.append('private')
    for dirname in dirs:
      srcdir = os.path.join(srcroot, dirname, '*.idl')
      srcdir = os.path.normpath(srcdir)
      filenames += sorted(glob.glob(srcdir))
  if not filenames:
    ErrOut.Log('No sources provided.')
  for filename in filenames:
    filenode = parser.ParseFile(filename)
    filenodes.append(filenode)
  ast = IDLAst(filenodes)
  if GetOption('dump_tree'):
    ast.Dump(0)
  Lint(ast)
  return ast
def Main(args):
  """Command-line entry point. Returns 0 on success, non-zero on failure."""
  filenames = ParseOptions(args)
  # If testing...
  if GetOption('test'):
    # BUG FIX: accumulate errors across all three test suites. The
    # original re-assigned 'errs' each time, silently discarding failures
    # from the earlier suites.
    errs = TestErrorFiles(filenames)
    errs += TestNamespaceFiles(filenames)
    errs += TestVersionFiles(filenames)
    if errs:
      ErrOut.Log("Parser failed with %d errors." % errs)
      return -1
    return 0
  # Otherwise, build the AST
  ast = ParseFiles(filenames)
  errs = ast.GetProperty('ERRORS')
  if errs:
    ErrOut.Log('Found %d error(s).' % errs)
  InfoOut.Log("%d files processed." % len(filenames))
  return errs

if __name__ == '__main__':
  sys.exit(Main(sys.argv[1:]))
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
import os

from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
    updateConfigFromSubConfig,
    applyValueGettersToContainer,
    DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
                                           InferenceElement)
from nupic.frameworks.opf.opftaskdriver import (
    IterationPhaseSpecLearnOnly,
    IterationPhaseSpecInferOnly,
    IterationPhaseSpecLearnAndInfer)
from nupic.support import aggregationDivide
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "CLA",

    # Version that specifies the format of the config.
    'version': 1,

    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    'aggregationInfo': {'fields': [('numericFieldNameA', 'mean'),
                                   ('numericFieldNameB', 'sum'),
                                   ('categoryFieldNameC', 'first')],
                        'hours': 0},

    'predictAheadTime': None,

    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'NontemporalAnomaly',

        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : 0,

            # Example:
            #   dsEncoderSchema = [
            #     DeferredDictLookup('__field_name_encoder'),
            #   ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            'encoders': {
                'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
                'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
            },

            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys is the desired combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset' : None,
        },

        'spEnable': True,

        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : 0,

            'globalInhibition': 1,

            # Number of cell columns in the cortical region (same number for
            # SP and TP)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            'inputWidth': 0,

            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,

            'seed': 1956,

            # potentialPct
            # What percent of the columns's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,

            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TP and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,

            'synPermActiveInc': 0.1,

            'synPermInactiveDec': 0.01,
        },

        # Controls whether TP is enabled or disabled;
        # TP is necessary for making temporal predictions, such as predicting
        # the next inputs. Without TP, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tpEnable' : True,

        'tpParams': {
            # TP diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
            'verbosity': 0,

            # Number of cell columns in the cortical region (same number for
            # SP and TP)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,

            'inputWidth': 2048,

            'seed': 1960,

            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',

            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,

            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TP
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,

            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TP
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,

            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,

            # Permanence Increment
            'permanenceInc': 0.1,

            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,

            'globalDecay': 0.0,

            'maxAge': 0,

            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,

            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,

            'outputType': 'normal',

            # "Pay Attention Mode" length. This tells the TP how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },

        'clParams': {
            # Classifier implementation selection.
            'implementation': 'py',

            'regionName' : 'SDRClassifierRegion',

            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'verbosity' : 0,

            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.001,

            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            'steps': '1',
        },

        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary

# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)

# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
      config['predictAheadTime'], config['aggregationInfo'])))
  assert (predictionSteps >= 1)
  config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
#       Clients that interact with OPFExperiment directly do not make use of
#       the tasks specification.
#
control = dict(
    environment='opfExperiment',
    tasks = [
        {
            # Task label; this label string may be used for diagnostic logging and for
            # constructing filenames or directory pathnames for task-specific files, etc.
            'taskLabel' : "Anomaly",

            # Input stream specification per py/nupic/cluster/database/StreamDef.json.
            #
            'dataset' : {
                'info': 'test_NoProviders',
                'version': 1,

                'streams': [
                    {
                        'columns': ['*'],
                        'info': 'my simple dataset',
                        # NOTE: requires 'import os' at the top of this file.
                        'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
                    }
                ],

                # TODO: Aggregation is not supported yet by run_opf_experiment.py
                #'aggregation' : config['aggregationInfo']
            },

            # Iteration count: maximum number of iterations. Each iteration corresponds
            # to one record from the (possibly aggregated) dataset. The task is
            # terminated when either number of iterations reaches iterationCount or
            # all records in the (possibly aggregated) database have been processed,
            # whichever occurs first.
            #
            # iterationCount of -1 = iterate over the entire dataset
            'iterationCount' : -1,

            # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
            'taskControl' : {
                # Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX
                # instances.
                'iterationCycle' : [
                    #IterationPhaseSpecLearnOnly(1000),
                    IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
                    #IterationPhaseSpecInferOnly(10, inferenceArgs=None),
                ],

                'metrics' : [
                ],

                # Logged Metrics: A sequence of regular expressions that specify which of
                # the metrics from the Inference Specifications section MUST be logged for
                # every prediction. The regex's correspond to the automatically generated
                # metric labels. This is similar to the way the optimization metric is
                # specified in permutations.py.
                'loggedMetrics': ['.*nupicScore.*'],

                # Callbacks for experimentation/research (optional)
                'callbacks' : {
                    # Callbacks to be called at the beginning of a task, before model iterations.
                    # Signature: callback(<reference to OPFExperiment>); returns nothing
                    # 'setup' : [claModelControlEnableSPLearningCb, claModelControlEnableTPLearningCb],
                    # 'setup' : [claModelControlDisableTPLearningCb],
                    'setup' : [],

                    # Callbacks to be called after every learning/inference iteration
                    # Signature: callback(<reference to OPFExperiment>); returns nothing
                    'postIter' : [],

                    # Callbacks to be called when the experiment task is finished
                    # Signature: callback(<reference to OPFExperiment>); returns nothing
                    'finish' : []
                }
            }  # End of taskControl
        },  # End of task
    ]
)

descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
# Channel order as it appears on the wire; 'qual' is the trailing
# signal-quality slot.
_CHANNEL_ORDER = ['F3', 'FC6', 'P7', 'T8', 'F7', 'F8', 'T7', 'P8', 'AF4', 'F4', 'AF3', 'O2', 'O1', 'FC5', 'qual']
# Name -> position lookup for the channel order above.
MASK_IDX = {name: idx for idx, name in enumerate(_CHANNEL_ORDER)}
# Per-sensor bit positions (14 bits each) inside a decrypted EPOC packet.
EPOC_MASK = {
    'F3': [10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7],
    'FC6': [214, 215, 200, 201, 202, 203, 204, 205, 206, 207, 192, 193, 194, 195],
    'P7': [84, 85, 86, 87, 72, 73, 74, 75, 76, 77, 78, 79, 64, 65],
    'T8': [160, 161, 162, 163, 164, 165, 166, 167, 152, 153, 154, 155, 156, 157],
    'F7': [48, 49, 50, 51, 52, 53, 54, 55, 40, 41, 42, 43, 44, 45],
    'F8': [178, 179, 180, 181, 182, 183, 168, 169, 170, 171, 172, 173, 174, 175],
    'T7': [66, 67, 68, 69, 70, 71, 56, 57, 58, 59, 60, 61, 62, 63],
    'P8': [158, 159, 144, 145, 146, 147, 148, 149, 150, 151, 136, 137, 138, 139],
    'AF4': [196, 197, 198, 199, 184, 185, 186, 187, 188, 189, 190, 191, 176, 177],
    'F4': [216, 217, 218, 219, 220, 221, 222, 223, 208, 209, 210, 211, 212, 213],
    'AF3': [46, 47, 32, 33, 34, 35, 36, 37, 38, 39, 24, 25, 26, 27],
    'O2': [140, 141, 142, 143, 128, 129, 130, 131, 132, 133, 134, 135, 120, 121],
    'O1': [102, 103, 88, 89, 90, 91, 92, 93, 94, 95, 80, 81, 82, 83],
    'FC5': [28, 29, 30, 31, 16, 17, 18, 19, 20, 21, 22, 23, 8, 9]
}
# Raw packet size in bytes.
PKT_SIZE = 32
# Digital full-scale value and the physical range (assumed microvolt-scale;
# TODO confirm units against the device documentation).
SENSOR_DIG_MAX = 8192
SENSOR_PHYS_MAX = 15.7
# Byte offsets of the two gyro axes within a packet.
GYRO_OFFSET = (106, 105)
# Order in which the headset cycles sensor-quality reports; note the list
# intentionally repeats 'F8' and 'AF4' at the end.
SENSOR_QUALITY_ORDER = ['F3', 'FC5', 'AF3', 'F7', 'T7', 'P7', 'O1', 'O2', 'P8', 'T8', 'F8', 'AF4', 'FC6', 'F4', 'F8', 'AF4']
# Bit positions of the quality value inside a packet.
QUAL_MASK = [106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119]
QUAL_NORM_FACTOR = 54.0
# Raw battery byte range (after subtracting the 128 offset).
BATTERY_MIN = 225 - 128
BATTERY_MAX = 248 - 128
# Sampling frequency in Hz.
SAMPLE_FREQ = 128
TAIL_LEN = 10 # seconds | unknown | codeparrot/codeparrot-clean | ||
import requests
import json
import sys
import uuid
# SimpliSafe mobile-API endpoints. '$UID$' and '$LID$' are placeholder
# tokens substituted with the user id and location (system) id before a
# request is made (see setState / getLocation / get_dashboard below).
LOGIN_URL = 'https://simplisafe.com/mobile/login/'
LOGOUT_URL = 'https://simplisafe.com/mobile/logout'
LOCATIONS_URL = 'https://simplisafe.com/mobile/$UID$/locations'
DASHBOARD_URL = 'https://simplisafe.com/mobile/$UID$/sid/$LID$/dashboard'
EVENTS_URL = 'https://simplisafe.com/mobile/$UID$/sid/$LID$/events'
STATE_URL = 'https://simplisafe.com/mobile/$UID$/sid/$LID$/set-state'
class Plugin(indigo.PluginBase):
    def __init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs):
        """Initialize plugin state and immediately open an HTTP session."""
        indigo.PluginBase.__init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs)
        # Initialize variables
        self.session = None       # requests session holding the server cookies
        self.session_id = None    # session token returned by the login endpoint
        self.debug = True
        self.state = 'unknown'    # last known alarm state ('off'/'home'/'away'/'unknown')
        self.uid = None           # SimpliSafe user id
        self.location = None      # SimpliSafe location (system) id
        self.startSession()
def __del__(self):
self.logout()
indigo.PluginBase.__del__(self)
    def deviceStartComm(self, dev):
        """Indigo callback: a device started communication; log in and sync state."""
        if self.ensureLogin(dev):
            self.getLocation()
            # Only push state for real device objects; presumably other types
            # can reach here — TODO confirm what else Indigo passes in.
            if isinstance(dev, indigo.Device):
                indigo.server.log("Updating device state")
                self.updateDeviceState(dev)
        else:
            self.abort("Unable to start device")
    def deviceStopComm(self, dev):
        """Indigo callback: a device stopped communication; end the server session."""
        self.logout()
def armAlarmHome(self, action):
#PluginAction class
deviceId = action.deviceId
device = indigo.devices[deviceId]
self.setAlarm(u"home", device)
def armAlarmAway(self, action):
#PluginAction class
deviceId = action.deviceId
device = indigo.devices[deviceId]
self.setAlarm(u"away", device)
def disarmAlarm(self, action):
#PluginAction class
deviceId = action.deviceId
device = indigo.devices[deviceId]
self.setAlarm(u"off", device)
def getAlarmState(self, action):
#PluginAction class
deviceId = action.deviceId
device = indigo.devices[deviceId]
self.updateDeviceState(device)
    def setAlarm(self, state, dev):
        """Log in if needed, then set the alarm to 'home', 'away', or 'off'."""
        if self.ensureLogin(dev):
            self.getLocation()
            self.setState(state, dev)
            # self.get_dashboard()
        else:
            indigo.server.log("Error logging in.", isError=True)
    def abort(self, msg):
        """Log an error message to the Indigo event log (does not raise)."""
        indigo.server.log("Error: %s" % msg, isError=True)
def setState(self, state, device):
if state not in ('home', 'away', 'off'):
self.abort("State must be 'home', 'away', or 'off'. You tried '%s'." % state)
return
state_data = {
'state': state,
'mobile': '1',
'no_persist': '1',
'XDEBUG_SESSION_START': 'session_name',
}
indigo.server.log("Setting alarm state to %s" % state)
URL = STATE_URL.replace('$UID$', self.uid).replace('$LID$', self.location)
response = self.session.post(URL, data=state_data)
response_object = json.loads(response.text)
result_codes = {
'2': 'off',
'4': 'home',
'5': 'away',
}
result_code = response_object['result']
self.state = result_codes[str(result_code)].lower()
self.updateDeviceState(device)
indigo.server.log("Alarm State: %s" % self.state)
return self.state
    def updateDeviceState(self, dev):
        """Refresh self.state from the server and mirror it onto the Indigo device."""
        if not self.ensureLogin(dev):
            self.abort("Not logged in - unable to update alarm state")
            return
        #get the current state from the server
        self.getLocation()
        indigo.server.log("Current State: " + self.state)
        # Translate the internal state into a display label and icon.
        stateToDisplay = 'Unknown'
        imageToDisplay = indigo.kStateImageSel.SensorOff
        if self.state == 'off':
            stateToDisplay = 'Disarmed'
            imageToDisplay = indigo.kStateImageSel.SensorTripped
        elif self.state == 'pending off':
            self.state = 'off' # reset 'pending off' to 'off'
            stateToDisplay = 'Disarmed'
            imageToDisplay = indigo.kStateImageSel.SensorTripped
        elif self.state == 'home':
            stateToDisplay = 'Armed - Home'
            imageToDisplay = indigo.kStateImageSel.SensorOn
        elif self.state == 'away':
            stateToDisplay = 'Armed - Away'
            imageToDisplay = indigo.kStateImageSel.SensorOn
        else:
            # Anything unexpected collapses to 'unknown'.
            self.state = 'unknown'
        indigo.server.log("State NOW: " + self.state)
        dev.updateStateOnServer('alarmState', value=self.state, uiValue=stateToDisplay, clearErrorState=True)
        dev.updateStateImageOnServer(imageToDisplay)
#
# def get_state(self):
# return self.state
#
# def get_temperature(self):
# return self.temperature
#
def get_dashboard(self):
if not self.uid:
self.abort("You tried to get dashboard without first having a User ID set.")
return
if not self.location:
self.abort("You tried to get dashboard without first having a location set.")
return
dashboard_data = {
'no_persist': '0',
'XDEBUG_SESSION_START': 'session_name',
}
URL = DASHBOARD_URL.replace('$UID$', self.uid).replace('$LID$', self.location)
response = self.session.post(URL, data=dashboard_data)
response_object = json.loads(response.text)
if self.debug:
indigo.server.log("Dashboard Response: %s" % response.text)
response_object = json.loads(response.text)
self.temperature = response_object['location']['monitoring']['freeze']['temp']
if self.debug:
indigo.server.log("Current Temperature: %s" % self.temperature)
def getLocation(self):
if not self.uid:
self.abort("You tried to get location without first having a User ID set.")
return
location_data = {
'no_persist': '0',
'XDEBUG_SESSION_START': 'session_name',
}
URL = LOCATIONS_URL.replace('$UID$', self.uid)
response = self.session.post(URL, data=location_data)
response_object = json.loads(response.text)
# if self.debug:
# print "Location Response: %s" % response.text
self.location = response_object['locations'].keys()[0]
self.state = response_object['locations'][self.location]['system_state'].lower()
def ensureLogin(self, dev):
username = dev.pluginProps["username"]
password = dev.pluginProps["password"]
hasUsername = username is not None and username != u""
hasPassword = password is not None and password != u""
if not username or not password:
self.abort("You must provide a username and password.")
return false
if self.isLoggedIn():
indigo.server.log("Existing Session found. Not logging in again.")
return True
self.startSession()
login_data = {
'name': username,
'pass': password,
'device_name': 'My iPhone',
'device_uuid': str(uuid.uuid1()),
'version': '1100',
'no_persist': '1',
'XDEBUG_SESSION_START': 'session_name',
}
response = self.session.post(LOGIN_URL, data=login_data)
response_object = json.loads(response.text)
self.username = response_object['username']
self.session_id = response_object['session']
self.uid = response_object['uid']
return self.isLoggedIn()
def logout(self):
if isLoggedIn():
logout_data = {
'no_persist': '0',
'XDEBUG_SESSION_START': 'session_name',
}
response = self.session.post(LOGOUT_URL)
response_object = json.loads(response.text)
self.session = None
self.session_id = None
self.uid = None
def isLoggedIn(self):
hasSession = self.session != None and self.session != ""
hasSessionId = self.session_id != None and self.session_id != ""
hasUid = self.uid != None and self.uid != ""
return hasSession and hasUid and hasSessionId
def startSession(self):
# Create a requests session to persist the cookies
self.session = requests.session() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for pie charts."""
import warnings
from graphy import common
from graphy import util
class Segment(common.DataSeries):
  """A single segment of the pie chart.

  Object attributes:
    size: relative size of the segment
    label: label of the segment (if any)
    color: color of the segment (if any)
  """

  def __init__(self, size, label=None, color=None):
    # Backward-compatibility check: the old argument order was (color, label),
    # so a label that parses as a hex triplet is probably a misplaced color.
    if label is not None and util._IsColor(label):
      warnings.warn('Your code may be broken! '
                    'Label looks like a hex triplet; it might be a color. '
                    'The old argument order (color before label) is '
                    'deprecated.',
                    DeprecationWarning, stacklevel=2)
    style = common._BasicStyle(color)
    # A segment is a one-point data series; its single value is the size.
    super(Segment, self).__init__([size], label=label, style=style)
    assert size >= 0

  def _GetSize(self):
    return self.data[0]

  def _SetSize(self, value):
    # Negative sizes make no sense for a pie slice.
    assert value >= 0
    self.data[0] = value

  size = property(_GetSize, _SetSize,
                  doc = """The relative size of this pie segment.""")

  # Since Segments are so simple, provide color for convenience.
  def _GetColor(self):
    return self.style.color

  def _SetColor(self, color):
    self.style.color = color

  color = property(_GetColor, _SetColor,
                   doc = """The color of this pie segment.""")
class PieChart(common.BaseChart):
"""Represents a pie chart.
The pie chart consists of a single "pie" by default, but additional pies
may be added using the AddPie method. The Google Chart API will display
the pies as concentric circles, with pie #0 on the inside; other backends
may display the pies differently.
"""
  def __init__(self, points=None, labels=None, colors=None):
    """Constructor for PieChart objects.

    Creates a pie chart with a single pie.

    Args:
      points: A list of data points for the pie chart;
              i.e., relative sizes of the pie segments
      labels: A list of labels for the pie segments.
              TODO: Allow the user to pass in None as one of
              the labels in order to skip that label.
      colors: A list of colors for the pie segments, as hex strings
              (f.ex. '0000ff' for blue). If there are fewer colors than pie
              segments, the Google Chart API will attempt to produce a smooth
              color transition between segments by spreading the colors across
              them.
    """
    super(PieChart, self).__init__()
    self.formatters = []
    # Chart-wide color override; set via SetColors(). None means segments
    # keep their individual colors.
    self._colors = None
    if points:
      self.AddPie(points, labels, colors)
def AddPie(self, points, labels=None, colors=None):
"""Add a whole pie to the chart.
Args:
points: A list of pie segment sizes
labels: A list of labels for the pie segments
colors: A list of colors for the segments. Missing colors will be chosen
automatically.
Return:
The index of the newly added pie.
"""
num_colors = len(colors or [])
num_labels = len(labels or [])
pie_index = len(self.data)
self.data.append([])
for i, pt in enumerate(points):
label = None
if i < num_labels:
label = labels[i]
color = None
if i < num_colors:
color = colors[i]
self.AddSegment(pt, label=label, color=color, pie_index=pie_index)
return pie_index
  def AddSegments(self, points, labels, colors):
    """DEPRECATED. Use AddPie instead."""
    warnings.warn('PieChart.AddSegments is deprecated. Call AddPie instead. ',
                  DeprecationWarning, stacklevel=2)
    num_colors = len(colors or [])
    for i, pt in enumerate(points):
      assert pt >= 0
      # NOTE: unlike AddPie, labels must cover every point here (no bounds check).
      label = labels[i]
      color = None
      if i < num_colors:
        color = colors[i]
      self.AddSegment(pt, label=label, color=color)
  def AddSegment(self, size, label=None, color=None, pie_index=0):
    """Add a pie segment to this chart, and return the segment.

    size: The size of the segment.
    label: The label for the segment.
    color: The color of the segment, or None to automatically choose the color.
    pie_index: The index of the pie that will receive the new segment.
      By default, the chart has one pie (pie #0); use the AddPie method to
      add more pies.
    """
    # Deprecated call style: a pre-built Segment passed as the first argument.
    if isinstance(size, Segment):
      warnings.warn("AddSegment(segment) is deprecated. Use AddSegment(size, "
                    "label, color) instead", DeprecationWarning, stacklevel=2)
      segment = size
    else:
      segment = Segment(size, label=label, color=color)
    # Negative sizes make no sense for a pie slice.
    assert segment.size >= 0
    if pie_index == 0 and not self.data:
      # Create the default pie
      self.data.append([])
    # The target pie must already exist (created here or via AddPie).
    assert (pie_index >= 0 and pie_index < len(self.data))
    self.data[pie_index].append(segment)
    return segment
  def AddSeries(self, points, color=None, style=None, markers=None, label=None):
    """DEPRECATED

    Add a new segment to the chart and return it.

    The segment must contain exactly one data point; all parameters
    other than color and label are ignored.
    """
    warnings.warn('PieChart.AddSeries is deprecated. Call AddSegment or '
                  'AddSegments instead.', DeprecationWarning)
    # Only points[0] is used; style and markers are accepted for signature
    # compatibility with other chart types.
    return self.AddSegment(Segment(points[0], color=color, label=label))
def SetColors(self, *colors):
"""Change the colors of this chart to the specified list of colors.
Note that this will completely override the individual colors specified
in the pie segments. Missing colors will be interpolated, so that the
list of colors covers all segments in all the pies.
"""
self._colors = colors | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
#
# Authors:
# Sam Hewitt <hewittsamuel@gmail.com>
# Guillaume Mazoyer <gmazoyer@gravitons.in>
#
import getopt
import sys
from launchpadlib.launchpad import Launchpad
# Define if we can use ASCII art tables
use_table = True
try:
from texttable import Texttable
except:
use_table = False
# For reference: "ppa:ownername/archivename" or
# "https://launchpad.net/~ownername/+archive/archive-name"
def process_ppa_stats(ppa_owner, ppa_name, versions, archs):
    """Print download counts for every published binary in a PPA.

    ppa_owner: Launchpad user owning the PPA.
    ppa_name: name of the PPA archive.
    versions: Ubuntu series names (e.g. 'trusty') to query.
    archs: architectures (e.g. 'amd64') to query.
    """
    # Login into Launchpad anonymously (read-only access is enough for stats)
    lp_login = Launchpad.login_anonymously('ppastats', 'edge',
                                           '~/.launchpadlib/cache/',
                                           version='devel')
    # PPA owner
    owner = lp_login.people[ppa_owner]
    # PPA name
    archive = owner.getPPAByName(name=ppa_name)
    # Base URL to Launchpad API; placeholders are (series, architecture)
    base_url = 'https://api.launchpad.net/devel/ubuntu/{}/{}'
    # Print heading
    header = 'Download stats for ' + ppa_owner + ' PPA'
    print header
    print '-' * len(header)
    # For each version
    for version in versions:
        print ''
        print 'Packages for ' + version
        result = [['Package', 'Version', 'Arch', 'Count']]
        # For each architecture
        for arch in archs:
            url_to_check = base_url.format(version, arch)
            for individual_archive in archive.getPublishedBinaries(
                    status='Published', distro_arch_series=url_to_check):
                result.append(
                    [
                        individual_archive.binary_package_name,
                        individual_archive.binary_package_version,
                        arch,
                        str(individual_archive.getDownloadCount())
                    ]
                )
        if not use_table:
            # Simple terminal output (texttable is not installed)
            for value in result:
                print value[0] + "\t" + value[1] + "\t" + value[2] + "\t" + \
                    value[3]
        else:
            # Show the result in a beautiful table
            table = Texttable()
            table.set_cols_dtype(['t', 't', 't', 'i'])
            table.set_cols_align(['l', 'r', 'r', 'r'])
            table.add_rows(result)
            print table.draw()
def usage():
    """Print command-line help and exit-status documentation to stdout."""
    print "Usage: " + sys.argv[0] + " [OPTION]..."
    print ""
    print " -a, --archs \t specify the architectures (separated by"
    print " \t commas) to use"
    print " -h, --help \t display this help message"
    print " -p, --ppa \t specify the PPA to use with the following"
    print " \t format ppa:owner/name"
    print " -v, --versions \t specify the Ubuntu versions (separated by"
    print " \t commas) to use"
    print ""
    print "Exit status:"
    print " 0 if OK,"
    print " 1 if minor problems (e.g., cannot access PPA),"
    print " 2 if serious trouble (e.g., cannot access command-line argument)."
    print ""
    print "Report ppa-stats bugs to:"
    print " <https://github.com/respawner/ppa-stats/issues>"
    print "ppa-stats home page: <https://github.com/respawner/ppa-stats>"
def main(argv):
    """Parse CLI options (overriding the defaults below) and print PPA stats."""
    # Defaults used when no options are given.
    ppa_owner = 'java-gnome'
    ppa_name = 'ppa'
    versions = ['precise', 'trusty', 'utopic', 'vivid']
    archs = ['i386', 'amd64']
    try:
        # Parse the arguments given via the CLI
        opts, args = getopt.getopt(argv, 'hp:v:a:',
                                   ['help', 'ppa=', 'versions=', 'archs='])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    # Handle arguments
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
            sys.exit()
        elif opt in ('-p', '--ppa'):
            # Expected form: ppa:owner/name
            split = arg.split('/')
            ppa_owner = split[0].split(':')[1]
            ppa_name = split[1]
        elif opt in ('-v', '--versions'):
            versions = arg.split(',')
        elif opt in ('-a', '--archs'):
            archs = arg.split(",")
    # Process the stats
    process_ppa_stats(ppa_owner, ppa_name, versions, archs)
if __name__ == '__main__':
main(sys.argv[1:]) | unknown | codeparrot/codeparrot-clean | ||
'''
make_versions -- index many versions of a project
ALPHA code, will need modification for general use.
'''
import itertools
import re
import subprocess
import sys
import git
from django.core.management.base import BaseCommand
from natsort import natsorted
from app import models
class Project(object):
    """A git-backed source project whose release tags we want to index."""

    def __init__(self, name, bad_tag_pat=None, good_tag_pat=None):
        self.name = name
        self.proj_dir = 'SOURCE/{}'.format(name)
        self.good_tag_re = re.compile(good_tag_pat)
        # Default to a pattern that can never match a real tag, so every
        # good tag survives unless a bad_tag_pat is supplied.
        self.bad_tag_re = re.compile('never-match')
        if bad_tag_pat:
            self.bad_tag_re = re.compile(bad_tag_pat)

    def get_tags(self):
        """Return tag names matching good_tag_pat and not bad_tag_pat,
        in natural-sort order.

        NOTE(review): itertools.ifilterfalse exists only on Python 2; this
        module appears to target Python 2 (see the print statements below).
        """
        repos = git.Repo(self.proj_dir)
        tags = filter(self.good_tag_re.search, (tag.name for tag in repos.tags))
        tags = itertools.ifilterfalse(self.bad_tag_re.search, tags)
        tags = natsorted(tags)
        return tags
class Command(BaseCommand):
help = 'beer'
# def add_arguments(self, parser):
# parser.add_argument('projects', nargs='*')
# parser.add_argument('--index', action="store_true")
def handle(self, *args, **options):
proj = Project('postgres',
good_tag_pat='^REL',
bad_tag_pat='(ALPHA|BETA|RC|REL2_0'
'|[^9]_._[^0]' # < v9.0, skip micro changes
')')
tags = proj.get_tags()
# tags = tags[:3]
proj_versions = set(models.SourceLine.objects.filter(
project__startswith='postgres-').values_list('project', flat=True))
have_tags = set(projvers.split('-', 1)[1]
for projvers in proj_versions)
checkout_cmd = 'cd {dir} ; git checkout {tag}'
index_cmd = './manage.py make_index --project={name}-{tag} {dir}'
for tag in tags:
if tag in have_tags:
print '(have {}, skipping)'.format(tag)
continue
cmd = checkout_cmd.format(dir=proj.proj_dir, tag=tag)
print '>>>', cmd
if subprocess.call(cmd, shell=True):
sys.exit(0)
cmd = index_cmd.format(dir=proj.proj_dir, name=proj.name, tag=tag)
print '>>>', cmd
out = subprocess.check_output(cmd, shell=True)
print out | unknown | codeparrot/codeparrot-clean | ||
#!/bin/bash
# Buildkite check: fail the build when the current branch is not marked
# protected in GitHub.
set -euo pipefail
# Query the GitHub API for the branch's protection flag. jq prints
# 'true'/'false', or 'null' if the branch or field is missing.
STATUS=$(curl -s "https://api.github.com/repos/elastic/elasticsearch/branches/$BUILDKITE_BRANCH" | jq '.protected')
echo "Branch $BUILDKITE_BRANCH protection status is: $STATUS"
# NOTE(review): a 'null' STATUS (missing branch / API error) passes this
# check silently — confirm that is intended.
if [[ "$STATUS" == "false" ]]; then
  echo "Development branch $BUILDKITE_BRANCH is not set as protected in GitHub but should be."
  exit 1
fi | unknown | github | https://github.com/elastic/elasticsearch | .buildkite/scripts/branch-protection.sh |
---
c: Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
SPDX-License-Identifier: curl
Title: libcurl-url
Section: 3
Source: libcurl
See-also:
- CURLOPT_URL (3)
- curl_url (3)
- curl_url_cleanup (3)
- curl_url_dup (3)
- curl_url_get (3)
- curl_url_set (3)
- curl_url_strerror (3)
Protocol:
- All
Added-in: 7.62.0
---
# NAME
libcurl-url - URL interface overview
# DESCRIPTION
The URL interface provides functions for parsing and generating URLs.
# INCLUDE
You still only include \<curl/curl.h\> in your code.
# CREATE
Create a handle that holds URL info and resources with curl_url(3):
~~~c
CURLU *h = curl_url();
~~~
# CLEANUP
When done with it, clean it up with curl_url_cleanup(3)
~~~c
curl_url_cleanup(h);
~~~
# DUPLICATE
When you need a copy of a handle, just duplicate it with curl_url_dup(3):
~~~c
CURLU *nh = curl_url_dup(h);
~~~
# PARSING
By setting a URL to the handle with curl_url_set(3), the URL is parsed
and stored in the handle. If the URL is not syntactically correct it returns
an error instead.
~~~c
rc = curl_url_set(h, CURLUPART_URL,
"https://example.com:449/foo/bar?name=moo", 0);
~~~
The zero in the fourth argument is a bitmask for changing specific features.
If successful, this stores the URL in its individual parts within the handle.
# REDIRECT
When a handle already contains info about a URL, setting a relative URL makes
it "redirect" to that.
~~~c
rc = curl_url_set(h, CURLUPART_URL, "../test?another", 0);
~~~
# GET URL
The **CURLU** handle represents a URL and you can easily extract that with
curl_url_get(3):
~~~c
char *url;
rc = curl_url_get(h, CURLUPART_URL, &url, 0);
curl_free(url);
~~~
The zero in the fourth argument is a bitmask for changing specific features.
# GET PARTS
When a URL has been parsed or parts have been set, you can extract those
pieces from the handle at any time.
~~~c
rc = curl_url_get(h, CURLUPART_FRAGMENT, &fragment, 0);
rc = curl_url_get(h, CURLUPART_HOST, &host, 0);
rc = curl_url_get(h, CURLUPART_PASSWORD, &password, 0);
rc = curl_url_get(h, CURLUPART_PATH, &path, 0);
rc = curl_url_get(h, CURLUPART_PORT, &port, 0);
rc = curl_url_get(h, CURLUPART_QUERY, &query, 0);
rc = curl_url_get(h, CURLUPART_SCHEME, &scheme, 0);
rc = curl_url_get(h, CURLUPART_USER, &user, 0);
rc = curl_url_get(h, CURLUPART_ZONEID, &zoneid, 0);
~~~
Extracted parts are not URL decoded unless the user also asks for it with the
*CURLU_URLDECODE* flag set in the fourth bitmask argument.
Remember to free the returned string with curl_free(3) when you are done
with it.
# SET PARTS
A user can set individual URL parts, either after having parsed a full URL or
instead of parsing one.
~~~c
rc = curl_url_set(urlp, CURLUPART_FRAGMENT, "anchor", 0);
rc = curl_url_set(urlp, CURLUPART_HOST, "www.example.com", 0);
rc = curl_url_set(urlp, CURLUPART_PASSWORD, "doe", 0);
rc = curl_url_set(urlp, CURLUPART_PATH, "/index.html", 0);
rc = curl_url_set(urlp, CURLUPART_PORT, "443", 0);
rc = curl_url_set(urlp, CURLUPART_QUERY, "name=john", 0);
rc = curl_url_set(urlp, CURLUPART_SCHEME, "https", 0);
rc = curl_url_set(urlp, CURLUPART_USER, "john", 0);
rc = curl_url_set(urlp, CURLUPART_ZONEID, "eth0", 0);
~~~
Set parts are not URL encoded unless the user asks for it with the
*CURLU_URLENCODE* flag.
# CURLU_APPENDQUERY
An application can append a string to the right end of the query part with the
*CURLU_APPENDQUERY* flag to curl_url_set(3).
Imagine a handle that holds the URL "https://example.com/?shoes=2". An
application can then add the string "hat=1" to the query part like this:
~~~c
rc = curl_url_set(urlp, CURLUPART_QUERY, "hat=1", CURLU_APPENDQUERY);
~~~
It notices the lack of an ampersand (&) separator and injects one, and the
handle's full URL then equals "https://example.com/?shoes=2&hat=1".
The appended string can of course also get URL encoded on add, and if asked to
URL encode, the encoding process skips the '=' character. For example, append
"candy=N&N" to what we already have, and URL encode it to deal with the
ampersand in the data:
~~~c
rc = curl_url_set(urlp, CURLUPART_QUERY, "candy=N&N",
CURLU_APPENDQUERY | CURLU_URLENCODE);
~~~
Now the URL looks like
~~~c
https://example.com/?shoes=2&hat=1&candy=N%26N
~~~
# NOTES
A URL with a literal IPv6 address can be parsed even when IPv6 support is not
enabled. | unknown | github | https://github.com/curl/curl | docs/libcurl/libcurl-url.md |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script runs every build as the first hook (See DEPS). If it detects that
the build should be clobbered, it will delete the contents of the build
directory.
A landmine is tripped when a builder checks out a different revision, and the
diff between the new landmines and the old ones is non-null. At this point, the
build is clobbered.
"""
import difflib
import errno
import logging
import optparse
import os
import sys
import subprocess
import time
import clobber
import landmine_utils
SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_build_dir(build_tool, is_iphone=False):
  """
  Returns output directory absolute path dependent on build and targets.
  Examples:
    r'c:\b\build\slave\win\build\src\out'
    '/mnt/data/b/build/slave/linux/build/src/out'
    '/b/build/slave/ios_rel_device/build/src/xcodebuild'

  Keep this function in sync with tools/build/scripts/slave/compile.py
  """
  ret = None
  if build_tool == 'xcode':
    ret = os.path.join(SRC_DIR, 'xcodebuild')
  elif build_tool in ['make', 'ninja', 'ninja-ios']:  # TODO: Remove ninja-ios.
    if 'CHROMIUM_OUT_DIR' in os.environ:
      output_dir = os.environ.get('CHROMIUM_OUT_DIR').strip()
      if not output_dir:
        # BUG FIX: the original raised 'Error', a name never defined in this
        # module, which would have produced a NameError instead of the
        # intended message. Use a defined exception type.
        raise ValueError('CHROMIUM_OUT_DIR environment variable is set but blank!')
    else:
      output_dir = landmine_utils.gyp_generator_flags().get('output_dir', 'out')
    ret = os.path.join(SRC_DIR, output_dir)
  else:
    raise NotImplementedError('Unexpected GYP_GENERATORS (%s)' % build_tool)
  return os.path.abspath(ret)
def clobber_if_necessary(new_landmines):
  """Does the work of setting, planting, and triggering landmines.

  Compares new_landmines (list of newline-terminated strings) against the
  .landmines file saved next to the build output directory; if they differ,
  prints a diff and clobbers the output directory, then rewrites the file.
  """
  out_dir = get_build_dir(landmine_utils.builder())
  landmines_path = os.path.normpath(os.path.join(out_dir, '..', '.landmines'))
  try:
    os.makedirs(out_dir)
  except OSError as e:
    # An already-existing output directory is fine.
    # NOTE(review): every other OSError is silently swallowed here too —
    # confirm that is intended.
    if e.errno == errno.EEXIST:
      pass
  if os.path.exists(landmines_path):
    with open(landmines_path, 'r') as f:
      old_landmines = f.readlines()
    if old_landmines != new_landmines:
      # Landmines changed since the previous build: show why and clobber.
      old_date = time.ctime(os.stat(landmines_path).st_ctime)
      diff = difflib.unified_diff(old_landmines, new_landmines,
          fromfile='old_landmines', tofile='new_landmines',
          fromfiledate=old_date, tofiledate=time.ctime(), n=0)
      sys.stdout.write('Clobbering due to:\n')
      sys.stdout.writelines(diff)
      clobber.clobber(out_dir)
  # Save current set of landmines for next time.
  with open(landmines_path, 'w') as f:
    f.writelines(new_landmines)
def process_options():
  """Returns a list of landmine emitting scripts.

  Parses command-line flags, configures logging verbosity, and appends the
  optional EXTRA_LANDMINES_SCRIPT from the environment.
  """
  parser = optparse.OptionParser()
  parser.add_option(
      '-s', '--landmine-scripts', action='append',
      default=[os.path.join(SRC_DIR, 'build', 'get_landmines.py')],
      help='Path to the script which emits landmines to stdout. The target '
           'is passed to this script via option -t. Note that an extra '
           'script can be specified via an env var EXTRA_LANDMINES_SCRIPT.')
  parser.add_option('-v', '--verbose', action='store_true',
      default=('LANDMINES_VERBOSE' in os.environ),
      help=('Emit some extra debugging information (default off). This option '
            'is also enabled by the presence of a LANDMINES_VERBOSE environment '
            'variable.'))
  opts, leftover = parser.parse_args()
  if leftover:
    parser.error('Unknown arguments %s' % leftover)
  log_level = logging.DEBUG if opts.verbose else logging.ERROR
  logging.basicConfig(level=log_level)
  scripts = list(opts.landmine_scripts)
  extra_script = os.environ.get('EXTRA_LANDMINES_SCRIPT')
  if extra_script:
    scripts.append(extra_script)
  return scripts
def main():
  """Collect landmine lines from every emitter script and clobber if changed."""
  scripts = process_options()
  # These pseudo-builders never produce real output; nothing to clobber.
  if landmine_utils.builder() in ('dump_dependency_json', 'eclipse'):
    return 0
  landmines = []
  for script in scripts:
    child = subprocess.Popen([sys.executable, script], stdout=subprocess.PIPE)
    output, _ = child.communicate()
    for line in output.splitlines():
      landmines.append('%s\n' % line.strip())
  clobber_if_necessary(landmines)
  return 0
if __name__ == '__main__':
sys.exit(main()) | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.context.config;
import org.jspecify.annotations.Nullable;
/**
* Abstract base class for configuration data exceptions.
*
* @author Phillip Webb
* @author Madhura Bhave
* @since 2.4.0
*/
public abstract class ConfigDataException extends RuntimeException {
	/**
	 * Create a new {@link ConfigDataException} instance.
	 * @param message the exception message
	 * @param cause the exception cause, or {@code null} if there is no cause
	 */
	protected ConfigDataException(String message, @Nullable Throwable cause) {
		super(message, cause);
	}
} | java | github | https://github.com/spring-projects/spring-boot | core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataException.java |
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import re
import sys
import copy
import inspect
import traceback
from os.path import expanduser
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import configparser
import ansible.module_utils.six.moves.urllib.parse as urlparse
try:
from ansible.release import __version__ as ANSIBLE_VERSION
except ImportError:
ANSIBLE_VERSION = 'unknown'
# Argument spec shared by all Azure modules; merged into each module's own
# spec by AzureRMModuleBase.__init__. Credential fields are no_log so secrets
# stay out of module output and logs.
AZURE_COMMON_ARGS = dict(
    auth_source=dict(
        type='str',
        choices=['auto', 'cli', 'env', 'credential_file']
    ),
    profile=dict(type='str'),
    subscription_id=dict(type='str', no_log=True),
    client_id=dict(type='str', no_log=True),
    secret=dict(type='str', no_log=True),
    tenant=dict(type='str', no_log=True),
    ad_user=dict(type='str', no_log=True),
    password=dict(type='str', no_log=True),
    cloud_environment=dict(type='str'),
    cert_validation_mode=dict(type='str', choices=['validate', 'ignore'])
    # debug=dict(type='bool', default=False),
)
# Maps credential parameter names to their environment-variable equivalents,
# used when auth_source resolves to the environment.
AZURE_CREDENTIAL_ENV_MAPPING = dict(
    profile='AZURE_PROFILE',
    subscription_id='AZURE_SUBSCRIPTION_ID',
    client_id='AZURE_CLIENT_ID',
    secret='AZURE_SECRET',
    tenant='AZURE_TENANT',
    ad_user='AZURE_AD_USER',
    password='AZURE_PASSWORD',
    cloud_environment='AZURE_CLOUD_ENVIRONMENT',
    cert_validation_mode='AZURE_CERT_VALIDATION_MODE',
)
# Extra arguments added when a module supports resource tags.
AZURE_TAG_ARGS = dict(
    tags=dict(type='dict'),
    append_tags=dict(type='bool', default=True),
)
AZURE_COMMON_REQUIRED_IF = [
    ('log_mode', 'file', ['log_path'])
]
# User-agent strings attached to SDK clients (see get_mgmt_svc_client).
ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION)
CLOUDSHELL_USER_AGENT_KEY = 'AZURE_HTTP_USER_AGENT'
VSCODEEXT_USER_AGENT_KEY = 'VSCODEEXT_USER_AGENT'
# Matches an IPv4 CIDR such as 10.0.0.0/24.
CIDR_PATTERN = re.compile(r"(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1"
                          r"[0-9]{2}|2[0-4][0-9]|25[0-5])(/([0-9]|[1-2][0-9]|3[0-2]))")
AZURE_SUCCESS_STATE = "Succeeded"
AZURE_FAILED_STATE = "Failed"
# Import-availability flags; flipped to False by the guarded imports below.
HAS_AZURE = True
HAS_AZURE_EXC = None
HAS_AZURE_CLI_CORE = True
HAS_MSRESTAZURE = True
HAS_MSRESTAZURE_EXC = None
try:
    import importlib
except ImportError:
    # This passes the sanity import test, but does not provide a user friendly error message.
    # Doing so would require catching Exception for all imports of Azure dependencies in modules and module_utils.
    importlib = None
try:
    from packaging.version import Version
    HAS_PACKAGING_VERSION = True
    HAS_PACKAGING_VERSION_EXC = None
except ImportError as exc:
    Version = None
    HAS_PACKAGING_VERSION = False
    HAS_PACKAGING_VERSION_EXC = exc
# NB: packaging issue sometimes cause msrestazure not to be installed, check it separately
try:
    from msrest.serialization import Serializer
except ImportError as exc:
    HAS_MSRESTAZURE_EXC = exc
    HAS_MSRESTAZURE = False
# Azure SDK imports; any single failure marks the whole SDK unavailable and
# the captured exception is reported to the user at module init time.
try:
    from enum import Enum
    from msrestazure.azure_exceptions import CloudError
    from msrestazure.tools import resource_id, is_valid_resource_id
    from msrestazure import azure_cloud
    from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
    from azure.mgmt.network.version import VERSION as network_client_version
    from azure.mgmt.storage.version import VERSION as storage_client_version
    from azure.mgmt.compute.version import VERSION as compute_client_version
    from azure.mgmt.resource.version import VERSION as resource_client_version
    from azure.mgmt.dns.version import VERSION as dns_client_version
    from azure.mgmt.web.version import VERSION as web_client_version
    from azure.mgmt.network import NetworkManagementClient
    from azure.mgmt.resource.resources import ResourceManagementClient
    from azure.mgmt.storage import StorageManagementClient
    from azure.mgmt.compute import ComputeManagementClient
    from azure.mgmt.dns import DnsManagementClient
    from azure.mgmt.web import WebSiteManagementClient
    from azure.mgmt.containerservice import ContainerServiceClient
    from azure.storage.cloudstorageaccount import CloudStorageAccount
except ImportError as exc:
    HAS_AZURE_EXC = exc
    HAS_AZURE = False
# azure-cli is optional; when absent, CLIError degrades to plain Exception so
# `except CLIError` clauses elsewhere remain syntactically valid.
try:
    from azure.cli.core.util import CLIError
    from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
    from azure.common.cloud import get_cli_active_cloud
except ImportError:
    HAS_AZURE_CLI_CORE = False
    CLIError = Exception
def azure_id_to_dict(id):
    """Split an Azure resource ID into a dict of adjacent path-segment pairs.

    Note: the pairs overlap (every segment maps to its successor), matching
    the historical behaviour; callers look up only well-known key names.
    """
    segments = re.sub(r'^\/', '', id).split('/')
    return {key: value for key, value in zip(segments, segments[1:])}
def format_resource_id(val, subscription_id, namespace, types, resource_group):
    """Return ``val`` unchanged when it is already a full resource ID,
    otherwise assemble one from the remaining arguments."""
    if is_valid_resource_id(val):
        return val
    return resource_id(name=val,
                       resource_group=resource_group,
                       namespace=namespace,
                       type=types,
                       subscription=subscription_id)
# Minimum supported versions of the individual azure.mgmt SDK packages,
# keyed by management client class name (checked in check_client_version).
# Left empty when the Azure SDK failed to import.
AZURE_PKG_VERSIONS = {
    StorageManagementClient.__name__: {
        'package_name': 'storage',
        'expected_version': '1.5.0',
        'installed_version': storage_client_version
    },
    ComputeManagementClient.__name__: {
        'package_name': 'compute',
        'expected_version': '2.0.0',
        'installed_version': compute_client_version
    },
    NetworkManagementClient.__name__: {
        'package_name': 'network',
        'expected_version': '1.3.0',
        'installed_version': network_client_version
    },
    ResourceManagementClient.__name__: {
        'package_name': 'resource',
        'expected_version': '1.1.0',
        'installed_version': resource_client_version
    },
    DnsManagementClient.__name__: {
        'package_name': 'dns',
        'expected_version': '1.0.1',
        'installed_version': dns_client_version
    },
    WebSiteManagementClient.__name__: {
        'package_name': 'web',
        'expected_version': '0.32.0',
        'installed_version': web_client_version
    },
} if HAS_AZURE else {}
# Minimum supported release of the azure meta-package.
AZURE_MIN_RELEASE = '2.0.0'
class AzureRMModuleBase(object):
    def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False,
                 check_invalid_arguments=None, mutually_exclusive=None, required_together=None,
                 required_one_of=None, add_file_common_args=False, supports_check_mode=False,
                 required_if=None, supports_tags=True, facts_module=False, skip_exec=False):
        """Merge the Azure common argument spec with ``derived_arg_spec``,
        verify SDK availability, resolve credentials and cloud environment,
        build the SDK credential object and finally run ``exec_module``
        (unless ``skip_exec`` is set).

        Most keyword arguments are forwarded unchanged to ``AnsibleModule``.

        :param derived_arg_spec: module-specific arguments merged on top of
            the shared Azure argument spec
        :param supports_tags: include the common tags arguments when True
        :param facts_module: relaxes tag validation for facts modules
        :param skip_exec: when True, skip exec_module/exit_json (e.g. tests)
        """
        merged_arg_spec = dict()
        merged_arg_spec.update(AZURE_COMMON_ARGS)
        if supports_tags:
            merged_arg_spec.update(AZURE_TAG_ARGS)
        if derived_arg_spec:
            merged_arg_spec.update(derived_arg_spec)
        merged_required_if = list(AZURE_COMMON_REQUIRED_IF)
        if required_if:
            merged_required_if += required_if
        self.module = AnsibleModule(argument_spec=merged_arg_spec,
                                    bypass_checks=bypass_checks,
                                    no_log=no_log,
                                    check_invalid_arguments=check_invalid_arguments,
                                    mutually_exclusive=mutually_exclusive,
                                    required_together=required_together,
                                    required_one_of=required_one_of,
                                    add_file_common_args=add_file_common_args,
                                    supports_check_mode=supports_check_mode,
                                    required_if=merged_required_if)
        # Hard dependency checks; each fail() reports the ImportError captured
        # by the guarded imports at module load time.
        if not HAS_PACKAGING_VERSION:
            self.fail("Do you have packaging installed? Try `pip install packaging`"
                      "- {0}".format(HAS_PACKAGING_VERSION_EXC))
        if not HAS_MSRESTAZURE:
            self.fail("Do you have msrestazure installed? Try `pip install msrestazure`"
                      "- {0}".format(HAS_MSRESTAZURE_EXC))
        if not HAS_AZURE:
            self.fail("Do you have azure>={1} installed? Try `pip install ansible[azure]`"
                      "- {0}".format(HAS_AZURE_EXC, AZURE_MIN_RELEASE))
        # Lazily-instantiated SDK clients (see the *_client properties below).
        self._cloud_environment = None
        self._network_client = None
        self._storage_client = None
        self._resource_client = None
        self._compute_client = None
        self._dns_client = None
        self._web_client = None
        self._containerservice_client = None
        self.check_mode = self.module.check_mode
        self.facts_module = facts_module
        # self.debug = self.module.params.get('debug')
        # authenticate
        self.credentials = self._get_credentials(self.module.params)
        if not self.credentials:
            if HAS_AZURE_CLI_CORE:
                self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
                          "define a profile in ~/.azure/credentials, or log in with Azure CLI (`az login`).")
            else:
                self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
                          "define a profile in ~/.azure/credentials, or install Azure CLI and log in (`az login`).")
        # cert validation mode precedence: module-arg, credential profile, env, "validate"
        self._cert_validation_mode = self.module.params['cert_validation_mode'] or self.credentials.get('cert_validation_mode') or \
            os.environ.get('AZURE_CERT_VALIDATION_MODE') or 'validate'
        if self._cert_validation_mode not in ['validate', 'ignore']:
            self.fail('invalid cert_validation_mode: {0}'.format(self._cert_validation_mode))
        # if cloud_environment specified, look up/build Cloud object
        raw_cloud_env = self.credentials.get('cloud_environment')
        if self.credentials.get('credentials') is not None and raw_cloud_env is not None:
            # Azure CLI authentication already supplies a resolved Cloud object.
            self._cloud_environment = raw_cloud_env
        elif not raw_cloud_env:
            self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD  # SDK default
        else:
            # try to look up "well-known" values via the name attribute on azure_cloud members
            all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
            matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
            if len(matched_clouds) == 1:
                self._cloud_environment = matched_clouds[0]
            elif len(matched_clouds) > 1:
                self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
            else:
                # Last resort: treat the value as a metadata discovery endpoint URL.
                if not urlparse.urlparse(raw_cloud_env).scheme:
                    self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
                try:
                    self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
                except Exception as e:
                    self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message), exception=traceback.format_exc(e))
        if self.credentials.get('subscription_id', None) is None and self.credentials.get('credentials') is None:
            self.fail("Credentials did not include a subscription_id value.")
        self.log("setting subscription_id")
        self.subscription_id = self.credentials['subscription_id']
        # Build the SDK credential object from whichever combination of
        # credential attributes was provided.
        if self.credentials.get('credentials') is not None:
            # AzureCLI credentials
            self.azure_credentials = self.credentials['credentials']
        elif self.credentials.get('client_id') is not None and \
                self.credentials.get('secret') is not None and \
                self.credentials.get('tenant') is not None:
            self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
                                                                 secret=self.credentials['secret'],
                                                                 tenant=self.credentials['tenant'],
                                                                 cloud_environment=self._cloud_environment,
                                                                 verify=self._cert_validation_mode == 'validate')
        elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
            tenant = self.credentials.get('tenant')
            if not tenant:
                tenant = 'common'  # SDK default
            self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
                                                         self.credentials['password'],
                                                         tenant=tenant,
                                                         cloud_environment=self._cloud_environment,
                                                         verify=self._cert_validation_mode == 'validate')
        else:
            self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
                      "Credentials must include client_id, secret and tenant or ad_user and password or "
                      "be logged using AzureCLI.")
        # common parameter validation
        if self.module.params.get('tags'):
            self.validate_tags(self.module.params['tags'])
        if not skip_exec:
            res = self.exec_module(**self.module.params)
            self.module.exit_json(**res)
def check_client_version(self, client_type):
# Ensure Azure modules are at least 2.0.0rc5.
package_version = AZURE_PKG_VERSIONS.get(client_type.__name__, None)
if package_version is not None:
client_name = package_version.get('package_name')
client_version = package_version.get('installed_version')
expected_version = package_version.get('expected_version')
if Version(client_version) < Version(expected_version):
self.fail("Installed {0} client version is {1}. The supported version is {2}. Try "
"`pip install ansible[azure]`".format(client_name, client_version, expected_version))
def exec_module(self, **kwargs):
self.fail("Error: {0} failed to implement exec_module method.".format(self.__class__.__name__))
def fail(self, msg, **kwargs):
'''
Shortcut for calling module.fail()
:param msg: Error message text.
:param kwargs: Any key=value pairs
:return: None
'''
self.module.fail_json(msg=msg, **kwargs)
def deprecate(self, msg, version=None):
self.module.deprecate(msg, version)
def log(self, msg, pretty_print=False):
pass
# Use only during module development
# if self.debug:
# log_file = open('azure_rm.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, indent=4, sort_keys=True))
# else:
# log_file.write(msg + u'\n')
def validate_tags(self, tags):
'''
Check if tags dictionary contains string:string pairs.
:param tags: dictionary of string:string pairs
:return: None
'''
if not self.facts_module:
if not isinstance(tags, dict):
self.fail("Tags must be a dictionary of string:string values.")
for key, value in tags.items():
if not isinstance(value, str):
self.fail("Tags values must be strings. Found {0}:{1}".format(str(key), str(value)))
def update_tags(self, tags):
'''
Call from the module to update metadata tags. Returns tuple
with bool indicating if there was a change and dict of new
tags to assign to the object.
:param tags: metadata tags from the object
:return: bool, dict
'''
new_tags = copy.copy(tags) if isinstance(tags, dict) else dict()
changed = False
if isinstance(self.module.params.get('tags'), dict):
for key, value in self.module.params['tags'].items():
if not new_tags.get(key) or new_tags[key] != value:
changed = True
new_tags[key] = value
if isinstance(tags, dict):
for key, value in tags.items():
if not self.module.params['tags'].get(key):
new_tags.pop(key)
changed = True
return changed, new_tags
def has_tags(self, obj_tags, tag_list):
'''
Used in fact modules to compare object tags to list of parameter tags. Return true if list of parameter tags
exists in object tags.
:param obj_tags: dictionary of tags from an Azure object.
:param tag_list: list of tag keys or tag key:value pairs
:return: bool
'''
if not obj_tags and tag_list:
return False
if not tag_list:
return True
matches = 0
result = False
for tag in tag_list:
tag_key = tag
tag_value = None
if ':' in tag:
tag_key, tag_value = tag.split(':')
if tag_value and obj_tags.get(tag_key) == tag_value:
matches += 1
elif not tag_value and obj_tags.get(tag_key):
matches += 1
if matches == len(tag_list):
result = True
return result
    def get_resource_group(self, resource_group):
        '''
        Fetch a resource group.

        :param resource_group: name of a resource group
        :return: resource group object
        '''
        try:
            return self.rm_client.resource_groups.get(resource_group)
        except CloudError as cloud_error:
            # Azure-side failure: surface the service-provided message.
            self.fail("Error retrieving resource group {0} - {1}".format(resource_group, cloud_error.message))
        except Exception as exc:
            # Anything else (auth, transport, ...): fail with the raw error.
            self.fail("Error retrieving resource group {0} - {1}".format(resource_group, str(exc)))
def _get_profile(self, profile="default"):
path = expanduser("~/.azure/credentials")
try:
config = configparser.ConfigParser()
config.read(path)
except Exception as exc:
self.fail("Failed to access {0}. Check that the file exists and you have read "
"access. {1}".format(path, str(exc)))
credentials = dict()
for key in AZURE_CREDENTIAL_ENV_MAPPING:
try:
credentials[key] = config.get(profile, key, raw=True)
except:
pass
if credentials.get('subscription_id'):
return credentials
return None
def _get_azure_cli_credentials(self):
credentials, subscription_id = get_azure_cli_credentials()
cloud_environment = get_cli_active_cloud()
cli_credentials = {
'credentials': credentials,
'subscription_id': subscription_id,
'cloud_environment': cloud_environment
}
return cli_credentials
def _get_env_credentials(self):
env_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
env_credentials[attribute] = os.environ.get(env_variable, None)
if env_credentials['profile']:
credentials = self._get_profile(env_credentials['profile'])
return credentials
if env_credentials.get('subscription_id') is not None:
return env_credentials
return None
    def _get_credentials(self, params):
        """Resolve credentials according to ``auth_source``.

        For 'auto' the precedence is: module parameters -> environment
        variables -> default profile in ~/.azure/credentials -> Azure CLI
        profile. Returns a credentials dict or None when nothing resolves.
        """
        # Get authentication credentials.
        self.log('Getting credentials')
        arg_credentials = dict()
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
            arg_credentials[attribute] = params.get(attribute, None)
        auth_source = params.get('auth_source', None)
        if not auth_source:
            auth_source = os.environ.get('ANSIBLE_AZURE_AUTH_SOURCE', 'auto')
        if auth_source == 'cli':
            if not HAS_AZURE_CLI_CORE:
                self.fail("Azure auth_source is `cli`, but azure-cli package is not available. Try `pip install azure-cli --upgrade`")
            try:
                self.log('Retrieving credentials from Azure CLI profile')
                cli_credentials = self._get_azure_cli_credentials()
                return cli_credentials
            except CLIError as err:
                self.fail("Azure CLI profile cannot be loaded - {0}".format(err))
        if auth_source == 'env':
            self.log('Retrieving credentials from environment')
            env_credentials = self._get_env_credentials()
            return env_credentials
        if auth_source == 'credential_file':
            self.log("Retrieving credentials from credential file")
            profile = params.get('profile', 'default')
            default_credentials = self._get_profile(profile)
            return default_credentials
        # auto, precedence: module parameters -> environment variables -> default profile in ~/.azure/credentials
        # try module params
        if arg_credentials['profile'] is not None:
            self.log('Retrieving credentials with profile parameter.')
            credentials = self._get_profile(arg_credentials['profile'])
            return credentials
        if arg_credentials['subscription_id']:
            self.log('Received credentials from parameters.')
            return arg_credentials
        # try environment
        env_credentials = self._get_env_credentials()
        if env_credentials:
            self.log('Received credentials from env.')
            return env_credentials
        # try default profile from ~./azure/credentials
        default_credentials = self._get_profile()
        if default_credentials:
            self.log('Retrieved default profile credentials from ~/.azure/credentials.')
            return default_credentials
        # Last resort: the Azure CLI profile, if azure-cli is installed.
        try:
            if HAS_AZURE_CLI_CORE:
                self.log('Retrieving credentials from AzureCLI profile')
                cli_credentials = self._get_azure_cli_credentials()
                return cli_credentials
        except CLIError as ce:
            self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
        return None
def serialize_obj(self, obj, class_name, enum_modules=None):
'''
Return a JSON representation of an Azure object.
:param obj: Azure object
:param class_name: Name of the object's class
:param enum_modules: List of module names to build enum dependencies from.
:return: serialized result
'''
enum_modules = [] if enum_modules is None else enum_modules
dependencies = dict()
if enum_modules:
for module_name in enum_modules:
mod = importlib.import_module(module_name)
for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass):
dependencies[mod_class_name] = mod_class_obj
self.log("dependencies: ")
self.log(str(dependencies))
serializer = Serializer(classes=dependencies)
return serializer.body(obj, class_name, keep_readonly=True)
def get_poller_result(self, poller, wait=5):
'''
Consistent method of waiting on and retrieving results from Azure's long poller
:param poller Azure poller object
:return object resulting from the original request
'''
try:
delay = wait
while not poller.done():
self.log("Waiting for {0} sec".format(delay))
poller.wait(timeout=delay)
return poller.result()
except Exception as exc:
self.log(str(exc))
raise
    def check_provisioning_state(self, azure_object, requested_state='present'):
        '''
        Check an Azure object's provisioning state. If something did not complete the provisioning
        process, then we cannot operate on it.

        :param azure_object An object such as a subnet, storageaccount, etc. Must have provisioning_state
                            and name attributes.
        :return None
        '''
        # A non-success state fails the module unless the caller requested
        # 'absent' (a failed object can still be deleted).
        if hasattr(azure_object, 'properties') and hasattr(azure_object.properties, 'provisioning_state') and \
           hasattr(azure_object, 'name'):
            # resource group object fits this model
            if isinstance(azure_object.properties.provisioning_state, Enum):
                if azure_object.properties.provisioning_state.value != AZURE_SUCCESS_STATE and \
                   requested_state != 'absent':
                    self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                        azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
                return
            if azure_object.properties.provisioning_state != AZURE_SUCCESS_STATE and \
               requested_state != 'absent':
                self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                    azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
            return
        # NOTE(review): the `or not hasattr(..., 'name')` clause also matches
        # objects lacking BOTH attributes, which would then raise
        # AttributeError on .provisioning_state below - confirm intended.
        if hasattr(azure_object, 'provisioning_state') or not hasattr(azure_object, 'name'):
            if isinstance(azure_object.provisioning_state, Enum):
                if azure_object.provisioning_state.value != AZURE_SUCCESS_STATE and requested_state != 'absent':
                    self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                        azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
                return
            if azure_object.provisioning_state != AZURE_SUCCESS_STATE and requested_state != 'absent':
                self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                    azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
def get_blob_client(self, resource_group_name, storage_account_name, storage_blob_type='block'):
keys = dict()
try:
# Get keys from the storage account
self.log('Getting keys')
account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)
except Exception as exc:
self.fail("Error getting keys for account {0} - {1}".format(storage_account_name, str(exc)))
try:
self.log('Create blob service')
if storage_blob_type == 'page':
return CloudStorageAccount(storage_account_name, account_keys.keys[0].value).create_page_blob_service()
elif storage_blob_type == 'block':
return CloudStorageAccount(storage_account_name, account_keys.keys[0].value).create_block_blob_service()
else:
raise Exception("Invalid storage blob type defined.")
except Exception as exc:
self.fail("Error creating blob service client for storage account {0} - {1}".format(storage_account_name,
str(exc)))
    def create_default_pip(self, resource_group, location, public_ip_name, allocation_method='Dynamic'):
        '''
        Create a default public IP address <public_ip_name> to associate with a network interface.
        If a PIP address matching <public_ip_name> exists, return it. Otherwise, create one.

        :param resource_group: name of an existing resource group
        :param location: a valid azure location
        :param public_ip_name: base name to assign the public IP address
        :param allocation_method: one of 'Static' or 'Dynamic'
        :return: PIP object
        '''
        pip = None
        self.log("Starting create_default_pip {0}".format(public_ip_name))
        self.log("Check to see if public IP {0} exists".format(public_ip_name))
        try:
            pip = self.network_client.public_ip_addresses.get(resource_group, public_ip_name)
        except CloudError:
            # Not found: fall through and create it.
            pass
        if pip:
            self.log("Public ip {0} found.".format(public_ip_name))
            self.check_provisioning_state(pip)
            return pip
        params = self.network_models.PublicIPAddress(
            location=location,
            public_ip_allocation_method=allocation_method,
        )
        self.log('Creating default public IP {0}'.format(public_ip_name))
        try:
            poller = self.network_client.public_ip_addresses.create_or_update(resource_group, public_ip_name, params)
        except Exception as exc:
            self.fail("Error creating {0} - {1}".format(public_ip_name, str(exc)))
        # NOTE(review): assumes self.fail() does not return (fail_json exits),
        # so `poller` is always bound here - confirm.
        return self.get_poller_result(poller)
    def create_default_securitygroup(self, resource_group, location, security_group_name, os_type, open_ports):
        '''
        Create a default security group <security_group_name> to associate with a network interface. If a security group matching
        <security_group_name> exists, return it. Otherwise, create one.

        :param resource_group: Resource group name
        :param location: azure location name
        :param security_group_name: base name to use for the security group
        :param os_type: one of 'Windows' or 'Linux'. Determines any default rules added to the security group.
        :param open_ports: optional list of ports to open instead of the OS-type default rules
        :return: security_group object
        '''
        group = None
        self.log("Create security group {0}".format(security_group_name))
        self.log("Check to see if security group {0} exists".format(security_group_name))
        try:
            group = self.network_client.network_security_groups.get(resource_group, security_group_name)
        except CloudError:
            # Not found: fall through and create it.
            pass
        if group:
            self.log("Security group {0} found.".format(security_group_name))
            self.check_provisioning_state(group)
            return group
        parameters = self.network_models.NetworkSecurityGroup()
        parameters.location = location
        if not open_ports:
            # Open default ports based on OS type
            if os_type == 'Linux':
                # add an inbound SSH rule
                parameters.security_rules = [
                    self.network_models.SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow SSH Access',
                                                     source_port_range='*', destination_port_range='22', priority=100, name='SSH')
                ]
                parameters.location = location  # NOTE(review): redundant; location was already set above
            else:
                # for windows add inbound RDP and WinRM rules
                parameters.security_rules = [
                    self.network_models.SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow RDP port 3389',
                                                     source_port_range='*', destination_port_range='3389', priority=100, name='RDP01'),
                    self.network_models.SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow WinRM HTTPS port 5986',
                                                     source_port_range='*', destination_port_range='5986', priority=101, name='WinRM01'),
                ]
        else:
            # Open custom ports
            parameters.security_rules = []
            priority = 100
            for port in open_ports:
                priority += 1
                rule_name = "Rule_{0}".format(priority)
                parameters.security_rules.append(
                    self.network_models.SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', source_port_range='*',
                                                     destination_port_range=str(port), priority=priority, name=rule_name)
                )
        self.log('Creating default security group {0}'.format(security_group_name))
        try:
            poller = self.network_client.network_security_groups.create_or_update(resource_group,
                                                                                  security_group_name,
                                                                                  parameters)
        except Exception as exc:
            self.fail("Error creating default security rule {0} - {1}".format(security_group_name, str(exc)))
        return self.get_poller_result(poller)
@staticmethod
def _validation_ignore_callback(session, global_config, local_config, **kwargs):
session.verify = False
    def get_mgmt_svc_client(self, client_type, base_url=None, api_version=None):
        '''
        Instantiate an Azure management client of the given type using the
        resolved credentials, subscription and user-agent configuration.

        :param client_type: management client class (e.g. NetworkManagementClient)
        :param base_url: endpoint base URL, typically the cloud's resource manager
        :param api_version: optional explicit API version
        :return: configured client instance
        '''
        self.log('Getting management service client {0}'.format(client_type.__name__))
        self.check_client_version(client_type)
        if api_version:
            client = client_type(self.azure_credentials,
                                 self.subscription_id,
                                 api_version=api_version,
                                 base_url=base_url)
        else:
            client = client_type(self.azure_credentials,
                                 self.subscription_id,
                                 base_url=base_url)
        # Add user agent for Ansible
        client.config.add_user_agent(ANSIBLE_USER_AGENT)
        # Add user agent when running from Cloud Shell
        if CLOUDSHELL_USER_AGENT_KEY in os.environ:
            client.config.add_user_agent(os.environ[CLOUDSHELL_USER_AGENT_KEY])
        # Add user agent when running from VSCode extension
        if VSCODEEXT_USER_AGENT_KEY in os.environ:
            client.config.add_user_agent(os.environ[VSCODEEXT_USER_AGENT_KEY])
        if self._cert_validation_mode == 'ignore':
            # Hook the session to disable TLS certificate verification.
            client.config.session_configuration_callback = self._validation_ignore_callback
        return client
    # Lazily-created Azure SDK clients and their model namespaces.
    @property
    def storage_client(self):
        """Cached StorageManagementClient (API version 2017-10-01)."""
        self.log('Getting storage client...')
        if not self._storage_client:
            self._storage_client = self.get_mgmt_svc_client(StorageManagementClient,
                                                            base_url=self._cloud_environment.endpoints.resource_manager,
                                                            api_version='2017-10-01')
        return self._storage_client
    @property
    def storage_models(self):
        """Storage model namespace matching the client's API version."""
        self.log('Getting storage models...')
        return StorageManagementClient.models("2017-10-01")
    @property
    def network_client(self):
        """Cached NetworkManagementClient (API version 2017-06-01)."""
        self.log('Getting network client')
        if not self._network_client:
            self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
                                                            base_url=self._cloud_environment.endpoints.resource_manager,
                                                            api_version='2017-06-01')
        return self._network_client
    @property
    def network_models(self):
        """Network model namespace matching the client's API version."""
        self.log("Getting network models...")
        return NetworkManagementClient.models("2017-06-01")
    @property
    def rm_client(self):
        """Cached ResourceManagementClient (API version 2017-05-10)."""
        self.log('Getting resource manager client')
        if not self._resource_client:
            self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
                                                             base_url=self._cloud_environment.endpoints.resource_manager,
                                                             api_version='2017-05-10')
        return self._resource_client
    @property
    def rm_models(self):
        """Resource-manager model namespace matching the client's API version."""
        self.log("Getting resource manager models")
        return ResourceManagementClient.models("2017-05-10")
    @property
    def compute_client(self):
        """Cached ComputeManagementClient (API version 2017-03-30)."""
        self.log('Getting compute client')
        if not self._compute_client:
            self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
                                                            base_url=self._cloud_environment.endpoints.resource_manager,
                                                            api_version='2017-03-30')
        return self._compute_client
    @property
    def compute_models(self):
        """Compute model namespace matching the client's API version."""
        self.log("Getting compute models")
        return ComputeManagementClient.models("2017-03-30")
    @property
    def dns_client(self):
        """Cached DnsManagementClient (SDK default API version)."""
        self.log('Getting dns client')
        if not self._dns_client:
            self._dns_client = self.get_mgmt_svc_client(DnsManagementClient,
                                                        base_url=self._cloud_environment.endpoints.resource_manager)
        return self._dns_client
    @property
    def web_client(self):
        """Cached WebSiteManagementClient (SDK default API version)."""
        self.log('Getting web client')
        if not self._web_client:
            self._web_client = self.get_mgmt_svc_client(WebSiteManagementClient,
                                                        base_url=self._cloud_environment.endpoints.resource_manager)
        return self._web_client
    @property
    def containerservice_client(self):
        """Cached ContainerServiceClient (SDK default API version)."""
        self.log('Getting container service client')
        if not self._containerservice_client:
            self._containerservice_client = self.get_mgmt_svc_client(ContainerServiceClient,
                                                                     base_url=self._cloud_environment.endpoints.resource_manager)
        return self._containerservice_client
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [],
"panels": [],
"schemaVersion": 42,
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "Time Picker No Time Options Test Dashboard",
"weekStart": ""
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/testdata/output/latest_version/v41.time_picker_no_time_options.v42.json |
# encoding: utf-8
from south.v2 import DataMigration
class Migration(DataMigration):
depends_on = (('socialaccount', '0002_genericmodels'),)
    def forwards(self, orm):
        """Copy legacy Facebook app/account/token rows into the generic
        socialaccount models (SocialApp, SocialAccount, SocialToken)."""
        # Migrate FB apps
        app_id_to_sapp = {}
        for app in orm.FacebookApp.objects.all():
            sapp = orm['socialaccount.SocialApp'].objects \
                .create(site=app.site,
                        provider='facebook',
                        name=app.name,
                        key=app.application_id,
                        secret=app.application_secret)
            app_id_to_sapp[app.id] = sapp
        # Migrate FB accounts
        acc_id_to_sacc = {}
        for acc in orm.FacebookAccount.objects.all():
            # Reuse the parent SocialAccount row rather than creating a new one.
            sacc = acc.socialaccount_ptr
            sacc.uid = acc.social_id
            sacc.extra_data = { 'link': acc.link,
                                'name': acc.name }
            sacc.provider = 'facebook'
            sacc.save()
            acc_id_to_sacc[acc.id] = sacc
        # Migrate tokens
        for token in orm.FacebookAccessToken.objects.all():
            sapp = app_id_to_sapp[token.app.id]
            sacc = acc_id_to_sacc[token.account.id]
            orm['socialaccount.SocialToken'].objects \
                .create(app=sapp,
                        account=sacc,
                        token=token.access_token,
                        token_secret='')
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'facebook.facebookaccesstoken': {
'Meta': {'unique_together': "(('app', 'account'),)", 'object_name': 'FacebookAccessToken'},
'access_token': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['facebook.FacebookAccount']"}),
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['facebook.FacebookApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'facebook.facebookaccount': {
'Meta': {'object_name': 'FacebookAccount', '_ormbases': ['socialaccount.SocialAccount']},
'link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'social_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'socialaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['socialaccount.SocialAccount']", 'unique': 'True', 'primary_key': 'True'})
},
'facebook.facebookapp': {
'Meta': {'object_name': 'FacebookApp'},
'api_key': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'application_id': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'application_secret': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'socialaccount.socialaccount': {
'Meta': {'object_name': 'SocialAccount'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'extra_data': ('allauth.socialaccount.fields.JSONField', [], {'default': "'{}'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'socialaccount.socialapp': {
'Meta': {'object_name': 'SocialApp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"})
},
'socialaccount.socialtoken': {
'Meta': {'unique_together': "(('app', 'account'),)", 'object_name': 'SocialToken'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialAccount']"}),
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'token_secret': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['socialaccount', 'facebook'] | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test that it's OK to have C code that does nothing other than
// initialize a global variable. This used to fail with gccgo.
package gcc68255
/*
#include "c.h"
*/
import "C"
func F() bool {
return C.v != nil
} | go | github | https://github.com/golang/go | src/cmd/cgo/internal/test/gcc68255/a.go |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for XLA TensorArray Ops."""
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.compiler.xla import xla
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _make_converter(dtype):
def _converter(x):
return np.asarray(x).astype(dtype.as_numpy_dtype)
return _converter
# This lets me define `fn` repeatedly to pass to xla.compile.
#
# pylint: disable=function-redefined
@test_util.run_v1_only("b/") # Support TF2 list operations
@test_util.with_control_flow_v2
class TensorArrayTest(xla_test.XLATestCase):
@test_util.disable_control_flow_v2("Tries to evaluate flow")
def testTensorArrayWriteRead(self):
  """Writes three [1, 2] rows into a TensorArray and reads them back."""
  with self.session() as session, self.test_scope():
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, tensor_array_name="foo", size=3)
      w0 = ta.write(0, [[4.0, 5.0]])
      w1 = w0.write(1, [[1.0, 3.0]])
      w2 = w1.write(2, [[7.0, -8.5]])
      r0 = w2.read(0)
      r1 = w2.read(1)
      r2 = w2.read(2)
      flow = w2.flow
      return [r0, r1, r2, flow]
    d0, d1, d2, flow_val = self.evaluate(xla.compile(fn))
    self.assertAllEqual([[4.0, 5.0]], d0)
    self.assertAllEqual([[1.0, 3.0]], d1)
    self.assertAllEqual([[7.0, -8.5]], d2)
    # The flow output is a scalar used only for op ordering.
    self.assertAllEqual([], flow_val.shape)
def _testTensorArrayWritePack(self, tf_dtype):
  """Writes three [1, 2] rows of `tf_dtype` and checks stack() returns them."""
  with self.session(), self.test_scope():
    convert = _make_converter(tf_dtype)
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3)
      w0 = ta.write(0, convert([[4.0, 5.0]]))
      w1 = w0.write(1, convert([[6.0, 7.0]]))
      w2 = w1.write(2, convert([[8.0, 9.0]]))
      return w2.stack()
    # stack() prepends the array index as the leading dimension.
    self.assertAllEqual(
        convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]),
        self.evaluate(xla.compile(fn)[0]))
def testTensorArrayWritePack(self):
  """Runs the write/stack round-trip for every supported numeric dtype."""
  for element_dtype in self.numeric_tf_types:
    self._testTensorArrayWritePack(element_dtype)
def testEmptyTensorArrayPack(self):
  """Stacking rows with a zero-length leading dim yields shape [3, 0, 1]."""
  with self.session(), self.test_scope():
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, tensor_array_name="foo", size=3)
      empty_element = np.zeros((0, 1), dtype=np.float32)
      # Fill every slot with the same empty element.
      for index in range(3):
        ta = ta.write(index, empty_element)
      return ta.stack()
    self.assertAllEqual([3, 0, 1], self.evaluate(xla.compile(fn)[0]).shape)
def _testTensorArrayWriteConcat(self, tf_dtype):
  """Writes three [2, 2] rows and checks concat() joins them along axis 0."""
  with self.session(), self.test_scope():
    convert = _make_converter(tf_dtype)
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3)
      w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0]]))
      w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
      w2 = w1.write(2, convert([[8.0, 9.0], [124.0, 125.0]]))
      return w2.concat()
    self.assertAllEqual(
        convert([[4.0, 5.0], [104.0, 105.0], [6.0, 7.0], [106.0, 107.0],
                 [8.0, 9.0], [124.0, 125.0]]),
        self.evaluate(xla.compile(fn)[0]))
@test_util.disable_control_flow_v2("b/122315751 (concat)")
def testTensorArrayWriteConcat(self):
  """Runs the write/concat round-trip for every supported numeric dtype."""
  for element_dtype in self.numeric_tf_types:
    self._testTensorArrayWriteConcat(element_dtype)
def _testTensorArrayUnpackRead(self, tf_dtype):
  """unstack() then read() round-trips vectors, matrices and empty rows.

  `fn` is deliberately redefined three times; each version is compiled
  and evaluated separately.
  """
  with self.session() as session, self.test_scope():
    convert = _make_converter(tf_dtype)
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3)
      # Unpack a vector into scalars
      w0 = ta.unstack(convert([1.0, 2.0, 3.0]))
      r0 = w0.read(0)
      r1 = w0.read(1)
      r2 = w0.read(2)
      return [r0, r1, r2]
    d0, d1, d2 = self.evaluate(xla.compile(fn))
    self.assertAllEqual(convert(1.0), d0)
    self.assertAllEqual(convert(2.0), d1)
    self.assertAllEqual(convert(3.0), d2)
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3)
      # Unpack a matrix into vectors.
      w1 = ta.unstack(
          convert([[1.0, 1.03125], [2.0, 2.03125], [3.0, 3.03125]]))
      r0 = w1.read(0)
      r1 = w1.read(1)
      r2 = w1.read(2)
      return [r0, r1, r2]
    d0, d1, d2 = self.evaluate(xla.compile(fn))
    self.assertAllEqual(convert([1.0, 1.03125]), d0)
    self.assertAllEqual(convert([2.0, 2.03125]), d1)
    self.assertAllEqual(convert([3.0, 3.03125]), d2)
    def fn():
      # Reset ta because we're going to change the shape, else shape
      # inference will throw an error.
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3)
      # Try unpacking an empty matrix, which should not cause an error.
      w2 = ta.unstack(convert([[], [], []]))
      r0 = w2.read(0)
      r1 = w2.read(1)
      r2 = w2.read(2)
      return [r0, r1, r2]
    d0, d1, d2 = self.evaluate(xla.compile(fn))
    self.assertAllEqual(convert([]), d0)
    self.assertAllEqual(convert([]), d1)
    self.assertAllEqual(convert([]), d2)
def _testTensorArrayUnpackReadMaybeLegacy(self):
  """Dispatches the unstack/read round-trip over every numeric dtype."""
  for element_dtype in self.numeric_tf_types:
    self._testTensorArrayUnpackRead(element_dtype)
def testTensorArrayUnpackRead(self):
  # Delegates to the dtype-dispatching helper above.
  self._testTensorArrayUnpackReadMaybeLegacy()
def _testTensorArraySplitRead(self, tf_dtype):
  """split() distributes slices of `value` per `lengths`; read() returns them.

  `fn` is redefined for each case (empty vector, vector, matrix) and each
  version is compiled and evaluated separately.
  """
  with self.session() as session, self.test_scope():
    convert = _make_converter(tf_dtype)
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3)
      # Split an empty vector.
      lengths = constant_op.constant([0, 0, 0])
      w0 = ta.split(convert([]), lengths=lengths)
      r0 = w0.read(0)
      r1 = w0.read(1)
      r2 = w0.read(2)
      return [r0, r1, r2]
    d0, d1, d2 = self.evaluate(xla.compile(fn))
    self.assertAllEqual(convert([]), d0)
    self.assertAllEqual(convert([]), d1)
    self.assertAllEqual(convert([]), d2)
    def fn():
      # Split a vector.
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3)
      lengths = constant_op.constant([1, 1, 1])
      w0 = ta.split(convert([1.0, 2.0, 3.0]), lengths=lengths)
      r0 = w0.read(0)
      r1 = w0.read(1)
      r2 = w0.read(2)
      return [r0, r1, r2]
    d0, d1, d2 = self.evaluate(xla.compile(fn))
    self.assertAllEqual(convert([1.0]), d0)
    self.assertAllEqual(convert([2.0]), d1)
    self.assertAllEqual(convert([3.0]), d2)
    def fn():
      # Split a matrix.
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3)
      lengths = constant_op.constant([1, 1, 1])
      w0 = ta.split(
          convert([[1.0, 101.0], [2.0, 121.0], [3.0, 127.0]]),
          lengths=lengths)
      r0 = w0.read(0)
      r1 = w0.read(1)
      r2 = w0.read(2)
      return [r0, r1, r2]
    d0, d1, d2 = self.evaluate(xla.compile(fn))
    self.assertAllEqual(convert([[1.0, 101.0]]), d0)
    self.assertAllEqual(convert([[2.0, 121.0]]), d1)
    self.assertAllEqual(convert([[3.0, 127.0]]), d2)
@test_util.disable_control_flow_v2("b/122315872 (split)")
def testTensorArraySplitRead(self):
  """Runs the split/read round-trip for every supported numeric dtype."""
  for element_dtype in self.numeric_tf_types:
    self._testTensorArraySplitRead(element_dtype)
@test_util.disable_control_flow_v2("TensorArray.grad is not supported in v2")
def testTensorGradArrayWriteRead(self):
  """Writes to a TensorArray and its gradient array, then reads both back."""
  with self.session() as session, self.test_scope():
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, tensor_array_name="foo", size=3)
      w0 = ta.write(0, [[4.0]])
      w1 = w0.write(1, [[1.0]])
      w2 = w1.write(2, [[-3.0]])
      # grad() returns the auxiliary gradient TensorArray named "grad".
      g_ta = w2.grad("grad")
      g_w0 = g_ta.write(0, [[5.0]])
      g_w1 = g_w0.write(1, [[2.0]])
      g_w2 = g_w1.write(2, [[-2.0]])
      r0 = w2.read(0)
      r1 = w2.read(1)
      r2 = w2.read(2)
      g_r0 = g_w2.read(0)
      g_r1 = g_w2.read(1)
      g_r2 = g_w2.read(2)
      return [r0, r1, r2, g_r0, g_r1, g_r2]
    d0, d1, d2, g_d0, g_d1, g_d2 = self.evaluate(xla.compile(fn))
    self.assertAllEqual([[4.0]], d0)
    self.assertAllEqual([[1.0]], d1)
    self.assertAllEqual([[-3.0]], d2)
    self.assertAllEqual([[5.0]], g_d0)
    self.assertAllEqual([[2.0]], g_d1)
    self.assertAllEqual([[-2.0]], g_d2)
@test_util.disable_control_flow_v2("TensorArray.grad is not supported in v2")
def testTensorGradArrayDynamicWriteRead(self):
  """Like testTensorGradArrayWriteRead, but also checks size() of both arrays."""
  with self.session() as session, self.test_scope():
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, tensor_array_name="foo", size=3)
      w0 = ta.write(0, [[4.0]])
      w1 = w0.write(1, [[1.0]])
      w2 = w1.write(2, [[-3.0]])
      g_ta = w2.grad("grad")  # Get gradient array here so we know the shape
      s = w2.size()
      g_s = g_ta.size()
      g_w0 = g_ta.write(0, [[5.0]])
      g_w1 = g_w0.write(1, [[2.0]])
      g_w2 = g_w1.write(2, [[-2.0]])
      r0 = w2.read(0)
      r1 = w2.read(1)
      r2 = w2.read(2)
      g_r0 = g_w2.read(0)
      g_r1 = g_w2.read(1)
      g_r2 = g_w2.read(2)
      return [r0, r1, r2, g_r0, g_r1, g_r2, s, g_s]
    d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = self.evaluate(xla.compile(fn))
    self.assertAllEqual([[4.0]], d0)
    self.assertAllEqual([[1.0]], d1)
    self.assertAllEqual([[-3.0]], d2)
    self.assertAllEqual([[5.0]], g_d0)
    self.assertAllEqual([[2.0]], g_d1)
    self.assertAllEqual([[-2.0]], g_d2)
    # Both the primary and the gradient array report the static size 3.
    self.assertAllEqual(3, vs)
    self.assertAllEqual(3, g_vs)
@test_util.disable_control_flow_v2("TensorArray.grad is not supported in v2")
def testTensorGradAccessTwiceReceiveSameObject(self):
  """Two grad("grad") calls must alias the same underlying gradient array."""
  with self.session() as session, self.test_scope():
    ta_out = {}
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
          size=3,
          element_shape=[1, 2])
      g_ta_0 = ta.grad("grad")
      g_ta_1 = ta.grad("grad")
      ta_out[0] = g_ta_0.handle
      ta_out[1] = g_ta_1.handle
      with ops.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
        # Write with one gradient handle, read with another copy of it
        r1_0 = g_ta_1.read(0)
      with ops.control_dependencies([g_ta_0.handle.op, g_ta_1.handle.op]):
        return [r1_0]
    [d_r1_0] = self.evaluate(xla.compile(fn))
    self.assertAllEqual([[4.0, 5.0]], d_r1_0)
    # Can't assert this because adding a side output like we have here fails
    # as follows:
    #
    # ValueError: Operation u'TensorArrayGrad/TensorArrayGradV3' has been
    # marked as not fetchable.
    #
    # On the other hand, legitimately returning the handle from the
    # xla.compile function fails because we don't support DT_RESOURCE outputs
    # from XLA clusters.
    #
    # self.assertAllEqual(ta_out[0], ta_out[1])
@test_util.disable_control_flow_v2("b/124334470")
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
  """Writing an int32 constant to a float32 TensorArray must raise."""
  with self.session(), self.test_scope():
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, tensor_array_name="foo", size=3)
      return ta.write(-1, constant_op.constant(7)).flow
    # Test writing the wrong datatype.
    # TODO(b/129870929): Remove InvalidArgumentError/second regexp after all
    # callers provide proper init dtype.
    with self.assertRaisesRegex(
        (ValueError, errors.InvalidArgumentError), r"("
        r"conversion requested dtype float32 for Tensor with dtype int32"
        r"|"
        r"TensorArray dtype is float but op has dtype int32"
        r")"):
      xla.compile(fn)[0].eval()
@test_util.disable_control_flow_v2("b/124334096 verify dtype")
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
  """Reading with a mismatched dtype fails; reading an unwritten index runs."""
  # Find two different floating point types, create an array of
  # the first type, but try to read the other type.
  if len(self.float_types) > 1:
    dtype1, dtype2 = list(self.float_types)[:2]
    with self.session(), self.test_scope():
      def fn():
        ta = tensor_array_ops.TensorArray(
            dtype=dtype1, tensor_array_name="foo", size=3)
        w0 = ta.write(0, math_ops.cast([[4.0, 5.0]], dtype1))
        # Test reading wrong datatype.
        return gen_data_flow_ops.tensor_array_read_v3(
            handle=w0.handle, index=0, dtype=dtype2, flow_in=w0.flow)
      with self.assertRaisesOpError("TensorArray dtype is "):
        self.evaluate(xla.compile(fn))
      def fn():
        ta = tensor_array_ops.TensorArray(
            dtype=dtype1, tensor_array_name="foo", size=3)
        w0 = ta.write(0, math_ops.cast([[4.0, 5.0]], dtype1))
        # Test reading from a different index than the one we wrote to
        with ops.control_dependencies([w0.read(1)]):
          return 1.0
      # No assertRaises here: reading index 1 is expected to compile and run.
      xla.compile(fn)[0].eval()
@test_util.disable_control_flow_v2("b/122315872 (split)")
def testTensorArraySplitIncompatibleShapesFails(self):
  """split() rejects malformed `lengths` and mismatched value shapes."""
  with self.session(), self.test_scope():
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
          size=3,
          infer_shape=False)
      return ta.split([1.0, 2.0, 3.0], 1).flow
    # `lengths` must be a vector, not a scalar.
    with self.assertRaisesWithPredicateMatch(
        ValueError, r"Shape must be rank 1 but is rank 0"):
      xla.compile(fn)[0].eval()
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
          size=3,
          infer_shape=False)
      return ta.split([1.0, 2.0, 3.0], [1, 2, 3]).flow
    # All split lengths must be equal (per the expected error text).
    with self.assertRaisesOpError(
        r"lengths must be equal: 1 vs. 2"):
      xla.compile(fn)[0].eval()
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
          size=3,
          infer_shape=False)
      return ta.split(1.0, [1]).flow
    # The value being split must be at least rank 1.
    with self.assertRaisesOpError(
        r"value must have rank >= 1"):
      xla.compile(fn)[0].eval()
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
          size=2,
          infer_shape=False)
      return ta.split([1.0], [1]).flow
    # len(lengths) must match the TensorArray's size.
    with self.assertRaisesOpError(
        r"TensorArray's size is not equal to the size of lengths "
        r"\(1 vs. 2\)"):
      xla.compile(fn)[0].eval()
def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
  """Repeated gradient writes to one index must sum: 3 + 4 + 5 = 12."""
  with self.session(), self.test_scope():
    c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
      w0 = ta.write(2, c(3.0))
      w1 = w0.write(2, c(4.0))
      ta_grad = w1.grad("grad")
      w0_grad = ta_grad.write(2, c(3.0))
      w1_grad = w0_grad.write(2, c(4.0))
      w2_grad = w1_grad.write(2, c(5.0))
      return w2_grad.read(2)
    # Assert that aggregation works correctly
    self.assertAllEqual(c(12.00), xla.compile(fn)[0])
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
      w0 = ta.write(2, c(3.0))
      w1 = w0.write(2, c(4.0))
      ta_grad = w1.grad("grad")
      # Using differing shapes causes an exception
      wb0_grad = ta_grad.write(1, c(1.0))
      wb1_grad = wb0_grad.write(1, c([1.0]))
      return wb1_grad.flow
    with self.assertRaisesOpError(
        r"Mismatched TensorArray sizes"):
      xla.compile(fn)[0].eval()
@test_util.disable_control_flow_v2("TensorArray.grad is not supported in v2")
def testTensorArrayWriteGradientAddMultipleAdds(self):
  """Runs the gradient-aggregation test for every supported numeric dtype."""
  for element_dtype in self.numeric_tf_types:
    self._testTensorArrayWriteGradientAddMultipleAdds(element_dtype)
def testMultiTensorArray(self):
  """Two independent TensorArrays can coexist in one compiled computation."""
  with self.session(), self.test_scope():
    def fn():
      first = tensor_array_ops.TensorArray(
          size=1, dtype=dtypes.float32, tensor_array_name="foo")
      first_read = first.write(0, 4.0).read(0)
      second = tensor_array_ops.TensorArray(
          size=1, dtype=dtypes.float32, tensor_array_name="bar")
      second_read = second.write(0, 5.0).read(0)
      return first_read + second_read
    self.assertAllClose(9.0, self.evaluate(xla.compile(fn)[0]))
def _testTensorArrayGradientWriteReadType(self, dtype):
  """Checks tf.gradients through TensorArray write/read for numpy `dtype`."""
  with self.session() as session, self.test_scope():
    c = lambda x: np.array(x, dtype=dtype)
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.as_dtype(dtype),
          tensor_array_name="foo",
          size=3,
          infer_shape=False)
      value_0 = constant_op.constant(c([[4.0, 5.0]]))
      value_1 = constant_op.constant(c([[3.0, 3.5]]))
      w0 = ta.write(0, value_0)
      w1 = w0.write(1, value_1)
      r0 = w1.read(0)
      r1 = w1.read(1)
      r0_2 = w1.read(0)
      # Test individual components' gradients
      grad_just_r0 = gradients_impl.gradients(
          ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
      grad_r0_r0_2 = gradients_impl.gradients(
          ys=[r0, r0_2],
          xs=[value_0],
          grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
      grad_just_r1 = gradients_impl.gradients(
          ys=[r1], xs=[value_1], grad_ys=[c([[-2.0, -4.0]])])
      # Test combined gradients
      grad = gradients_impl.gradients(
          ys=[r0, r0_2, r1],
          xs=[value_0, value_1],
          grad_ys=[c([[2.0, 3.0]]),
                   c([[1.0, -1.0]]),
                   c([[-2.0, -10.0]])])
      return [grad_just_r0, grad_r0_r0_2, grad_just_r1, grad]
    [grad_just_r0_vals, grad_r0_r0_2_vals, grad_just_r1_vals,
     grad_vals] = self.evaluate(xla.compile(fn))
    self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])
    # Reading index 0 twice sums the incoming gradients: 2+1 and 3-1.
    self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])
    self.assertAllEqual(c([[-2.0, -4.0]]), grad_just_r1_vals[0])
    self.assertEqual(len(grad_vals), 2)
    self.assertAllEqual(c([[3.0, 2.0]]), grad_vals[0])
    self.assertAllEqual(c([[-2.0, -10.0]]), grad_vals[1])
def testTensorArrayGradientWriteRead(self):
  """Covers write/read gradients for all floating and complex dtypes."""
  for element_dtype in list(self.float_types) + list(self.complex_types):
    self._testTensorArrayGradientWriteReadType(element_dtype)
def _testTensorArrayGradientWritePackConcatAndRead(self):
  """Gradients from read(0), stack() and concat() accumulate per input."""
  with self.session() as sess, self.test_scope():
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
          size=2,
          clear_after_read=False)
      value_0 = constant_op.constant([-1.0, 1.0])
      value_1 = constant_op.constant([-10.0, 10.0])
      w0 = ta.write(0, value_0)
      w1 = w0.write(1, value_1)
      p0 = w1.stack()
      r0 = w1.read(0)
      s0 = w1.concat()
      # Test gradient accumulation between read(0), pack(), and concat().
      with ops.control_dependencies([p0, r0, s0]):
        return gradients_impl.gradients(
            ys=[p0, r0, s0],
            xs=[value_0, value_1],
            grad_ys=[
                [[2.0, 3.0], [4.0, 5.0]],  # stack gradient
                [-0.5, 1.5],  # read(0) gradient
                [20.0, 30.0, 40.0, 50.0],  # concat gradient
            ])
    grad_vals = self.evaluate(xla.compile(fn))  # 2 + 2 entries
    self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
    self.assertAllEqual([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])
@test_util.disable_control_flow_v2("b/122315751 (concat)")
def testTensorArrayGradientWritePackConcatAndRead(self):
  # Delegates to the helper implementation above.
  self._testTensorArrayGradientWritePackConcatAndRead()
def testTensorArrayReadTwice(self):
  """With clear_after_read=False an index can be read more than once."""
  with self.session(), self.test_scope():
    def fn():
      value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
      ta_readtwice = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
          size=2,
          clear_after_read=False)
      w_readtwice = ta_readtwice.unstack(value)
      r0_readtwice = w_readtwice.read(0)
      # Force the first read to execute before the second.
      with ops.control_dependencies([r0_readtwice]):
        r1_readtwice = w_readtwice.read(0)
      return [r0_readtwice, r1_readtwice]
    self.assertAllEqual([1.0, -1.0], self.evaluate(xla.compile(fn))[0])
def _testTensorArrayGradientUnpackRead(self):
  """Gradients through unstack() aggregate over repeated reads of index 0."""
  with self.session() as session, self.test_scope():
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
          size=2,
          clear_after_read=False)
      value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
      w = ta.unstack(value)
      r0 = w.read(0)
      r0_1 = w.read(0)
      r1 = w.read(1)
      # Test combined gradients + aggregation of read(0).
      return gradients_impl.gradients(
          ys=[r0, r0_1, r1],
          xs=[value],
          grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])
    grad_vals = self.evaluate(xla.compile(fn))
    self.assertEqual(len(grad_vals), 1)
    self.assertAllEqual([[2.0 - 1.5, 3.0 + 1.5], [4.0, 5.0]], grad_vals[0])
def testTensorArrayGradientUnpackRead(self):
  # Delegates to the helper implementation above.
  self._testTensorArrayGradientUnpackRead()
@test_util.disable_control_flow_v2("b/122315751(concat), b/122315872(split)")
def testTensorArrayGradientSplitConcat(self):
  """Gradients flow back through split() followed by concat() unchanged."""
  with self.session() as session, self.test_scope():
    def fn():
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, tensor_array_name="foo", size=2)
      value = constant_op.constant([[1.0, -1.0], [10.0, -10.0],
                                    [100.0, -100.0], [1000.0, -1000.0]])
      w = ta.split(value, [2, 2])
      r = w.concat()
      # Test combined gradients
      return gradients_impl.gradients(
          ys=[r],
          xs=[value],
          grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0],
                    [2000.0, -2000.0]]])
    grad_vals = self.evaluate(xla.compile(fn))
    # split + concat is an identity mapping, so the upstream gradient
    # passes straight through.
    self.assertEqual(len(grad_vals), 1)
    self.assertAllEqual([[2.0, -2.0], [20.0, -20.0], [200.0, -200.0],
                         [2000.0, -2000.0]],
                        grad_vals[0])
def testCloseTensorArray(self):
  """close() compiles and runs without error inside an XLA cluster."""
  with self.session() as session, self.test_scope():
    def fn():
      arr = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, tensor_array_name="foo", size=3)
      close_op = arr.close()
      with ops.control_dependencies([close_op]):
        return 1.0
    self.evaluate(xla.compile(fn)[0])
def testSizeTensorArray(self):
  """size() of a statically sized TensorArray evaluates to its length."""
  with self.session(), self.test_scope():
    def fn():
      arr = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, tensor_array_name="foo", size=3)
      return arr.size()
    self.assertAllEqual(3, self.evaluate(xla.compile(fn))[0])
def testWriteCloseTensorArray(self):
  """close() after a couple of writes compiles and runs without error."""
  with self.session(), self.test_scope():
    def fn():
      arr = tensor_array_ops.TensorArray(
          dtype=dtypes.float32,
          tensor_array_name="foo",
          size=3,
          infer_shape=False)
      written = arr.write(0, [[4.0, 5.0]]).write(1, [[3.0, 1.0]])
      with ops.control_dependencies([written.close()]):
        return 1.0
    self.evaluate(xla.compile(fn))
# TODO(phawkins): implement while loops.
# def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
# np_dtype = dtype.as_numpy_dtype
# with self.session() as session, self.test_scope():
# v0 = array_ops.identity(np.arange(3 * 5, dtype=np_dtype).reshape(3, 5))
# var = variables.Variable(np.arange(100, 105, dtype=np_dtype))
# state0 = array_ops.identity(np.array([1] * 5, dtype=np_dtype))
# ta = tensor_array_ops.TensorArray(
# dtype=dtype,
# tensor_array_name="foo",
# size=0 if dynamic_size else 3,
# dynamic_size=dynamic_size)
# time_0 = array_ops.identity(0)
# def body(time, ta_t, state):
# sliced = array_ops.slice(
# v0, begin=array_ops_stack.stack([time, 0]), size=[1, -1])
# sliced = array_ops.squeeze(sliced)
# out = sliced + var + state
# state += sliced
# ta_t = ta_t.write(time, out)
# return (time + 1, ta_t, state)
# (unused_0, h_final, unused_2) = control_flow_ops.while_loop(
# cond=lambda time, unused_1, unused_2: time < 3,
# body=body,
# loop_vars=(time_0, ta, state0),
# shape_invariants=(time_0.get_shape(), tensor_shape.unknown_shape(),
# tensor_shape.unknown_shape()),
# parallel_iterations=3)
# vout = h_final.stack()
# grad_val = -np.arange(3 * 5, dtype=np_dtype).reshape(3, 5)
# v0_grad = gradients_impl.gradients([vout], [v0], [grad_val])[0]
# state0_grad = gradients_impl.gradients([vout], [state0], [grad_val])[0]
# var_grad = gradients_impl.gradients([vout], [var], [grad_val])[0]
# self.evaluate(variables.global_variables_initializer())
# state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
# self.evaluate([state0, var, v0, vout, v0_grad, var_grad, state0_grad])
# )
# just_v0_grad_t, = self.evaluate([v0_grad])
# # state = [ state0 | state0 + v0[0] | state0 + v0[0] + v0[1] ]
# # vout = [ v0[0] + var + state[0] |
# # v0[1] + var + state[1] |
# # v0[2] + var + state[2] ]
# # = [ v0[0] + var + state0 |
# # v0[1] + var + state0 + v0[0] |
# # v0[2] + var + state0 + v0[0] + v0[1] ]
# #
# # d(vout[0])/d(v0) = [1 | 0 | 0 ]
# # d(vout[1])/d(v0) = [1 | 1 | 0 ]
# # d(vout[2])/d(v0) = [1 | 1 | 1 ]
# # d(vout)/d(var) = [1 | 1 | 1]
# # d(vout)/d(state0) = [ 1 | 1 | 1 ]
# state_per_time = np.array(
# [state0_t, state0_t + v0_t[0, :],
# state0_t + v0_t[0, :] + v0_t[1, :]])
# # Compare forward prop
# self.assertAllClose(v0_t + var_t + state_per_time, vout_t)
# # Compare backward prop
# expected_v0_grad_t = np.array([
# grad_val[0, :] + grad_val[1, :] + grad_val[2, :],
# grad_val[1, :] + grad_val[2, :], grad_val[2, :]
# ])
# self.assertAllEqual(expected_v0_grad_t, v0_grad_t)
# self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)
# self.assertAllClose(grad_val.sum(axis=0), var_grad_t)
# self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)
# def testWhileLoopWritePackGradients(self):
# self._testWhileLoopWritePackGradients(
# dynamic_size=False, dtype=dtypes.float32)
# # TODO(ebrevdo): re-enable when While supports non-float32 gradients.
# # self._testWhileLoopWritePackGradients(
# # dynamic_size=False, dtype=tf.int64)
# def testWhileLoopDynamicWritePackGradients(self):
# self._testWhileLoopWritePackGradients(
# dynamic_size=True, dtype=dtypes.float32)
# def testGradSerialTwoLoops(self):
# with self.session(), self.test_scope():
# num_steps = 100
# acc = tensor_array_ops.TensorArray(
# dtype=dtypes.float32,
# size=num_steps,
# clear_after_read=False,
# element_shape=tensor_shape.scalar())
# i = constant_op.constant(0, name="i")
# x = constant_op.constant(2.0, name="x")
# c = lambda i, acc: i < 5
# def b(i, acc):
# x1 = cond.cond(
# math_ops.equal(i, 0), lambda: x,
# lambda: math_ops.multiply(acc.read(i - 1), 2.0))
# return i + 1, acc.write(i, x1)
# i1, acc1 = control_flow_ops.while_loop(c, b, [i, acc])
# z = constant_op.constant(0.0)
# def fn(i, acc):
# return i + 1, acc.write(i, z)
# _, acc2 = control_flow_ops.while_loop(lambda i, acc: i < num_steps, fn,
# [i1, acc1])
# r = acc2.stack()
# grad = gradients_impl.gradients(r, [x])[0]
# self.assertAllClose(31.0, self.evaluate(grad))
def testSumOfTwoReadVariablesWithoutRepeatGrad(self):
  """d(a+b)/da and d(a+b)/db both equal the upstream gradient g0."""
  with self.session() as session, self.test_scope():
    g0 = -(np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)
    def fn():
      a = array_ops.identity(
          np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)
      b = array_ops.identity(
          np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1 + 3 * 5)
      ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
      ta = ta.write(0, a, name="write_a")
      ta = ta.write(1, b, name="write_b")
      c = (
          ta.read(0, name="read_a_0") +  # a + b
          ta.read(1, name="read_b_0"))
      grad_a = gradients_impl.gradients([c], [a], [g0])[0]  # d(a+b)/da = 1
      grad_b = gradients_impl.gradients([c], [b], [g0])[0]  # d(a+b)/db = 1
      return [grad_a, grad_b]
    grad_a, grad_b = xla.compile(fn)
    # Test gradients calculated individually
    grad_a_t, = self.evaluate([grad_a])
    self.assertAllEqual(grad_a_t, g0)
    grad_b_t, = self.evaluate([grad_b])
    self.assertAllEqual(grad_b_t, g0)
    # Test gradients calculated jointly.
    joint_grad_a_t, joint_grad_b_t = self.evaluate([grad_a, grad_b])
    self.assertAllEqual(joint_grad_a_t, g0)
    self.assertAllEqual(joint_grad_b_t, g0)
def testWriteShape(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c0 = constant_op.constant([4.0, 5.0])
w0 = ta.write(0, c0)
r0 = w0.read(0)
return [c0, r0]
c0, r0 = xla.compile(fn)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c1 = constant_op.constant([6.0, 7.0])
w0 = ta.write(0, c0)
w1 = w0.write(1, c1)
r0 = w1.read(0)
r1 = w1.read(1)
return [r0, c1, r1]
[r0, c1, r1] = xla.compile(fn)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
self.assertAllEqual(c1.get_shape(), r1.get_shape())
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, c0)
c2 = constant_op.constant([4.0, 5.0, 6.0])
return w0.write(0, c2).flow
with self.assertRaises(ValueError):
self.evaluate(xla.compile(fn))
def _testGradientWhenNotAllComponentsRead(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
x = constant_op.constant([2.0, 3.0])
w = ta.unstack(x)
r0 = w.read(0)
# Calculate (dr0/dx0, dr0/dx1). since r0 = x0, gradients are (1, 0).
return gradients_impl.gradients(ys=[r0], xs=[x], grad_ys=[1.0])
grad_r0_vals = self.evaluate(xla.compile(fn))[0]
self.assertAllEqual(grad_r0_vals, [1.0, 0.0])
def testGradientWhenNotAllComponentsRead(self):
self._testGradientWhenNotAllComponentsRead()
def _testTensorArrayEvalEmpty(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, infer_shape=False)
return ta.stack()
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError, "Uninitialized TensorArray passed to "
"TensorArrayStack/TensorArrayGatherV3"):
xla.compile(fn)[0].eval()
@test_util.disable_control_flow_v2("b/124335246")
def testTensorArrayEvalEmpty(self):
self._testTensorArrayEvalEmpty()
def _testTensorArrayEvalEmptyWithDefault(self):
with self.session(), self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, infer_shape=True)
size = ta.size()
ta = ta.unstack(array_ops.zeros([0, 3, 5]))
return [size, ta.stack()]
[size, stack] = self.evaluate(xla.compile(fn))
self.assertEqual(0, size)
self.assertAllEqual([0, 3, 5], stack.shape)
# Concatenating zero tensors along their first dimension gives a
# first dimension of zero
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, infer_shape=True)
ta = ta.unstack(array_ops.zeros([0, 3, 5]))
return ta.concat()
# TODO(b/122315751): Enable this.
self.assertAllEqual([0, 5], self.evaluate(xla.compile(fn))[0].shape)
def testTensorArrayEvalEmptyWithDefault(self):
self._testTensorArrayEvalEmptyWithDefault()
def _testTensorArrayScatterRead(self, tf_dtype):
with self.session() as session, self.test_scope():
convert = _make_converter(tf_dtype)
id0 = array_ops.placeholder(dtypes.int32)
id1 = array_ops.placeholder(dtypes.int32)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=10)
indices = constant_op.constant([1, 8])
value = constant_op.constant(convert([[1.0, 5.0], [10.0, 20.0]]))
w = ta.scatter(indices, value)
r0 = w.read(id0)
r1 = w.read(id1)
return [r0, r1]
# Test aggregation of read
read_vals = session.run(xla.compile(fn), feed_dict={id0: 1, id1: 8})
self.assertAllEqual(convert([1.0, 5.0]), read_vals[0])
self.assertAllEqual(convert([10.0, 20.0]), read_vals[1])
@test_util.disable_control_flow_v2("b/122315734 (scatter)")
def testTensorArrayScatterRead(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayScatterRead(dtype)
self._testTensorArrayScatterRead(dtypes.bool)
@test_util.disable_control_flow_v2("b/122315734 (scatter)")
def testTensorArrayScatterReadAndGradients(self):
with self.session() as session, self.test_scope():
id0 = array_ops.placeholder(dtypes.int32)
id1 = array_ops.placeholder(dtypes.int32)
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=10)
indices = constant_op.constant([1, 8])
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.scatter(indices, value)
r0 = w.read(id0)
r1 = w.read(id1)
# Test combined gradients + aggregation of read(0).
grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
return [[r0, r1], grad]
read_vals, grad_vals = session.run(
xla.compile(fn), feed_dict={
id0: 1,
id1: 8
})
self.assertEqual(len(read_vals), 2)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([1.0, -1.0], read_vals[0])
self.assertAllEqual([10.0, -10.0], read_vals[1])
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
@test_util.disable_control_flow_v2("b/122315378 (gather)")
def testTensorArrayWriteGatherAndGradients(self):
with self.session() as session, self.test_scope():
def fn():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=10)
values = constant_op.constant([[1.0 * x, -1.0 * x] for x in range(10)])
indices = constant_op.constant([1, 8])
w = ta.unstack(values)
g = w.gather(indices)
# Test combined gradients + aggregation of read(0).
grad = gradients_impl.gradients(
ys=[g], xs=[values], grad_ys=[[[2.0, 3.0], [4.0, 5.0]]])
return [[g], grad]
g_vals, grad_vals = self.evaluate(xla.compile(fn))
# Gradients for 8 of the 10 unread components are zero.
expected_grad = np.zeros((10, 2))
expected_grad[1] = [2.0, 3.0]
expected_grad[8] = [4.0, 5.0]
self.assertEqual(len(g_vals), 1)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[1.0, -1.0], [8.0, -8.0]], g_vals[0])
self.assertAllEqual(expected_grad, grad_vals[0])
def testTensorArrayIdentity(self):
with self.session() as session, self.test_scope():
tensor_arrays = {}
v0 = resource_variable_ops.ResourceVariable(0.0)
v1 = resource_variable_ops.ResourceVariable(0.0)
def fn():
ta0 = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, infer_shape=False)
ta1 = tensor_array_ops.TensorArray(
dtype=dtypes.int32, size=4, infer_shape=True)
ta0 = ta0.write(0, 0.)
ta1 = ta1.write(0, 1)
with ops.control_dependencies([v0.assign_add(1.0)]):
ta0 = ta0.identity()
with ops.control_dependencies([v1.assign_add(1.0)]):
ta1 = ta1.identity()
read0 = ta0.read(0)
read1 = ta1.read(0)
size0 = ta0.size()
size1 = ta1.size()
tensor_arrays[0] = ta0
tensor_arrays[1] = ta1
return [read0, read1, size0, size1, v0, v1]
self.evaluate(variables.global_variables_initializer())
read0_v, read1_v, size0_v, size1_v, v0, v1 = self.evaluate(
xla.compile(fn))
# Tests correct properties on new TensorArrays.
self.assertEqual(dtypes.float32, tensor_arrays[0].dtype)
self.assertEqual(dtypes.int32, tensor_arrays[1].dtype)
# Tests that the control dependencies was added and executed.
self.assertEqual(1.0, v0)
self.assertEqual(1.0, v1)
# Tests correct TensorArray.
self.assertEqual(read0_v, 0)
self.assertEqual(read1_v, 1)
self.assertEqual(size0_v, 2)
self.assertEqual(size1_v, 4)
if __name__ == "__main__":
test.main() | python | github | https://github.com/tensorflow/tensorflow | tensorflow/compiler/tests/tensor_array_ops_test.py |
import sys
from artiq import *
class Mandelbrot(EnvExperiment):
"""Mandelbrot set demo"""
def build(self):
self.setattr_device("core")
def col(self, i):
sys.stdout.write(" .,-:;i+hHM$*#@ "[i])
def row(self):
print("")
# based on: http://warp.povusers.org/MandScripts/python.html
@kernel
def run(self):
minX = -2.0
maxX = 1.0
width = 78
height = 36
aspectRatio = 2
yScale = (maxX-minX)*(height/width)*aspectRatio
for y in range(height):
for x in range(width):
c_r = minX+x*(maxX-minX)/width
c_i = y*yScale/height-yScale/2
z_r = c_r
z_i = c_i
for i in range(16):
if z_r*z_r + z_i*z_i > 4:
break
new_z_r = (z_r*z_r)-(z_i*z_i) + c_r
z_i = 2*z_r*z_i + c_i
z_r = new_z_r
self.col(i)
self.row() | unknown | codeparrot/codeparrot-clean | ||
# Set up your development environment
This folder contains useful scripts and configuration so you can:
- Configure data sources in Grafana for development.
- Configure dashboards for development and test scenarios.
- Set up an SMTP Server + Web Interface for viewing and testing emails.
- Create docker-compose file with databases and fake data.
## Install Docker
Grafana uses [Docker](https://docker.com) to make the task of setting up databases a little easier. If you do not have it already, make sure you [install Docker](https://docs.docker.com/docker-for-mac/install/) before proceeding to the next step.
## Developer dashboards and data sources
To setup developer dashboards and data sources
```bash
./setup.sh
```
To remove the setup developer dashboards and data sources
```bash
./setup.sh undev
```
After restarting the Grafana server (`make run`), there should be a number of data sources named `gdev-<type>`
provisioned as well as a dashboard folder named `gdev dashboards`. This folder contains dashboard and panel features
tests dashboards.
Please update these dashboards or make new ones as new panels and dashboards features are developed or new bugs are
found. The dashboards are located in the `devenv/dev-dashboards` folder.
## docker-compose with databases
This command creates a docker-compose file with specified databases configured and ready to run. Each database has
a prepared image with some fake data ready to use. For available databases, see `docker/blocks` directory. Notice that
for some databases there are multiple images with different versions. Some blocks such as `slow_proxy_mac` or `apache_proxy_mac` are specifically for Macs.
```bash
make devenv sources=influxdb,prometheus,elastic5
```
Some of the blocks support dynamic change of the image version used in the Docker file. The signature looks like this:
```bash
make devenv sources=postgres,auth/openldap,grafana postgres_version=9.2 grafana_version=6.7.0-beta1
```
### Notes per block
#### Grafana
The grafana block is pre-configured with the dev-datasources and dashboards.
#### Tempo
The tempo block runs loki and prometheus as well and should not be ran with prometheus as a separate source. You need to install a docker plugin for the self logging to work, without it the container won't start. See https://grafana.com/docs/loki/latest/clients/docker-driver/#installing for installation instructions.
#### Jaeger
Jaeger block runs both Jaeger and Loki container. Loki container sends traces to Jaeger and also logs its own logs into itself so it is possible to setup derived field for traceID from Loki to Jaeger. You need to install a docker plugin for the self logging to work, without it the container won't start. See https://grafana.com/docs/loki/latest/clients/docker-driver/#installing for installation instructions.
#### Graphite
| version | source name | graphite-web port | plaintext port | pickle port |
| ------- | ----------- | ----------------- | -------------- | ----------- |
| 1.1 | graphite | 8180 | 2103 | 2103 |
| 1.0 | graphite1 | 8280 | 2203 | 2203 |
| 0.9 | graphite09 | 8380 | 2303 | 2303 |
#### MailDev
MailDev block runs an SMTP server and a web UI to test and view emails. This is useful for testing your email notifications locally.
Make sure you configure your .ini file with the following settings:
```ini
[smtp]
enabled = true
skip_verify = true
host = "localhost:1025"
```
You can access the web UI at http://localhost:12080/#/
## Debugging setup in VS Code
An example of launch.json is provided in `.vscode/launch.json`. It basically does what Makefile and .bra.toml do. The 'program' field is set to the folder name so VS Code loads all \*.go files in it instead of just main.go.
## Troubleshooting
### Containers that read from log files fail to start (Mac OS)
If you are running Mac OSX, containers that read from the log files (e.g. Telegraf, Fileabeat, Promtail) can fail to start. This is because the default Docker for Mac does not have permission to create `grafana` folder at the `/var/log` location, as it runs as the current user. To solve this issue, manually create the folder `/var/log/grafana`, then start the containers again.
```
sudo mkdir /var/log/grafana
``` | unknown | github | https://github.com/grafana/grafana | devenv/README.md |
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package sql
import (
"bytes"
"compress/zlib"
"context"
"encoding/base64"
"fmt"
"net/url"
"strings"
"github.com/cockroachdb/cockroach/pkg/featureflag"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/inverted"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/cat"
"github.com/cockroachdb/cockroach/pkg/sql/opt/constraint"
"github.com/cockroachdb/cockroach/pkg/sql/opt/exec"
"github.com/cockroachdb/cockroach/pkg/sql/opt/exec/explain"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/row"
"github.com/cockroachdb/cockroach/pkg/sql/rowcontainer"
"github.com/cockroachdb/cockroach/pkg/sql/sem/builtins"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree/treewindow"
"github.com/cockroachdb/cockroach/pkg/sql/span"
"github.com/cockroachdb/cockroach/pkg/sql/sqlclustersettings"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/intsets"
"github.com/cockroachdb/errors"
)
type execFactory struct {
ctx context.Context
planner *planner
// isExplain is true if this factory is used to build a statement inside
// EXPLAIN or EXPLAIN ANALYZE.
isExplain bool
}
var _ exec.Factory = &execFactory{}
func newExecFactory(ctx context.Context, p *planner) *execFactory {
return &execFactory{
ctx: ctx,
planner: p,
}
}
// Ctx implements the Factory interface.
func (ef *execFactory) Ctx() context.Context {
return ef.ctx
}
// ConstructValues is part of the exec.Factory interface.
func (ef *execFactory) ConstructValues(
rows [][]tree.TypedExpr, cols colinfo.ResultColumns,
) (exec.Node, error) {
if len(cols) == 0 && len(rows) == 1 {
return &unaryNode{}, nil
}
if len(rows) == 0 {
return &zeroNode{columns: cols}, nil
}
return &valuesNode{
columns: cols,
tuples: rows,
specifiedInQuery: true,
}, nil
}
// ConstructLiteralValues is part of the exec.Factory interface.
func (ef *execFactory) ConstructLiteralValues(
rows tree.ExprContainer, cols colinfo.ResultColumns,
) (exec.Node, error) {
if len(cols) == 0 && rows.NumRows() == 1 {
return &unaryNode{}, nil
}
if rows.NumRows() == 0 {
return &zeroNode{columns: cols}, nil
}
switch t := rows.(type) {
case *rowcontainer.RowContainer:
return &valuesNode{
columns: cols,
specifiedInQuery: true,
externallyOwnedContainer: true,
valuesRun: valuesRun{rows: t},
}, nil
case *tree.VectorRows:
return &valuesNode{
columns: cols,
specifiedInQuery: true,
externallyOwnedContainer: true,
coldataBatch: t.Batch,
}, nil
default:
return nil, errors.AssertionFailedf("unexpected rows type %T in ConstructLiteralValues", rows)
}
}
// recordIndexRead - if applicable - records the fact that the given index has
// been used for reading.
func (ef *execFactory) recordIndexRead(tabDesc catalog.TableDescriptor, idx catalog.Index) {
if !ef.isExplain && !ef.planner.SessionData().Internal {
idxUsageKey := roachpb.IndexUsageKey{
TableID: roachpb.TableID(tabDesc.GetID()),
IndexID: roachpb.IndexID(idx.GetID()),
}
ef.planner.extendedEvalCtx.indexUsageStats.RecordRead(idxUsageKey)
}
}
// ConstructScan is part of the exec.Factory interface.
func (ef *execFactory) ConstructScan(
table cat.Table, index cat.Index, params exec.ScanParams, reqOrdering exec.OutputOrdering,
) (exec.Node, error) {
if table.IsVirtualTable() {
return ef.constructVirtualScan(table, index, params, reqOrdering)
}
tabDesc := table.(*optTable).desc
idx := index.(*optIndex).idx
// Create a scanNode.
scan := ef.planner.Scan()
colCfg := makeScanColumnsConfig(table, params.NeededCols)
if err := scan.initDescDefaults(tabDesc, colCfg); err != nil {
return nil, err
}
if params.IndexConstraint != nil && params.IndexConstraint.IsContradiction() {
return newZeroNode(scan.columns), nil
}
scan.index = idx
scan.hardLimit = params.HardLimit
scan.softLimit = params.SoftLimit
scan.reverse = params.Reverse
scan.parallelize = params.Parallelize
var err error
scan.spans, err = generateScanSpans(ef.ctx, ef.planner.EvalContext(), ef.planner.ExecCfg().Codec, tabDesc, idx, params)
if err != nil {
return nil, err
}
scan.isFull = len(scan.spans) == 1 && scan.spans[0].EqualValue(
scan.desc.IndexSpanAllowingExternalRowData(ef.planner.ExecCfg().Codec, scan.index.GetID()),
)
if err = colCfg.assertValidReqOrdering(reqOrdering); err != nil {
return nil, err
}
scan.reqOrdering = ReqOrdering(reqOrdering)
scan.estimatedRowCount = params.EstimatedRowCount
scan.statsCreatedAt = params.StatsCreatedAt
scan.lockingStrength = descpb.ToScanLockingStrength(params.Locking.Strength)
scan.lockingWaitPolicy = descpb.ToScanLockingWaitPolicy(params.Locking.WaitPolicy)
scan.lockingDurability = descpb.ToScanLockingDurability(params.Locking.Durability)
scan.localityOptimized = params.LocalityOptimized
ef.recordIndexRead(tabDesc, idx)
return scan, nil
}
func generateScanSpans(
ctx context.Context,
evalCtx *eval.Context,
codec keys.SQLCodec,
tabDesc catalog.TableDescriptor,
index catalog.Index,
params exec.ScanParams,
) (roachpb.Spans, error) {
var sb span.Builder
sb.InitAllowingExternalRowData(evalCtx, codec, tabDesc, index)
if params.InvertedConstraint != nil {
return sb.SpansFromInvertedSpans(ctx, params.InvertedConstraint, params.IndexConstraint, false /* prefixIncludedInKeys */, nil /* scratch */)
}
var splitter span.Splitter
if params.Locking.MustLockAllRequestedColumnFamilies() {
splitter = span.MakeSplitterForSideEffect(tabDesc, index, params.NeededCols)
} else {
splitter = span.MakeSplitter(tabDesc, index, params.NeededCols)
}
return sb.SpansFromConstraint(params.IndexConstraint, splitter)
}
func (ef *execFactory) constructVirtualScan(
table cat.Table, index cat.Index, params exec.ScanParams, reqOrdering exec.OutputOrdering,
) (exec.Node, error) {
return constructVirtualScan(
ef, ef.planner, table, index, params, reqOrdering,
func(d *delayedNode) (exec.Node, error) { return d, nil },
)
}
// ConstructFilter is part of the exec.Factory interface.
func (ef *execFactory) ConstructFilter(
n exec.Node, filter tree.TypedExpr, reqOrdering exec.OutputOrdering,
) (exec.Node, error) {
p := n.(planNode)
f := &filterNode{
singleInputPlanNode: singleInputPlanNode{p},
columns: planColumns(p),
}
f.filter = filter
f.reqOrdering = ReqOrdering(reqOrdering)
return f, nil
}
// ConstructInvertedFilter is part of the exec.Factory interface.
func (ef *execFactory) ConstructInvertedFilter(
n exec.Node,
invFilter *inverted.SpanExpression,
preFiltererExpr tree.TypedExpr,
preFiltererType *types.T,
invColumn exec.NodeColumnOrdinal,
) (exec.Node, error) {
inputCols := planColumns(n.(planNode))
columns := make(colinfo.ResultColumns, len(inputCols))
copy(columns, inputCols)
n = &invertedFilterNode{
singleInputPlanNode: singleInputPlanNode{n.(planNode)},
columns: columns,
invertedFilterPlanningInfo: invertedFilterPlanningInfo{
expression: invFilter,
preFiltererExpr: preFiltererExpr,
preFiltererType: preFiltererType,
invColumn: int(invColumn),
},
}
return n, nil
}
// ConstructSimpleProject is part of the exec.Factory interface.
func (ef *execFactory) ConstructSimpleProject(
n exec.Node, cols []exec.NodeColumnOrdinal, reqOrdering exec.OutputOrdering,
) (exec.Node, error) {
return constructSimpleProjectForPlanNode(n.(planNode), cols, nil /* colNames */, reqOrdering)
}
func constructSimpleProjectForPlanNode(
n planNode, cols []exec.NodeColumnOrdinal, colNames []string, reqOrdering exec.OutputOrdering,
) (exec.Node, error) {
// If the top node is already a renderNode, we can just rearrange the columns
// as an optimization if each render expression is projected exactly once.
if r, ok := n.(*renderNode); ok && canRearrangeRenders(cols, r.render) {
oldCols, oldRenders := r.columns, r.render
r.columns = make(colinfo.ResultColumns, len(cols))
r.render = make([]tree.TypedExpr, len(cols))
for i, ord := range cols {
r.columns[i] = oldCols[ord]
if colNames != nil {
r.columns[i].Name = colNames[i]
}
r.render[i] = oldRenders[ord]
}
r.reqOrdering = ReqOrdering(reqOrdering)
return r, nil
}
inputCols := planColumns(n)
var rb renderBuilder
rb.init(n, reqOrdering)
// TODO(mgartner): With an indexed var helper we are potentially allocating
// more indexed variables than we need. We only need len(cols) indexed vars
// but we have to allocate len(inputCols) to make sure there is an indexed
// variable for all possible input ordinals.
ivh := tree.MakeIndexedVarHelper(rb.r, len(inputCols))
exprs := make(tree.TypedExprs, len(cols))
for i, col := range cols {
exprs[i] = ivh.IndexedVar(int(col))
}
var resultTypes []*types.T
if colNames != nil {
// We will need updated result types.
resultTypes = make([]*types.T, len(cols))
for i := range exprs {
resultTypes[i] = exprs[i].ResolvedType()
}
}
resultCols := getResultColumnsForSimpleProject(cols, colNames, resultTypes, inputCols)
rb.setOutput(exprs, resultCols)
return rb.res, nil
}
// canRearrangeRenders returns true if the renderNode with the given columns and
// render expressions can be combined with a parent renderNode. This is possible
// if there are no duplicates in the columns, and every render expression is
// referenced at least once. In other words, it is possible when every render
// expression is projected exactly once. This ensures that no side effects are
// lost or duplicated, even if the result of an expression isn't needed (or is
// needed more than once).
func canRearrangeRenders(cols []exec.NodeColumnOrdinal, render tree.TypedExprs) bool {
// Check whether each render expression is projected at least once, if
// that's not the case, then we must add another processor in order for
// each render expression to be evaluated (this is needed for edge cases
// like the render expressions resulting in errors).
//
// See also PhysicalPlan.AddProjection for a similar case.
if len(cols) < len(render) {
// There is no way for each of the render expressions to be referenced.
return false
}
var colsSeen intsets.Fast
renderUsed := make([]bool, len(render))
for _, c := range cols {
if colsSeen.Contains(int(c)) {
return false
}
colsSeen.Add(int(c))
renderUsed[c] = true
}
for _, used := range renderUsed {
// Need to add a new renderNode if at least one render is not projected.
if !used {
return false
}
}
return true
}
// ConstructSerializingProject is part of the exec.Factory interface.
func (ef *execFactory) ConstructSerializingProject(
n exec.Node, cols []exec.NodeColumnOrdinal, colNames []string,
) (exec.Node, error) {
node := n.(planNode)
// If we are just renaming columns, we can do that in place.
if len(cols) == len(planColumns(node)) {
identity := true
for i := range cols {
if cols[i] != exec.NodeColumnOrdinal(i) {
identity = false
break
}
}
if identity {
inputCols := planMutableColumns(node)
for i := range inputCols {
inputCols[i].Name = colNames[i]
}
// TODO(yuzefovich): if n is not a renderNode, we won't serialize
// it, but this is breaking the contract of
// ConstructSerializingProject. We should clean this up, but in the
// mean time it seems acceptable given that the method is called
// only for the root node.
if r, ok := n.(*renderNode); ok {
r.serialize = true
}
return n, nil
}
}
res, err := constructSimpleProjectForPlanNode(node, cols, colNames, nil /* reqOrdering */)
if err != nil {
return nil, err
}
switch r := res.(type) {
case *renderNode:
r.serialize = true
default:
return nil, errors.AssertionFailedf("unexpected planNode type %T in ConstructSerializingProject", res)
}
return res, nil
}
// ConstructRender is part of the exec.Factory interface.
// N.B.: The input exprs will be modified.
func (ef *execFactory) ConstructRender(
n exec.Node,
columns colinfo.ResultColumns,
exprs tree.TypedExprs,
reqOrdering exec.OutputOrdering,
) (exec.Node, error) {
var rb renderBuilder
rb.init(n, reqOrdering)
rb.setOutput(exprs, columns)
return rb.res, nil
}
// ConstructHashJoin is part of the exec.Factory interface.
func (ef *execFactory) ConstructHashJoin(
joinType descpb.JoinType,
left, right exec.Node,
leftEqCols, rightEqCols []exec.NodeColumnOrdinal,
leftEqColsAreKey, rightEqColsAreKey bool,
extraOnCond tree.TypedExpr,
estimatedLeftRowCount, estimatedRightRowCount uint64,
) (exec.Node, error) {
p := ef.planner
leftPlan := left.(planNode)
rightPlan := right.(planNode)
leftCols := planColumns(leftPlan)
rightCols := planColumns(rightPlan)
pred := makePredicate(joinType, leftCols, rightCols, extraOnCond)
numEqCols := len(leftEqCols)
pred.leftEqualityIndices = leftEqCols
pred.rightEqualityIndices = rightEqCols
nameBuf := make(tree.NameList, 2*numEqCols)
pred.leftColNames = nameBuf[:numEqCols:numEqCols]
pred.rightColNames = nameBuf[numEqCols:]
for i := range leftEqCols {
pred.leftColNames[i] = tree.Name(leftCols[leftEqCols[i]].Name)
pred.rightColNames[i] = tree.Name(rightCols[rightEqCols[i]].Name)
}
pred.leftEqKey = leftEqColsAreKey
pred.rightEqKey = rightEqColsAreKey
return p.makeJoinNode(leftPlan, rightPlan, pred, estimatedLeftRowCount, estimatedRightRowCount), nil
}
// ConstructApplyJoin is part of the exec.Factory interface.
func (ef *execFactory) ConstructApplyJoin(
joinType descpb.JoinType,
left exec.Node,
rightColumns colinfo.ResultColumns,
onCond tree.TypedExpr,
planRightSideFn exec.ApplyJoinPlanRightSideFn,
rightSideForExplainFn exec.ApplyJoinRightSideForExplainFn,
) (exec.Node, error) {
l := left.(planNode)
leftCols := planColumns(l)
pred := makePredicate(joinType, leftCols, rightColumns, onCond)
return newApplyJoinNode(joinType, l, rightColumns, pred, planRightSideFn)
}
// ConstructMergeJoin is part of the exec.Factory interface.
func (ef *execFactory) ConstructMergeJoin(
joinType descpb.JoinType,
left, right exec.Node,
onCond tree.TypedExpr,
leftOrdering, rightOrdering colinfo.ColumnOrdering,
reqOrdering exec.OutputOrdering,
leftEqColsAreKey, rightEqColsAreKey bool,
estimatedLeftRowCount, estimatedRightRowCount uint64,
) (exec.Node, error) {
var err error
p := ef.planner
leftPlan := left.(planNode)
rightPlan := right.(planNode)
leftCols := planColumns(leftPlan)
rightCols := planColumns(rightPlan)
pred := makePredicate(joinType, leftCols, rightCols, onCond)
node := p.makeJoinNode(leftPlan, rightPlan, pred, estimatedLeftRowCount, estimatedRightRowCount)
pred.leftEqKey = leftEqColsAreKey
pred.rightEqKey = rightEqColsAreKey
pred.leftEqualityIndices, pred.rightEqualityIndices, node.mergeJoinOrdering, err = getEqualityIndicesAndMergeJoinOrdering(leftOrdering, rightOrdering)
if err != nil {
return nil, err
}
n := len(leftOrdering)
pred.leftColNames = make(tree.NameList, n)
pred.rightColNames = make(tree.NameList, n)
for i := 0; i < n; i++ {
leftColIdx, rightColIdx := leftOrdering[i].ColIdx, rightOrdering[i].ColIdx
pred.leftColNames[i] = tree.Name(leftCols[leftColIdx].Name)
pred.rightColNames[i] = tree.Name(rightCols[rightColIdx].Name)
}
// Set up node.props, which tells the distsql planner to maintain the
// resulting ordering (if needed).
node.reqOrdering = ReqOrdering(reqOrdering)
return node, nil
}
// ConstructScalarGroupBy is part of the exec.Factory interface.
func (ef *execFactory) ConstructScalarGroupBy(
input exec.Node, aggregations []exec.AggInfo, estimatedInputRowCount uint64,
) (exec.Node, error) {
// There are no grouping columns with scalar GroupBy, so we create empty
// arguments upfront to be passed into getResultColumnsForGroupBy call
// below.
var inputCols colinfo.ResultColumns
var groupCols []exec.NodeColumnOrdinal
n := &groupNode{
singleInputPlanNode: singleInputPlanNode{input.(planNode)},
funcs: make([]*aggregateFuncHolder, 0, len(aggregations)),
columns: getResultColumnsForGroupBy(inputCols, groupCols, aggregations),
isScalar: true,
estimatedRowCount: 1,
estimatedInputRowCount: estimatedInputRowCount,
}
if err := ef.addAggregations(n, aggregations); err != nil {
return nil, err
}
return n, nil
}
// ConstructGroupBy is part of the exec.Factory interface.
func (ef *execFactory) ConstructGroupBy(
input exec.Node,
groupCols []exec.NodeColumnOrdinal,
groupColOrdering colinfo.ColumnOrdering,
aggregations []exec.AggInfo,
reqOrdering exec.OutputOrdering,
groupingOrderType exec.GroupingOrderType,
estimatedRowCount uint64,
estimatedInputRowCount uint64,
) (exec.Node, error) {
inputPlan := input.(planNode)
inputCols := planColumns(inputPlan)
// TODO(harding): Use groupingOrder to determine when to use a hash
// aggregator.
n := &groupNode{
singleInputPlanNode: singleInputPlanNode{inputPlan},
funcs: make([]*aggregateFuncHolder, 0, len(groupCols)+len(aggregations)),
columns: getResultColumnsForGroupBy(inputCols, groupCols, aggregations),
groupCols: groupCols,
groupColOrdering: groupColOrdering,
isScalar: false,
reqOrdering: ReqOrdering(reqOrdering),
estimatedRowCount: estimatedRowCount,
estimatedInputRowCount: estimatedInputRowCount,
}
for _, col := range n.groupCols {
// TODO(radu): only generate the grouping columns we actually need.
f := newAggregateFuncHolder(
builtins.AnyNotNull,
[]exec.NodeColumnOrdinal{col},
nil, /* arguments */
false, /* isDistinct */
false, /* distsqlBlocklist */
)
n.funcs = append(n.funcs, f)
}
if err := ef.addAggregations(n, aggregations); err != nil {
return nil, err
}
return n, nil
}
func (ef *execFactory) addAggregations(n *groupNode, aggregations []exec.AggInfo) error {
for i := range aggregations {
agg := &aggregations[i]
f := newAggregateFuncHolder(
agg.FuncName,
agg.ArgCols,
agg.ConstArgs,
agg.Distinct,
agg.DistsqlBlocklist,
)
f.filterRenderIdx = int(agg.Filter)
n.funcs = append(n.funcs, f)
}
return nil
}
// ConstructDistinct is part of the exec.Factory interface.
func (ef *execFactory) ConstructDistinct(
input exec.Node,
distinctCols, orderedCols exec.NodeColumnOrdinalSet,
reqOrdering exec.OutputOrdering,
nullsAreDistinct bool,
errorOnDup string,
) (exec.Node, error) {
return &distinctNode{
singleInputPlanNode: singleInputPlanNode{input.(planNode)},
distinctOnColIdxs: distinctCols,
columnsInOrder: orderedCols,
reqOrdering: ReqOrdering(reqOrdering),
nullsAreDistinct: nullsAreDistinct,
errorOnDup: errorOnDup,
}, nil
}
// ConstructHashSetOp is part of the exec.Factory interface.
func (ef *execFactory) ConstructHashSetOp(
typ tree.UnionType, all bool, left, right exec.Node,
) (exec.Node, error) {
return ef.planner.newUnionNode(
typ, all, left.(planNode), right.(planNode),
nil /* leftOrdering */, nil, /* rightOrdering */
nil /* streamingOrdering */, nil, /* reqOrdering */
0 /* hardLimit */, false, /* enforceHomeRegion */
)
}
// ConstructStreamingSetOp is part of the exec.Factory interface.
// Unlike ConstructHashSetOp, the left/right/streaming orderings are passed
// through to newUnionNode so the set operation can be executed in a
// streaming fashion; no hard limit is applied.
func (ef *execFactory) ConstructStreamingSetOp(
	typ tree.UnionType,
	all bool,
	left, right exec.Node,
	leftOrdering, rightOrdering, streamingOrdering colinfo.ColumnOrdering,
	reqOrdering exec.OutputOrdering,
) (exec.Node, error) {
	return ef.planner.newUnionNode(
		typ,
		all,
		left.(planNode),
		right.(planNode),
		leftOrdering,
		rightOrdering,
		streamingOrdering,
		ReqOrdering(reqOrdering),
		0,     /* hardLimit */
		false, /* enforceHomeRegion */
	)
}
// ConstructUnionAll is part of the exec.Factory interface.
// It always builds a UNION ALL (tree.UnionOp with all=true); the required
// output ordering is reused as the streaming ordering, and hardLimit, if
// non-zero, caps the number of rows produced.
func (ef *execFactory) ConstructUnionAll(
	left, right exec.Node,
	leftOrdering, rightOrdering colinfo.ColumnOrdering,
	reqOrdering exec.OutputOrdering,
	hardLimit uint64,
	enforceHomeRegion bool,
) (exec.Node, error) {
	return ef.planner.newUnionNode(
		tree.UnionOp,
		true, /* all */
		left.(planNode),
		right.(planNode),
		leftOrdering,
		rightOrdering,
		colinfo.ColumnOrdering(reqOrdering),
		ReqOrdering(reqOrdering),
		hardLimit,
		enforceHomeRegion,
	)
}
// ConstructSort is part of the exec.Factory interface.
func (ef *execFactory) ConstructSort(
	input exec.Node,
	ordering exec.OutputOrdering,
	alreadyOrderedPrefix int,
	estimatedInputRowCount uint64,
) (exec.Node, error) {
	// The sortNode re-orders its input per the requested ordering; the first
	// alreadyOrderedPrefix ordering columns are already sorted in the input.
	sort := &sortNode{
		singleInputPlanNode:    singleInputPlanNode{input.(planNode)},
		ordering:               colinfo.ColumnOrdering(ordering),
		alreadyOrderedPrefix:   alreadyOrderedPrefix,
		estimatedInputRowCount: estimatedInputRowCount,
	}
	return sort, nil
}
// ConstructOrdinality is part of the exec.Factory interface.
func (ef *execFactory) ConstructOrdinality(input exec.Node, colName string) (exec.Node, error) {
	plan := input.(planNode)
	srcCols := planColumns(plan)
	// The output schema is the input columns followed by one extra INT
	// ordinality column named colName.
	outCols := make(colinfo.ResultColumns, len(srcCols), len(srcCols)+1)
	copy(outCols, srcCols)
	outCols = append(outCols, colinfo.ResultColumn{
		Name: colName,
		Typ:  types.Int,
	})
	return &ordinalityNode{
		singleInputPlanNode: singleInputPlanNode{plan},
		columns:             outCols,
	}, nil
}
// ConstructIndexJoin is part of the exec.Factory interface.
func (ef *execFactory) ConstructIndexJoin(
	input exec.Node,
	table cat.Table,
	keyCols []exec.NodeColumnOrdinal,
	tableCols exec.TableColumnOrdinalSet,
	reqOrdering exec.OutputOrdering,
	locking opt.Locking,
	limitHint int64,
	parallelize bool,
) (exec.Node, error) {
	tabDesc := table.(*optTable).desc
	colCfg := makeScanColumnsConfig(table, tableCols)
	// Initialize the fetch planning info for the requested columns.
	var fetch fetchPlanningInfo
	if err := fetch.initDescDefaults(tabDesc, colCfg); err != nil {
		return nil, err
	}
	// An index join always looks up rows in the table's primary index.
	idx := tabDesc.GetPrimaryIndex()
	ef.recordIndexRead(tabDesc, idx)
	fetch.index = idx
	// Translate the optimizer's locking properties into scan-level settings.
	fetch.lockingStrength = descpb.ToScanLockingStrength(locking.Strength)
	fetch.lockingWaitPolicy = descpb.ToScanLockingWaitPolicy(locking.WaitPolicy)
	fetch.lockingDurability = descpb.ToScanLockingDurability(locking.Durability)
	n := &indexJoinNode{
		singleInputPlanNode: singleInputPlanNode{input.(planNode)},
		columns:             fetch.columns,
		indexJoinPlanningInfo: indexJoinPlanningInfo{
			fetch:       fetch,
			keyCols:     keyCols,
			reqOrdering: ReqOrdering(reqOrdering),
			limitHint:   limitHint,
			parallelize: parallelize,
		},
	}
	return n, nil
}
// ConstructLookupJoin is part of the exec.Factory interface.
func (ef *execFactory) ConstructLookupJoin(
	joinType descpb.JoinType,
	input exec.Node,
	table cat.Table,
	index cat.Index,
	eqCols []exec.NodeColumnOrdinal,
	eqColsAreKey bool,
	lookupExpr tree.TypedExpr,
	remoteLookupExpr tree.TypedExpr,
	lookupCols exec.TableColumnOrdinalSet,
	onCond tree.TypedExpr,
	isFirstJoinInPairedJoiner bool,
	isSecondJoinInPairedJoiner bool,
	reqOrdering exec.OutputOrdering,
	locking opt.Locking,
	limitHint int64,
	remoteOnlyLookups bool,
	reverseScans bool,
	parallelize bool,
) (exec.Node, error) {
	// Virtual tables have a dedicated lookup-join implementation.
	if table.IsVirtualTable() {
		return constructVirtualTableLookupJoin(
			ef.planner, joinType, input, table, index, eqCols, lookupCols, onCond,
		)
	}
	tabDesc := table.(*optTable).desc
	idx := index.(*optIndex).idx
	colCfg := makeScanColumnsConfig(table, lookupCols)
	// Initialize the fetch planning info for the looked-up columns.
	var fetch fetchPlanningInfo
	if err := fetch.initDescDefaults(tabDesc, colCfg); err != nil {
		return nil, err
	}
	ef.recordIndexRead(tabDesc, idx)
	fetch.index = idx
	// Translate the optimizer's locking properties into scan-level settings.
	fetch.lockingStrength = descpb.ToScanLockingStrength(locking.Strength)
	fetch.lockingWaitPolicy = descpb.ToScanLockingWaitPolicy(locking.WaitPolicy)
	fetch.lockingDurability = descpb.ToScanLockingDurability(locking.Durability)
	n := &lookupJoinNode{
		singleInputPlanNode: singleInputPlanNode{input.(planNode)},
		lookupJoinPlanningInfo: lookupJoinPlanningInfo{
			fetch:                      fetch,
			joinType:                   joinType,
			eqCols:                     eqCols,
			eqColsAreKey:               eqColsAreKey,
			lookupExpr:                 lookupExpr,
			remoteLookupExpr:           remoteLookupExpr,
			isFirstJoinInPairedJoiner:  isFirstJoinInPairedJoiner,
			isSecondJoinInPairedJoiner: isSecondJoinInPairedJoiner,
			reqOrdering:                ReqOrdering(reqOrdering),
			limitHint:                  limitHint,
			remoteOnlyLookups:          remoteOnlyLookups,
			reverseScans:               reverseScans,
			parallelize:                parallelize,
		},
	}
	// A trivially-true ON condition is left nil so that no filter is applied.
	if onCond != tree.DBoolTrue {
		n.onCond = onCond
	}
	n.columns = getJoinResultColumns(joinType, planColumns(input.(planNode)), fetch.columns)
	// The first join of a paired joiner emits an extra boolean "cont" column
	// consumed by the second join.
	if isFirstJoinInPairedJoiner {
		n.columns = append(n.columns, colinfo.ResultColumn{Name: "cont", Typ: types.Bool})
	}
	return n, nil
}
// constructVirtualTableLookupJoin builds a vTableLookupJoinNode that joins
// input against a virtual table via a virtual index. Only a single equality
// column is supported, and the ON condition (if not trivially true) is folded
// into the join predicate.
func constructVirtualTableLookupJoin(
	p *planner,
	joinType descpb.JoinType,
	input exec.Node,
	table cat.Table,
	index cat.Index,
	eqCols []exec.NodeColumnOrdinal,
	lookupCols exec.TableColumnOrdinalSet,
	onCond tree.TypedExpr,
) (planNode, error) {
	tn := &table.(*optVirtualTable).name
	virtual, err := p.getVirtualTabler().getVirtualTableEntry(tn)
	if err != nil {
		return nil, err
	}
	if !canQueryVirtualTable(p.EvalContext(), virtual) {
		return nil, newUnimplementedVirtualTableError(tn.Schema(), tn.Table())
	}
	// Virtual indexes only support a single key column.
	if len(eqCols) > 1 {
		return nil, errors.AssertionFailedf("vtable indexes with more than one column aren't supported yet")
	}
	// Check for explicit use of the dummy column.
	if lookupCols.Contains(0) {
		return nil, errors.Errorf("use of %s column not allowed.", table.Column(0).ColName())
	}
	idx := index.(*optVirtualIndex).idx
	tableDesc := table.(*optVirtualTable).desc
	// Build the result columns.
	inputCols := planColumns(input.(planNode))
	// A trivially-true ON condition is dropped entirely.
	if onCond == tree.DBoolTrue {
		onCond = nil
	}
	var tableScan scanNode
	// Set up a scanNode that we won't actually use, just to get the needed
	// column analysis.
	colCfg := makeScanColumnsConfig(table, lookupCols)
	if err := tableScan.initDescDefaults(tableDesc, colCfg); err != nil {
		return nil, err
	}
	tableScan.index = idx
	vtableCols := colinfo.ResultColumnsFromColumns(tableDesc.GetID(), tableDesc.PublicColumns())
	projectedVtableCols := planColumns(&tableScan)
	// The output schema depends on the join type: inner/left-outer joins emit
	// both sides; semi/anti joins emit only the input columns.
	var outputCols colinfo.ResultColumns
	switch joinType {
	case descpb.InnerJoin, descpb.LeftOuterJoin:
		outputCols = make(colinfo.ResultColumns, 0, len(inputCols)+len(projectedVtableCols))
		outputCols = append(outputCols, inputCols...)
		outputCols = append(outputCols, projectedVtableCols...)
	case descpb.LeftSemiJoin, descpb.LeftAntiJoin:
		outputCols = make(colinfo.ResultColumns, 0, len(inputCols))
		outputCols = append(outputCols, inputCols...)
	default:
		return nil, errors.AssertionFailedf("unexpected join type for virtual lookup join: %s", joinType.String())
	}
	pred := makePredicate(joinType, inputCols, projectedVtableCols, onCond)
	n := &vTableLookupJoinNode{
		singleInputPlanNode: singleInputPlanNode{input.(planNode)},
		joinType:            joinType,
		virtualTableEntry:   virtual,
		dbName:              tn.Catalog(),
		table:               tableDesc,
		index:               idx,
		eqCol:               int(eqCols[0]),
		inputCols:           inputCols,
		vtableCols:          vtableCols,
		lookupCols:          lookupCols,
		columns:             outputCols,
		pred:                pred,
	}
	return n, nil
}
// ConstructInvertedJoin is part of the exec.Factory interface.
func (ef *execFactory) ConstructInvertedJoin(
	joinType descpb.JoinType,
	invertedExpr tree.TypedExpr,
	input exec.Node,
	table cat.Table,
	index cat.Index,
	prefixEqCols []exec.NodeColumnOrdinal,
	lookupCols exec.TableColumnOrdinalSet,
	onCond tree.TypedExpr,
	isFirstJoinInPairedJoiner bool,
	reqOrdering exec.OutputOrdering,
	locking opt.Locking,
) (exec.Node, error) {
	tabDesc := table.(*optTable).desc
	idx := index.(*optIndex).idx
	colCfg := makeScanColumnsConfig(table, lookupCols)
	// Initialize the fetch planning info for the looked-up columns.
	var fetch fetchPlanningInfo
	if err := fetch.initDescDefaults(tabDesc, colCfg); err != nil {
		return nil, err
	}
	ef.recordIndexRead(tabDesc, idx)
	fetch.index = idx
	// Translate the optimizer's locking properties into scan-level settings.
	fetch.lockingStrength = descpb.ToScanLockingStrength(locking.Strength)
	fetch.lockingWaitPolicy = descpb.ToScanLockingWaitPolicy(locking.WaitPolicy)
	fetch.lockingDurability = descpb.ToScanLockingDurability(locking.Durability)
	n := &invertedJoinNode{
		singleInputPlanNode: singleInputPlanNode{input.(planNode)},
		invertedJoinPlanningInfo: invertedJoinPlanningInfo{
			fetch:                     fetch,
			joinType:                  joinType,
			prefixEqCols:              prefixEqCols,
			invertedExpr:              invertedExpr,
			isFirstJoinInPairedJoiner: isFirstJoinInPairedJoiner,
			reqOrdering:               ReqOrdering(reqOrdering),
		},
	}
	// A nil or trivially-true ON condition means no extra filter.
	if onCond != nil && onCond != tree.DBoolTrue {
		n.onExpr = onCond
	}
	// Build the result columns.
	n.columns = invertedJoinResultCols(
		joinType, planColumns(input.(planNode)), fetch.columns, isFirstJoinInPairedJoiner,
	)
	return n, nil
}
// invertedJoinResultCols computes the output schema of an inverted join: the
// input columns, then the fetched columns (only for join types that include
// right-side columns in the output), then an extra boolean "cont" column when
// this is the first join of a paired joiner.
func invertedJoinResultCols(
	joinType descpb.JoinType,
	inputCols, fetchCols colinfo.ResultColumns,
	isFirstJoinInPairedJoiner bool,
) colinfo.ResultColumns {
	var rightCols colinfo.ResultColumns
	if joinType.ShouldIncludeRightColsInOutput() {
		rightCols = fetchCols
	}
	total := len(inputCols) + len(rightCols)
	if isFirstJoinInPairedJoiner {
		total++
	}
	out := make(colinfo.ResultColumns, 0, total)
	out = append(out, inputCols...)
	out = append(out, rightCols...)
	if isFirstJoinInPairedJoiner {
		out = append(out, colinfo.ResultColumn{Name: "cont", Typ: types.Bool})
	}
	return out
}
// constructFetchForZigzag is a helper that creates a fetchPlanningInfo struct
// from just a table / index descriptor and requested cols. It also maps the
// table-ordinal equality columns into scan ordinals, returning both the fetch
// info and the translated equality-column ordinals.
func (ef *execFactory) constructFetchForZigzag(
	table cat.Table,
	index cat.Index,
	cols exec.TableColumnOrdinalSet,
	eqCols []exec.TableColumnOrdinal,
	locking opt.Locking,
) (_ fetchPlanningInfo, eqColOrdinals []int, _ error) {
	colCfg := makeScanColumnsConfig(table, cols)
	var err error
	// Translate equality columns from table ordinals to scan ordinals.
	eqColOrdinals, err = tableToScanOrdinals(cols, eqCols)
	if err != nil {
		return fetchPlanningInfo{}, nil, err
	}
	tabDesc := table.(*optTable).desc
	idx := index.(*optIndex).idx
	var fetch fetchPlanningInfo
	if err := fetch.initDescDefaults(tabDesc, colCfg); err != nil {
		return fetchPlanningInfo{}, nil, err
	}
	ef.recordIndexRead(tabDesc, idx)
	fetch.index = idx
	// Translate the optimizer's locking properties into scan-level settings.
	fetch.lockingStrength = descpb.ToScanLockingStrength(locking.Strength)
	fetch.lockingWaitPolicy = descpb.ToScanLockingWaitPolicy(locking.WaitPolicy)
	fetch.lockingDurability = descpb.ToScanLockingDurability(locking.Durability)
	return fetch, eqColOrdinals, nil
}
// ConstructZigzagJoin is part of the exec.Factory interface.
func (ef *execFactory) ConstructZigzagJoin(
	leftTable cat.Table,
	leftIndex cat.Index,
	leftCols exec.TableColumnOrdinalSet,
	leftFixedVals []tree.TypedExpr,
	leftEqCols []exec.TableColumnOrdinal,
	leftLocking opt.Locking,
	rightTable cat.Table,
	rightIndex cat.Index,
	rightCols exec.TableColumnOrdinalSet,
	rightFixedVals []tree.TypedExpr,
	rightEqCols []exec.TableColumnOrdinal,
	rightLocking opt.Locking,
	onCond tree.TypedExpr,
	reqOrdering exec.OutputOrdering,
) (exec.Node, error) {
	// The two sides zigzag on matching equality columns, so the lists must
	// line up one-to-one.
	if len(leftEqCols) != len(rightEqCols) {
		return nil, errors.AssertionFailedf("creating zigzag join with unequal number of equated cols")
	}
	n := &zigzagJoinNode{
		sides:       make([]zigzagJoinSide, 2),
		reqOrdering: ReqOrdering(reqOrdering),
	}
	var err error
	// Side 0 is the left table/index; side 1 is the right.
	n.sides[0].fetch, n.sides[0].eqCols, err = ef.constructFetchForZigzag(
		leftTable, leftIndex, leftCols, leftEqCols, leftLocking,
	)
	if err != nil {
		return nil, err
	}
	n.sides[1].fetch, n.sides[1].eqCols, err = ef.constructFetchForZigzag(
		rightTable, rightIndex, rightCols, rightEqCols, rightLocking,
	)
	if err != nil {
		return nil, err
	}
	// A nil or trivially-true ON condition means no extra filter.
	if onCond != nil && onCond != tree.DBoolTrue {
		n.onCond = onCond
	}
	// The resultant columns are identical to those from individual index scans; so
	// reuse the resultColumns generated in the scanNodes.
	n.columns = make(
		colinfo.ResultColumns,
		0,
		len(n.sides[0].fetch.columns)+len(n.sides[1].fetch.columns),
	)
	n.columns = append(n.columns, n.sides[0].fetch.columns...)
	n.columns = append(n.columns, n.sides[1].fetch.columns...)
	// Fixed values are the values fixed for a prefix of each side's index columns.
	// See the comment in pkg/sql/rowexec/zigzagjoiner.go for how they are used.
	// mkFixedVals creates a values node that contains a single row with values
	// for a prefix of the index columns.
	// TODO(radu): using a valuesNode to represent a single tuple is dubious.
	mkFixedVals := func(fixedVals []tree.TypedExpr, index cat.Index) *valuesNode {
		cols := make(colinfo.ResultColumns, len(fixedVals))
		for i := range cols {
			col := index.Column(i)
			cols[i].Name = string(col.ColName())
			cols[i].Typ = col.DatumType()
		}
		return &valuesNode{
			columns:          cols,
			tuples:           [][]tree.TypedExpr{fixedVals},
			specifiedInQuery: true,
		}
	}
	n.sides[0].fixedVals = mkFixedVals(leftFixedVals, leftIndex)
	n.sides[1].fixedVals = mkFixedVals(rightFixedVals, rightIndex)
	return n, nil
}
// ConstructLimit is part of the exec.Factory interface.
func (ef *execFactory) ConstructLimit(
	input exec.Node, limit, offset tree.TypedExpr,
) (exec.Node, error) {
	plan := input.(planNode)
	// Limit and Offset are planned as separate operators, so this function can
	// be invoked twice for the same underlying node. When the input is already
	// a limitNode carrying only an offset and we are adding just a count, fold
	// the count into the existing node instead of stacking a second one.
	if existing, ok := plan.(*limitNode); ok && existing.countExpr == nil && offset == nil {
		existing.countExpr = limit
		return existing, nil
	}
	node := &limitNode{
		singleInputPlanNode: singleInputPlanNode{plan},
		countExpr:           limit,
		offsetExpr:          offset,
	}
	return node, nil
}
// ConstructTopK is part of the exec.Factory interface.
func (ef *execFactory) ConstructTopK(
	input exec.Node,
	k int64,
	ordering exec.OutputOrdering,
	alreadyOrderedPrefix int,
	estimatedInputRowCount uint64,
) (exec.Node, error) {
	// A topKNode returns the first k rows of its input under the given
	// ordering; the first alreadyOrderedPrefix columns are already sorted.
	node := &topKNode{
		singleInputPlanNode:    singleInputPlanNode{input.(planNode)},
		k:                      k,
		ordering:               colinfo.ColumnOrdering(ordering),
		alreadyOrderedPrefix:   alreadyOrderedPrefix,
		estimatedInputRowCount: estimatedInputRowCount,
	}
	return node, nil
}
// ConstructMax1Row is part of the exec.Factory interface.
func (ef *execFactory) ConstructMax1Row(input exec.Node, errorText string) (exec.Node, error) {
	// max1RowNode errors out with errorText if the input yields more than one
	// row.
	node := &max1RowNode{
		singleInputPlanNode: singleInputPlanNode{input.(planNode)},
		errorText:           errorText,
	}
	return node, nil
}
// ConstructBuffer is part of the exec.Factory interface.
func (ef *execFactory) ConstructBuffer(input exec.Node, label string) (exec.Node, error) {
	// Wrap the input in a bufferNode identified by label.
	node := &bufferNode{
		singleInputPlanNode: singleInputPlanNode{input.(planNode)},
		label:               label,
	}
	return node, nil
}
// ConstructScanBuffer is part of the exec.Factory interface.
func (ef *execFactory) ConstructScanBuffer(ref exec.Node, label string) (exec.Node, error) {
	// The reference may arrive wrapped in an explain.Node: this happens if we
	// used explain on the main query but we construct the scan buffer inside a
	// separate plan (e.g. recursive CTEs). Unwrap it first.
	if wrapped, ok := ref.(*explain.Node); ok {
		ref = wrapped.WrappedNode()
	}
	node := &scanBufferNode{
		buffer: ref.(*bufferNode),
		label:  label,
	}
	return node, nil
}
// ConstructRecursiveCTE is part of the exec.Factory interface.
func (ef *execFactory) ConstructRecursiveCTE(
	initial exec.Node, fn exec.RecursiveCTEIterationFn, label string, deduplicate bool,
) (exec.Node, error) {
	// The recursive CTE starts from the initial plan and invokes fn to build
	// each subsequent iteration's plan; deduplicate controls UNION vs UNION
	// ALL semantics per the flag passed in.
	node := &recursiveCTENode{
		singleInputPlanNode: singleInputPlanNode{initial.(planNode)},
		genIterationFn:      fn,
		label:               label,
		deduplicate:         deduplicate,
	}
	return node, nil
}
// ConstructProjectSet is part of the exec.Factory interface.
func (ef *execFactory) ConstructProjectSet(
	n exec.Node, exprs tree.TypedExprs, zipCols colinfo.ResultColumns, numColsPerGen []int,
) (exec.Node, error) {
	p := n.(planNode)
	cols := planColumns(p)
	// Use a full slice expression so append is forced to allocate a fresh
	// backing array: planColumns may return a slice aliasing the input node's
	// column list, and a plain append could otherwise write zipCols into that
	// shared storage if it has spare capacity.
	allCols := append(cols[:len(cols):len(cols)], zipCols...)
	return &projectSetNode{
		singleInputPlanNode: singleInputPlanNode{p},
		projectSetPlanningInfo: projectSetPlanningInfo{
			columns:         allCols,
			numColsInSource: len(cols),
			exprs:           exprs,
			numColsPerGen:   numColsPerGen,
		},
	}, nil
}
// ConstructWindow is part of the exec.Factory interface.
func (ef *execFactory) ConstructWindow(root exec.Node, wi exec.WindowInfo) (exec.Node, error) {
	// Convert the partition column ordinals to the uint32 form the node uses.
	partitionIdxs := make([]uint32, len(wi.Partition))
	for i, idx := range wi.Partition {
		partitionIdxs[i] = uint32(idx)
	}
	p := &windowNode{
		singleInputPlanNode: singleInputPlanNode{root.(planNode)},
		columns:             wi.Cols,
		windowPlanningInfo: windowPlanningInfo{
			partitionIdxs:  partitionIdxs,
			columnOrdering: wi.Ordering,
		},
	}
	// Build one windowFuncHolder per window function expression, wiring up
	// its argument, filter, and output column indices and its frame.
	p.funcs = make([]*windowFuncHolder, len(wi.Exprs))
	for i := range wi.Exprs {
		argsIdxs := make([]uint32, len(wi.ArgIdxs[i]))
		for j := range argsIdxs {
			argsIdxs[j] = uint32(wi.ArgIdxs[i][j])
		}
		p.funcs[i] = &windowFuncHolder{
			expr:         wi.Exprs[i],
			args:         wi.Exprs[i].Exprs,
			argsIdxs:     argsIdxs,
			filterColIdx: wi.FilterIdxs[i],
			outputColIdx: wi.OutputIdxs[i],
			frame:        wi.Exprs[i].WindowDef.Frame,
		}
		if len(wi.Ordering) == 0 {
			frame := p.funcs[i].frame
			if frame.Mode == treewindow.RANGE && frame.Bounds.HasOffset() {
				// Execution requires a single column to order by when there is
				// a RANGE mode frame with at least one 'offset' bound.
				return nil, errors.AssertionFailedf("a RANGE mode frame with an offset bound must have an ORDER BY column")
			}
		}
	}
	return p, nil
}
// ConstructPlan is part of the exec.Factory interface.
func (ef *execFactory) ConstructPlan(
	root exec.Node,
	subqueries []exec.Subquery,
	cascades, triggers []exec.PostQuery,
	checks []exec.Node,
	rootRowCount int64,
	flags exec.PlanFlags,
) (exec.Plan, error) {
	// Delegate to the shared helper that assembles the full plan from the
	// root, its subqueries, post-queries, and checks.
	plan, err := constructPlan(root, subqueries, cascades, triggers, checks, rootRowCount, flags)
	return plan, err
}
// urlOutputter handles writing strings into an encoded URL for EXPLAIN (OPT,
// ENV). It also ensures that (in the text that is encoded by the URL) each
// entry gets its own line and there's exactly one blank line between entries.
type urlOutputter struct {
	// buf accumulates the raw text before it is compressed and encoded.
	buf bytes.Buffer
}
// writef appends a formatted entry to the buffer, inserting a separating
// newline before every entry except the first.
func (e *urlOutputter) writef(format string, args ...interface{}) {
	if e.buf.Len() != 0 {
		e.buf.WriteString("\n")
	}
	fmt.Fprintf(&e.buf, format, args...)
}
// finish compresses the accumulated text with zlib, encodes it as URL-safe
// base64, and returns a URL carrying the encoded payload in its fragment.
func (e *urlOutputter) finish() (url.URL, error) {
	// Generate a URL that encodes all the text.
	var compressed bytes.Buffer
	encoder := base64.NewEncoder(base64.URLEncoding, &compressed)
	compressor := zlib.NewWriter(encoder)
	if _, err := e.buf.WriteTo(compressor); err != nil {
		return url.URL{}, err
	}
	// Close the compressor before the encoder so buffered zlib data is
	// flushed into the base64 stream before the encoder finalizes its output.
	if err := compressor.Close(); err != nil {
		return url.URL{}, err
	}
	if err := encoder.Close(); err != nil {
		return url.URL{}, err
	}
	return url.URL{
		Scheme:   "https",
		Host:     "cockroachdb.github.io",
		Path:     "text/decode.html",
		Fragment: compressed.String(),
	}, nil
}
// showEnv implements EXPLAIN (opt, env). It returns a node which displays
// the environment a query was run in: version, session and cluster settings,
// DDL for referenced objects, table statistics, and the query itself, all
// encoded into a single URL.
func (ef *execFactory) showEnv(plan string, envOpts exec.ExplainEnvData) (exec.Node, error) {
	var out urlOutputter
	ie := ef.planner.extendedEvalCtx.ExecCfg.InternalDB.NewInternalExecutor(
		ef.planner.SessionData(),
	)
	c := makeStmtEnvCollector(ef.ctx, ef.planner, ie.(*InternalExecutor), "" /* requesterUsername */)
	// Show the version of Cockroach running.
	if err := c.PrintVersion(&out.buf); err != nil {
		return nil, err
	}
	// An empty writef emits a blank-line separator between entries.
	out.writef("")
	// Show the values of all non-default session variables and session
	// settings.
	if err := c.PrintSessionSettings(&out.buf, &ef.planner.extendedEvalCtx.Settings.SV, false /* all */); err != nil {
		return nil, err
	}
	out.writef("")
	if err := c.PrintClusterSettings(&out.buf, false /* all */); err != nil {
		return nil, err
	}
	// Show the definition of each referenced catalog object.
	for i := range envOpts.Sequences {
		out.writef("")
		if err := c.PrintCreateSequence(&out.buf, &envOpts.Sequences[i]); err != nil {
			return nil, err
		}
	}
	for i := range envOpts.Tables {
		out.writef("")
		if err := c.PrintCreateTable(
			&out.buf, &envOpts.Tables[i], false, /* redactValues */
		); err != nil {
			return nil, err
		}
		out.writef("")
		// In addition to the schema, it's important to know what the table
		// statistics on each table are.
		// NOTE: We don't include the histograms because they take up a ton of
		// vertical space. Unfortunately this means that in some cases we won't be
		// able to reproduce a particular plan.
		err := c.PrintTableStats(&out.buf, &envOpts.Tables[i], true /* hideHistograms */)
		if err != nil {
			return nil, err
		}
	}
	// PrintCreateTable above omitted the FK constraints from the schema, so we
	// need to add them separately.
	for _, addFK := range envOpts.AddFKs {
		fmt.Fprintf(&out.buf, "%s;\n", addFK)
	}
	for i := range envOpts.Views {
		out.writef("")
		if err := c.PrintCreateView(&out.buf, &envOpts.Views[i], false /* redactValues */); err != nil {
			return nil, err
		}
	}
	// Show the query running. Note that this is the *entire* query, including
	// the "EXPLAIN (opt, env)" preamble.
	out.writef("%s;\n----\n%s", ef.planner.stmt.AST.String(), plan)
	url, err := out.finish()
	if err != nil {
		return nil, err
	}
	// Return a single-row values node containing the generated URL.
	return &valuesNode{
		columns:          append(colinfo.ResultColumns(nil), colinfo.ExplainPlanColumns...),
		tuples:           [][]tree.TypedExpr{{tree.NewDString(url.String())}},
		specifiedInQuery: true,
	}, nil
}
// ConstructExplainOpt is part of the exec.Factory interface.
func (ef *execFactory) ConstructExplainOpt(
	planText string, envOpts exec.ExplainEnvData,
) (exec.Node, error) {
	// If this was an EXPLAIN (opt, env), we need to run a bunch of auxiliary
	// queries to fetch the environment info.
	if envOpts.ShowEnv {
		return ef.showEnv(planText, envOpts)
	}
	// Otherwise emit the plan text itself, one output row per line.
	var tuples [][]tree.TypedExpr
	for _, line := range strings.Split(strings.Trim(planText, "\n"), "\n") {
		tuples = append(tuples, []tree.TypedExpr{tree.NewDString(line)})
	}
	return &valuesNode{
		columns:          append(colinfo.ResultColumns(nil), colinfo.ExplainPlanColumns...),
		tuples:           tuples,
		specifiedInQuery: true,
	}, nil
}
// ConstructShowTrace is part of the exec.Factory interface.
func (ef *execFactory) ConstructShowTrace(typ tree.ShowTraceType, compact bool) (exec.Node, error) {
	var node planNode = ef.planner.makeShowTraceNode(compact, typ == tree.ShowTraceKV)
	// Ensure the messages are sorted in age order, so that the user
	// does not get confused.
	ageColIdx := colinfo.GetTraceAgeColumnIdx(compact)
	node = &sortNode{
		singleInputPlanNode: singleInputPlanNode{node},
		ordering: colinfo.ColumnOrdering{
			colinfo.ColumnOrderInfo{ColIdx: ageColIdx, Direction: encoding.Ascending},
		},
	}
	// SHOW TRACE FOR ... REPLICA wraps the sorted trace in an extra node that
	// resolves replica information.
	if typ == tree.ShowTraceReplica {
		node = &showTraceReplicaNode{
			singleInputPlanNode: singleInputPlanNode{node},
		}
	}
	return node, nil
}
// ordinalsToIndexes resolves each index ordinal in ords against table and
// returns the corresponding catalog.Index descriptors; it returns nil for an
// empty ordinal list.
func ordinalsToIndexes(table cat.Table, ords cat.IndexOrdinals) []catalog.Index {
	if len(ords) == 0 {
		return nil
	}
	out := make([]catalog.Index, 0, len(ords))
	for _, ord := range ords {
		out = append(out, table.Index(ord).(*optIndex).idx)
	}
	return out
}
// ordinalsToIndexes2 resolves two lists of index ordinals against table using
// a single shared allocation, returning one []catalog.Index per input list.
// The first result uses a full slice expression so that appending to it
// cannot spill into the second result's storage.
func ordinalsToIndexes2(
	table cat.Table, a, b cat.IndexOrdinals,
) ([]catalog.Index, []catalog.Index) {
	lenA, lenB := len(a), len(b)
	if lenA+lenB == 0 {
		return nil, nil
	}
	// One allocation, split into two views.
	indexes := make([]catalog.Index, lenA+lenB)
	indexesA, indexesB := indexes[:lenA:lenA], indexes[lenA:]
	for i, idx := range a {
		indexesA[i] = table.Index(idx).(*optIndex).idx
	}
	for i, idx := range b {
		indexesB[i] = table.Index(idx).(*optIndex).idx
	}
	return indexesA, indexesB
}
// ConstructInsert is part of the exec.Factory interface.
func (ef *execFactory) ConstructInsert(
	input exec.Node,
	table cat.Table,
	arbiterIndexes cat.IndexOrdinals,
	arbiterConstraints cat.UniqueOrdinals,
	insertColOrdSet exec.TableColumnOrdinalSet,
	returnColOrdSet exec.TableColumnOrdinalSet,
	checkOrdSet exec.CheckOrdinalSet,
	uniqueWithTombstoneIndexes cat.IndexOrdinals,
	autoCommit bool,
	vectorInsert bool,
) (exec.Node, error) {
	// Derive insert table and column descriptors.
	rowsNeeded := !returnColOrdSet.Empty()
	tabDesc := table.(*optTable).desc
	cols := makeColList(table, insertColOrdSet)
	// Create the table inserter, which does the bulk of the work.
	ri, err := row.MakeInserter(
		ef.planner.ExecCfg().Codec,
		tabDesc,
		ordinalsToIndexes(table, uniqueWithTombstoneIndexes),
		cols,
		ef.planner.SessionData(),
		&ef.planner.ExecCfg().Settings.SV,
		ef.planner.ExecCfg().GetRowMetrics(ef.planner.SessionData().Internal),
	)
	if err != nil {
		return nil, err
	}
	// Regular path for INSERT.
	// insertNodes are pooled; overwrite the recycled value wholesale.
	ins := insertNodePool.Get().(*insertNode)
	*ins = insertNode{
		singleInputPlanNode: singleInputPlanNode{input.(planNode)},
		vectorInsert:        vectorInsert,
		run: insertRun{
			ti:         tableInserter{ri: ri},
			checkOrds:  checkOrdSet,
			insertCols: ri.InsertCols,
		},
	}
	ins.run.regionLocalInfo.setupEnforceHomeRegion(ef.planner, table, cols, ins.run.ti.ri.InsertColIDtoRowIndex)
	// If rows are not needed, no columns are returned.
	if rowsNeeded {
		returnCols := makeColList(table, returnColOrdSet)
		ins.columns = colinfo.ResultColumnsFromColumns(tabDesc.GetID(), returnCols)
		// Set the tabColIdxToRetIdx for the mutation. Insert always returns
		// non-mutation columns in the same order they are defined in the table.
		ins.run.tabColIdxToRetIdx = makePublicToReturnColumnIndexMapping(tabDesc, returnCols)
		ins.run.rowsNeeded = true
	}
	if autoCommit {
		ins.enableAutoCommit()
	}
	return ins, nil
}
// ConstructInsertFastPath is part of the exec.Factory interface. It builds
// the fast-path INSERT node that takes its rows directly as literal tuples
// and performs FK/uniqueness checks inline.
func (ef *execFactory) ConstructInsertFastPath(
	rows [][]tree.TypedExpr,
	table cat.Table,
	insertColOrdSet exec.TableColumnOrdinalSet,
	returnColOrdSet exec.TableColumnOrdinalSet,
	checkOrdSet exec.CheckOrdinalSet,
	fkChecks []exec.InsertFastPathCheck,
	uniqChecks []exec.InsertFastPathCheck,
	uniqueWithTombstoneIndexes cat.IndexOrdinals,
	autoCommit bool,
) (exec.Node, error) {
	// Derive insert table and column descriptors.
	rowsNeeded := !returnColOrdSet.Empty()
	tabDesc := table.(*optTable).desc
	cols := makeColList(table, insertColOrdSet)
	// Create the table inserter, which does the bulk of the work.
	ri, err := row.MakeInserter(
		ef.planner.ExecCfg().Codec,
		tabDesc,
		ordinalsToIndexes(table, uniqueWithTombstoneIndexes),
		cols,
		ef.planner.SessionData(),
		&ef.planner.ExecCfg().Settings.SV,
		ef.planner.ExecCfg().GetRowMetrics(ef.planner.SessionData().Internal),
	)
	if err != nil {
		return nil, err
	}
	// Regular path for INSERT.
	// insertFastPathNodes are pooled; overwrite the recycled value wholesale.
	ins := insertFastPathNodePool.Get().(*insertFastPathNode)
	*ins = insertFastPathNode{
		input: rows,
		run: insertFastPathRun{
			insertRun: insertRun{
				ti:         tableInserter{ri: ri},
				checkOrds:  checkOrdSet,
				insertCols: ri.InsertCols,
			},
		},
	}
	ins.run.regionLocalInfo.setupEnforceHomeRegion(ef.planner, table, cols, ins.run.ti.ri.InsertColIDtoRowIndex)
	if len(uniqChecks) > 0 {
		ins.run.uniqChecks = make([]insertFastPathCheck, len(uniqChecks))
		for i := range uniqChecks {
			ins.run.uniqChecks[i].InsertFastPathCheck = uniqChecks[i]
		}
	}
	if len(fkChecks) > 0 {
		ins.run.fkChecks = make([]insertFastPathCheck, len(fkChecks))
		for i := range fkChecks {
			ins.run.fkChecks[i].InsertFastPathCheck = fkChecks[i]
		}
	}
	// If rows are not needed, no columns are returned.
	if rowsNeeded {
		returnCols := makeColList(table, returnColOrdSet)
		ins.columns = colinfo.ResultColumnsFromColumns(tabDesc.GetID(), returnCols)
		// Set the tabColIdxToRetIdx for the mutation. Insert always returns
		// non-mutation columns in the same order they are defined in the table.
		ins.run.tabColIdxToRetIdx = makePublicToReturnColumnIndexMapping(tabDesc, returnCols)
		ins.run.rowsNeeded = true
	}
	// With no input rows there is nothing to insert; return a zeroNode with
	// the computed output schema. NOTE(review): the pooled ins is not returned
	// to the pool on this path — confirm whether that is intentional.
	if len(rows) == 0 {
		return &zeroNode{columns: ins.columns}, nil
	}
	if autoCommit {
		ins.enableAutoCommit()
	}
	return ins, nil
}
// ConstructUpdate is part of the exec.Factory interface.
func (ef *execFactory) ConstructUpdate(
	input exec.Node,
	table cat.Table,
	fetchColOrdSet exec.TableColumnOrdinalSet,
	updateColOrdSet exec.TableColumnOrdinalSet,
	returnColOrdSet exec.TableColumnOrdinalSet,
	checks exec.CheckOrdinalSet,
	passthrough colinfo.ResultColumns,
	uniqueWithTombstoneIndexes cat.IndexOrdinals,
	lockedIndexes cat.IndexOrdinals,
	autoCommit bool,
) (exec.Node, error) {
	// TODO(radu): the execution code has an annoying limitation that the fetch
	// columns must be a superset of the update columns, even when the "old" value
	// of a column is not necessary. The optimizer code for pruning columns is
	// aware of this limitation.
	if !updateColOrdSet.SubsetOf(fetchColOrdSet) {
		return nil, errors.AssertionFailedf("execution requires all update columns have a fetch column")
	}
	rowsNeeded := !returnColOrdSet.Empty()
	tabDesc := table.(*optTable).desc
	// updateNodes are pooled; overwrite the recycled value wholesale.
	upd := updateNodePool.Get().(*updateNode)
	*upd = updateNode{
		singleInputPlanNode: singleInputPlanNode{input.(planNode)},
	}
	// If rows are not needed, no columns are returned.
	// TODO(mgartner): Combine returnCols allocations with allocations for
	// fetchCols and updateCols in constructUpdateRun.
	var returnCols []catalog.Column
	if rowsNeeded {
		returnCols = makeColList(table, returnColOrdSet)
		upd.columns = colinfo.ResultColumnsFromColumns(tabDesc.GetID(), returnCols)
		// Add the passthrough columns to the returning columns.
		upd.columns = append(upd.columns, passthrough...)
	}
	// Create the table updater, which does the bulk of the work.
	if err := ef.constructUpdateRun(
		&upd.run, table, fetchColOrdSet, updateColOrdSet, returnColOrdSet, rowsNeeded, returnCols,
		checks, passthrough, uniqueWithTombstoneIndexes, lockedIndexes,
	); err != nil {
		return nil, err
	}
	if autoCommit {
		upd.enableAutoCommit()
	}
	return upd, nil
}
// ConstructUpdateSwap is part of the exec.Factory interface. Compared to
// ConstructUpdate, it additionally requires the fetch columns to cover every
// non-system column of the primary index, and it passes no check constraints
// or tombstone indexes to the update run.
func (ef *execFactory) ConstructUpdateSwap(
	input exec.Node,
	table cat.Table,
	fetchColOrdSet exec.TableColumnOrdinalSet,
	updateColOrdSet exec.TableColumnOrdinalSet,
	returnColOrdSet exec.TableColumnOrdinalSet,
	passthrough colinfo.ResultColumns,
	lockedIndexes cat.IndexOrdinals,
	autoCommit bool,
) (exec.Node, error) {
	// TODO(radu): the execution code has an annoying limitation that the fetch
	// columns must be a superset of the update columns, even when the "old" value
	// of a column is not necessary. The optimizer code for pruning columns is
	// aware of this limitation.
	if !updateColOrdSet.SubsetOf(fetchColOrdSet) {
		return nil, errors.AssertionFailedf("execution requires all update columns have a fetch column")
	}
	// For update swap, fetch columns need to include at least every column that
	// could appear in the primary index.
	primaryIndex := table.Index(cat.PrimaryIndex)
	for i := 0; i < primaryIndex.ColumnCount(); i++ {
		col := primaryIndex.Column(i)
		// System columns are not stored and are exempt from the check.
		if col.Kind() == cat.System {
			continue
		}
		if !fetchColOrdSet.Contains(col.Ordinal()) {
			return nil, errors.AssertionFailedf("fetch columns missing col %v", col.ColName())
		}
	}
	rowsNeeded := !returnColOrdSet.Empty()
	tabDesc := table.(*optTable).desc
	// updateSwapNodes are pooled; overwrite the recycled value wholesale.
	upd := updateSwapNodePool.Get().(*updateSwapNode)
	*upd = updateSwapNode{
		singleInputPlanNode: singleInputPlanNode{input.(planNode)},
	}
	// If rows are not needed, no columns are returned.
	var returnCols []catalog.Column
	if rowsNeeded {
		returnCols = makeColList(table, returnColOrdSet)
		upd.columns = colinfo.ResultColumnsFromColumns(tabDesc.GetID(), returnCols)
		// Add the passthrough columns to the returning columns.
		upd.columns = append(upd.columns, passthrough...)
	}
	// Create the table updater, which does the bulk of the work.
	if err := ef.constructUpdateRun(
		&upd.run, table, fetchColOrdSet, updateColOrdSet, returnColOrdSet, rowsNeeded,
		returnCols, exec.CheckOrdinalSet{} /* checks */, passthrough,
		nil /* uniqueWithTombstoneIndexes */, lockedIndexes,
	); err != nil {
		return nil, err
	}
	if autoCommit {
		upd.enableAutoCommit()
	}
	return upd, nil
}
// constructUpdateRun initializes run with the row.Updater and the bookkeeping
// (check ordinals, passthrough count, home-region enforcement, and — when
// rowsNeeded — the fetch-to-return column mapping) shared by ConstructUpdate
// and ConstructUpdateSwap.
func (ef *execFactory) constructUpdateRun(
	run *updateRun,
	table cat.Table,
	fetchColOrdSet exec.TableColumnOrdinalSet,
	updateColOrdSet exec.TableColumnOrdinalSet,
	returnColOrdSet exec.TableColumnOrdinalSet,
	rowsNeeded bool,
	returnCols []catalog.Column,
	checks exec.CheckOrdinalSet,
	passthrough colinfo.ResultColumns,
	uniqueWithTombstoneIndexes cat.IndexOrdinals,
	lockedIndexes cat.IndexOrdinals,
) error {
	tabDesc := table.(*optTable).desc
	fetchCols, updateCols := makeColList2(table, fetchColOrdSet, updateColOrdSet)
	// Create the table updater.
	tombstoneIdxs, lockIdxs := ordinalsToIndexes2(table, uniqueWithTombstoneIndexes, lockedIndexes)
	ru, err := row.MakeUpdater(
		ef.planner.ExecCfg().Codec,
		tabDesc,
		tombstoneIdxs,
		lockIdxs,
		updateCols,
		fetchCols,
		row.UpdaterDefault,
		ef.planner.SessionData(),
		&ef.planner.ExecCfg().Settings.SV,
		ef.planner.ExecCfg().GetRowMetrics(ef.planner.SessionData().Internal),
	)
	if err != nil {
		return err
	}
	run.tu = tableUpdater{ru: ru}
	run.checkOrds = checks
	run.numPassthrough = len(passthrough)
	run.regionLocalInfo.setupEnforceHomeRegion(ef.planner, table, ru.UpdateCols,
		run.tu.ru.UpdateColIDtoRowIndex)
	if rowsNeeded {
		// Set the rowIdxToRetIdx for the mutation. Update returns the non-mutation
		// columns specified, in the same order they are defined in the table.
		//
		// The Updater derives/stores the fetch columns of the mutation and
		// since the return columns are always a subset of the fetch columns,
		// we can use the fetch columns to generate the mapping for the
		// returned rows.
		run.rowIdxToRetIdx = row.ColMapping(ru.FetchCols, returnCols)
		run.rowsNeeded = true
	}
	return nil
}
// ConstructUpsert is part of the exec.Factory interface. An upsert needs both
// an inserter (for the insert arm) and an updater (for the conflict arm); the
// canary column ordinal tells the upserter at runtime which arm a given input
// row takes.
func (ef *execFactory) ConstructUpsert(
    input exec.Node,
    table cat.Table,
    arbiterIndexes cat.IndexOrdinals,
    arbiterConstraints cat.UniqueOrdinals,
    canaryCol exec.NodeColumnOrdinal,
    insertColOrdSet exec.TableColumnOrdinalSet,
    fetchColOrdSet exec.TableColumnOrdinalSet,
    updateColOrdSet exec.TableColumnOrdinalSet,
    returnColOrdSet exec.TableColumnOrdinalSet,
    checks exec.CheckOrdinalSet,
    uniqueWithTombstoneIndexes cat.IndexOrdinals,
    lockedIndexes cat.IndexOrdinals,
    autoCommit bool,
) (exec.Node, error) {
    // Derive table and column descriptors.
    rowsNeeded := !returnColOrdSet.Empty()
    tabDesc := table.(*optTable).desc
    insertCols := makeColList(table, insertColOrdSet)
    fetchCols := makeColList(table, fetchColOrdSet)
    updateCols := makeColList(table, updateColOrdSet)

    // Create the table inserter, which does the bulk of the insert-related work.
    ri, err := row.MakeInserter(
        ef.planner.ExecCfg().Codec,
        tabDesc,
        ordinalsToIndexes(table, uniqueWithTombstoneIndexes),
        insertCols,
        ef.planner.SessionData(),
        &ef.planner.ExecCfg().Settings.SV,
        ef.planner.ExecCfg().GetRowMetrics(ef.planner.SessionData().Internal),
    )
    if err != nil {
        return nil, err
    }

    // Create the table updater, which does the bulk of the update-related work.
    tombstoneIdxs, lockIdxs := ordinalsToIndexes2(table, uniqueWithTombstoneIndexes, lockedIndexes)
    ru, err := row.MakeUpdater(
        ef.planner.ExecCfg().Codec,
        tabDesc,
        tombstoneIdxs,
        lockIdxs,
        updateCols,
        fetchCols,
        row.UpdaterDefault,
        ef.planner.SessionData(),
        &ef.planner.ExecCfg().Settings.SV,
        ef.planner.ExecCfg().GetRowMetrics(ef.planner.SessionData().Internal),
    )
    if err != nil {
        return nil, err
    }

    // Instantiate the upsert node. Nodes are pooled, so overwrite the whole
    // struct rather than mutating fields piecemeal.
    ups := upsertNodePool.Get().(*upsertNode)
    *ups = upsertNode{
        singleInputPlanNode: singleInputPlanNode{input.(planNode)},
        run: upsertRun{
            checkOrds:  checks,
            insertCols: ri.InsertCols,
            tw: tableUpserter{
                ri:            ri,
                canaryOrdinal: int(canaryCol),
                fetchCols:     fetchCols,
                updateCols:    updateCols,
                ru:            ru,
            },
        },
    }

    // If rows are not needed, no columns are returned.
    if rowsNeeded {
        returnCols := makeColList(table, returnColOrdSet)
        ups.columns = colinfo.ResultColumnsFromColumns(tabDesc.GetID(), returnCols)
        // Update the tabColIdxToRetIdx for the mutation. Upsert returns
        // non-mutation columns specified, in the same order they are defined
        // in the table.
        ups.run.tw.tabColIdxToRetIdx = makePublicToReturnColumnIndexMapping(tabDesc, returnCols)
        ups.run.tw.returnCols = returnCols
        ups.run.tw.rowsNeeded = true
    }

    if autoCommit {
        ups.enableAutoCommit()
    }
    return ups, nil
}
// ConstructDelete is part of the exec.Factory interface.
func (ef *execFactory) ConstructDelete(
    input exec.Node,
    table cat.Table,
    fetchColOrdSet exec.TableColumnOrdinalSet,
    returnColOrdSet exec.TableColumnOrdinalSet,
    passthrough colinfo.ResultColumns,
    lockedIndexes cat.IndexOrdinals,
    autoCommit bool,
) (exec.Node, error) {
    desc := table.(*optTable).desc
    needRows := !returnColOrdSet.Empty()

    // Grab a delete node from the pool and attach the input plan.
    node := deleteNodePool.Get().(*deleteNode)
    *node = deleteNode{
        singleInputPlanNode: singleInputPlanNode{input.(planNode)},
    }

    // Columns are only produced when a RETURNING clause is present.
    var retCols []catalog.Column
    if needRows {
        retCols = makeColList(table, returnColOrdSet)
        // DELETE returns the requested non-mutation columns in the order they
        // are defined in the table, followed by any passthrough columns.
        node.columns = colinfo.ResultColumnsFromColumns(desc.GetID(), retCols)
        node.columns = append(node.columns, passthrough...)
    }

    // The table deleter does the bulk of the work.
    ef.constructDeleteRun(
        &node.run, table, fetchColOrdSet, needRows, retCols, passthrough, lockedIndexes,
    )

    if autoCommit {
        node.enableAutoCommit()
    }
    return node, nil
}
// ConstructDeleteSwap is part of the exec.Factory interface. A delete swap is
// like a regular delete, but additionally validates that the fetch columns
// cover every non-system column of the primary index.
func (ef *execFactory) ConstructDeleteSwap(
    input exec.Node,
    table cat.Table,
    fetchColOrdSet exec.TableColumnOrdinalSet,
    returnColOrdSet exec.TableColumnOrdinalSet,
    passthrough colinfo.ResultColumns,
    lockedIndexes cat.IndexOrdinals,
    autoCommit bool,
) (exec.Node, error) {
    // For delete swap, fetch columns need to include at least every column that
    // could appear in the primary index.
    primaryIndex := table.Index(cat.PrimaryIndex)
    for i := 0; i < primaryIndex.ColumnCount(); i++ {
        col := primaryIndex.Column(i)
        // System columns are synthesized and never fetched.
        if col.Kind() == cat.System {
            continue
        }
        if !fetchColOrdSet.Contains(col.Ordinal()) {
            return nil, errors.AssertionFailedf("fetch columns missing col %v", col.ColName())
        }
    }

    rowsNeeded := !returnColOrdSet.Empty()
    tabDesc := table.(*optTable).desc

    // Now make a delete node. We use a pool.
    del := deleteSwapNodePool.Get().(*deleteSwapNode)
    *del = deleteSwapNode{
        singleInputPlanNode: singleInputPlanNode{input.(planNode)},
    }

    // If rows are not needed, no columns are returned.
    var returnCols []catalog.Column
    if rowsNeeded {
        returnCols = makeColList(table, returnColOrdSet)
        // Delete returns the non-mutation columns specified, in the same
        // order they are defined in the table.
        del.columns = colinfo.ResultColumnsFromColumns(tabDesc.GetID(), returnCols)
        // Add the passthrough columns to the returning columns.
        del.columns = append(del.columns, passthrough...)
    }

    // Create the table deleter, which does the bulk of the work.
    ef.constructDeleteRun(
        &del.run, table, fetchColOrdSet, rowsNeeded, returnCols, passthrough, lockedIndexes,
    )

    if autoCommit {
        del.enableAutoCommit()
    }
    return del, nil
}
// constructDeleteRun populates the deleteRun state shared by ConstructDelete
// and ConstructDeleteSwap: it builds the row.Deleter and, when rowsNeeded,
// the mapping from fetched rows to the RETURNING columns.
//
// returnCols must correspond to the caller's return-column ordinal set and is
// nil when rowsNeeded is false.
func (ef *execFactory) constructDeleteRun(
    run *deleteRun,
    table cat.Table,
    fetchColOrdSet exec.TableColumnOrdinalSet,
    rowsNeeded bool,
    returnCols []catalog.Column,
    passthrough colinfo.ResultColumns,
    lockedIndexes cat.IndexOrdinals,
) {
    tabDesc := table.(*optTable).desc
    fetchCols := makeColList(table, fetchColOrdSet)

    // Create the table deleter.
    rd := row.MakeDeleter(
        ef.planner.ExecCfg().Codec,
        tabDesc,
        ordinalsToIndexes(table, lockedIndexes),
        fetchCols,
        ef.planner.SessionData(),
        &ef.planner.ExecCfg().Settings.SV,
        ef.planner.ExecCfg().GetRowMetrics(ef.planner.SessionData().Internal),
    )

    run.td = tableDeleter{rd: rd}
    run.numPassthrough = len(passthrough)
    if rowsNeeded {
        // Return columns are a subset of the fetch columns, so the fetch
        // columns generate the returned-row mapping.
        run.rowIdxToRetIdx = row.ColMapping(rd.FetchCols, returnCols)
        run.rowsNeeded = true
    }
}
// ConstructDeleteRange is part of the exec.Factory interface. It plans a
// fast-path DELETE that issues range deletions over the primary index
// instead of fetching and deleting individual rows.
func (ef *execFactory) ConstructDeleteRange(
    table cat.Table,
    needed exec.TableColumnOrdinalSet,
    indexConstraint *constraint.Constraint,
    autoCommit bool,
) (exec.Node, error) {
    desc := table.(*optTable).desc

    // Translate the index constraint into primary-index spans.
    var spanBuilder span.Builder
    spanBuilder.Init(ef.planner.EvalContext(), ef.planner.ExecCfg().Codec, desc, desc.GetPrimaryIndex())
    splitter := span.MakeSplitterForDelete(desc, desc.GetPrimaryIndex(), needed)
    spans, err := spanBuilder.SpansFromConstraint(indexConstraint, splitter)
    if err != nil {
        return nil, err
    }

    node := &deleteRangeNode{
        spans:             spans,
        desc:              desc,
        autoCommitEnabled: autoCommit,
    }
    return node, nil
}
// ConstructVectorSearch is part of the exec.Factory interface. It plans a
// nearest-neighbor search against a vector index, pre-encoding any prefix
// constraint into concrete keys.
func (ef *execFactory) ConstructVectorSearch(
    table cat.Table,
    index cat.Index,
    outCols exec.TableColumnOrdinalSet,
    prefixConstraint *constraint.Constraint,
    queryVector tree.TypedExpr,
    targetNeighborCount uint64,
) (exec.Node, error) {
    tabDesc := table.(*optTable).desc
    idx := index.(*optIndex).idx
    cols := makeColList(table, outCols)
    resultCols := colinfo.ResultColumnsFromColumns(tabDesc.GetID(), cols)

    // Encode the prefix constraint as a list of roachpb.Keys.
    var sb span.Builder
    sb.InitAllowingExternalRowData(
        ef.planner.EvalContext(), ef.planner.ExecCfg().Codec, tabDesc, idx,
    )
    prefixKeys, err := sb.KeysFromVectorPrefixConstraint(ef.ctx, prefixConstraint)
    if err != nil {
        return nil, err
    }

    // Record the index read for observability/statistics.
    ef.recordIndexRead(tabDesc, idx)

    return &vectorSearchNode{
        vectorSearchPlanningInfo: vectorSearchPlanningInfo{
            table:               tabDesc,
            index:               idx,
            prefixKeys:          prefixKeys,
            queryVector:         queryVector,
            targetNeighborCount: targetNeighborCount,
            cols:                cols,
            columns:             resultCols,
        },
    }, nil
}
// ConstructVectorMutationSearch is part of the exec.Factory interface. The
// resulting node passes its input columns through and appends the partition
// key (and, for index puts, the quantized vector) computed for each row.
func (ef *execFactory) ConstructVectorMutationSearch(
    input exec.Node,
    table cat.Table,
    index cat.Index,
    prefixKeyCols []exec.NodeColumnOrdinal,
    queryVectorCol exec.NodeColumnOrdinal,
    suffixKeyCols []exec.NodeColumnOrdinal,
    isIndexPut bool,
) (exec.Node, error) {
    // Pass through the input columns, and project the partition key column and
    // optionally the quantized vectors.
    inputPlan := input.(planNode)
    inputColumns := planColumns(inputPlan)
    // Capacity +2 covers the partition key plus the optional quantized vector.
    cols := make(colinfo.ResultColumns, len(inputColumns), len(inputColumns)+2)
    copy(cols, inputColumns)
    cols = append(cols, colinfo.ResultColumn{Name: "partition-key", Typ: types.Int})
    if isIndexPut {
        cols = append(cols, colinfo.ResultColumn{Name: "quantized-vector", Typ: types.Bytes})
    }

    tabDesc := table.(*optTable).desc
    idx := index.(*optIndex).idx
    // Record the index read for observability/statistics.
    ef.recordIndexRead(tabDesc, idx)

    return &vectorMutationSearchNode{
        singleInputPlanNode: singleInputPlanNode{input: inputPlan},
        vectorMutationSearchPlanningInfo: vectorMutationSearchPlanningInfo{
            table:          tabDesc,
            index:          idx,
            prefixKeyCols:  prefixKeyCols,
            queryVectorCol: queryVectorCol,
            suffixKeyCols:  suffixKeyCols,
            isIndexPut:     isIndexPut,
        },
        columns: cols,
    }, nil
}
// ConstructCreateTable is part of the exec.Factory interface.
func (ef *execFactory) ConstructCreateTable(
    schema cat.Schema, ct *tree.CreateTable,
) (exec.Node, error) {
    // CREATE TABLE is a schema change and may be disabled by cluster settings.
    err := checkSchemaChangeEnabled(ef.ctx, ef.planner.ExecCfg(), "CREATE TABLE")
    if err != nil {
        return nil, err
    }
    node := &createTableNode{
        n:      ct,
        dbDesc: schema.(*optSchema).database,
    }
    return node, nil
}
// ConstructCreateTableAs is part of the exec.Factory interface.
func (ef *execFactory) ConstructCreateTableAs(
    input exec.Node, schema cat.Schema, ct *tree.CreateTable,
) (exec.Node, error) {
    // CREATE TABLE ... AS is a schema change and may be disabled by settings.
    err := checkSchemaChangeEnabled(ef.ctx, ef.planner.ExecCfg(), "CREATE TABLE")
    if err != nil {
        return nil, err
    }
    node := &createTableNode{
        n:      ct,
        dbDesc: schema.(*optSchema).database,
        input:  input.(planNode),
    }
    return node, nil
}
// ConstructCreateView is part of the exec.Factory interface. The schema
// dependencies collected by the optimizer are converted into the descriptor
// reference form the schema changer records on the view.
func (ef *execFactory) ConstructCreateView(
    createView *tree.CreateView,
    schema cat.Schema,
    viewQuery string,
    columns colinfo.ResultColumns,
    deps opt.SchemaDeps,
    typeDeps opt.SchemaTypeDeps,
    funcDeps opt.SchemaFunctionDeps,
) (exec.Node, error) {
    if err := checkSchemaChangeEnabled(
        ef.ctx,
        ef.planner.ExecCfg(),
        "CREATE VIEW",
    ); err != nil {
        return nil, err
    }

    planDeps, typeDepSet, funcDepSet, err := toPlanDependencies(deps, typeDeps, funcDeps)
    if err != nil {
        return nil, err
    }

    return &createViewNode{
        createView: createView,
        viewQuery:  viewQuery,
        dbDesc:     schema.(*optSchema).database,
        columns:    columns,
        planDeps:   planDeps,
        typeDeps:   typeDepSet,
        funcDeps:   funcDepSet,
    }, nil
}
// ConstructCreateFunction is part of the exec.Factory interface. The
// declarative schema changer is tried first; only when it declines (returns a
// nil plan) does the legacy createFunctionNode path run.
func (ef *execFactory) ConstructCreateFunction(
    schema cat.Schema,
    cf *tree.CreateRoutine,
    deps opt.SchemaDeps,
    typeDeps opt.SchemaTypeDeps,
    functionDeps opt.SchemaFunctionDeps,
) (exec.Node, error) {
    if err := checkSchemaChangeEnabled(
        ef.ctx,
        ef.planner.ExecCfg(),
        "CREATE FUNCTION",
    ); err != nil {
        return nil, err
    }

    // Prefer the declarative schema changer when it supports this statement.
    plan, err := ef.planner.SchemaChange(ef.ctx, cf)
    if err != nil {
        return nil, err
    }
    if plan != nil {
        return plan, nil
    }

    planDeps, typeDepSet, funcDepList, err := toPlanDependencies(deps, typeDeps, functionDeps)
    if err != nil {
        return nil, err
    }

    return &createFunctionNode{
        cf:           cf,
        dbDesc:       schema.(*optSchema).database,
        scDesc:       schema.(*optSchema).schema,
        planDeps:     planDeps,
        typeDeps:     typeDepSet,
        functionDeps: funcDepList,
    }, nil
}
// ConstructCreateTrigger is part of the exec.Factory interface. CREATE
// TRIGGER exists only in the declarative schema changer; a nil plan from it
// is therefore an unsupported-feature error rather than a fallback.
func (ef *execFactory) ConstructCreateTrigger(ct *tree.CreateTrigger) (exec.Node, error) {
    if err := checkSchemaChangeEnabled(
        ef.ctx,
        ef.planner.ExecCfg(),
        "CREATE TRIGGER",
    ); err != nil {
        return nil, err
    }
    plan, err := ef.planner.SchemaChange(ef.ctx, ct)
    switch {
    case err != nil:
        return nil, err
    case plan == nil:
        return nil, pgerror.New(pgcode.FeatureNotSupported,
            "CREATE TRIGGER is only implemented in the declarative schema changer")
    default:
        return plan, nil
    }
}
// toPlanDependencies converts the optimizer's schema dependency sets into the
// descriptor-keyed maps recorded on view/function creation nodes. For each
// data-source dependency it captures the specific index and column IDs that
// the defining query references.
func toPlanDependencies(
    deps opt.SchemaDeps, typeDeps opt.SchemaTypeDeps, funcDeps opt.SchemaFunctionDeps,
) (planDependencies, typeDependencies, functionDependencies, error) {
    planDeps := make(planDependencies, len(deps))
    for _, d := range deps {
        desc, err := getDescForDataSource(d.DataSource)
        if err != nil {
            return nil, nil, nil, err
        }
        var ref descpb.TableDescriptor_Reference
        if d.SpecificIndex {
            idx := d.DataSource.(cat.Table).Index(d.Index)
            ref.IndexID = idx.(*optIndex).idx.GetID()
        }
        if !d.ColumnOrdinals.Empty() {
            // Translate column ordinals into stable column IDs.
            ref.ColumnIDs = make([]descpb.ColumnID, 0, d.ColumnOrdinals.Len())
            d.ColumnOrdinals.ForEach(func(ord int) {
                ref.ColumnIDs = append(ref.ColumnIDs, desc.AllColumns()[ord].GetID())
            })
        }
        // Multiple dependencies may target the same descriptor; accumulate
        // their references under one entry.
        entry := planDeps[desc.GetID()]
        entry.desc = desc
        entry.deps = append(entry.deps, ref)
        planDeps[desc.GetID()] = entry
    }

    typeDepSet := make(typeDependencies, typeDeps.Len())
    typeDeps.ForEach(func(id int) {
        typeDepSet[descpb.ID(id)] = struct{}{}
    })

    funcDepList := make(functionDependencies, funcDeps.Len())
    funcDeps.ForEach(func(id int) {
        funcDepList[descpb.ID(id)] = struct{}{}
    })
    return planDeps, typeDepSet, funcDepList, nil
}
// ConstructSequenceSelect is part of the exec.Factory interface. It delegates
// to the planner to build a node that reads the sequence's current state.
func (ef *execFactory) ConstructSequenceSelect(sequence cat.Sequence) (exec.Node, error) {
    return ef.planner.SequenceSelectNode(sequence.(*optSequence).desc)
}
// ConstructSaveTable is part of the exec.Factory interface. It wraps the
// input plan in a node that saves its rows into the named table.
func (ef *execFactory) ConstructSaveTable(
    input exec.Node, table *cat.DataSourceName, colNames []string,
) (exec.Node, error) {
    return ef.planner.makeSaveTable(input.(planNode), table, colNames), nil
}
// ConstructErrorIfRows is part of the exec.Factory interface. The resulting
// node raises the error built by mkErr if its input produces any rows.
func (ef *execFactory) ConstructErrorIfRows(
    input exec.Node, mkErr exec.MkErrFn,
) (exec.Node, error) {
    node := &errorIfRowsNode{
        singleInputPlanNode: singleInputPlanNode{input.(planNode)},
        mkErr:               mkErr,
    }
    return node, nil
}
// ConstructOpaque is part of the exec.Factory interface. It builds a node for
// a statement the optimizer treats as opaque (planned outside of it).
func (ef *execFactory) ConstructOpaque(metadata opt.OpaqueMetadata) (exec.Node, error) {
    return constructOpaque(metadata)
}
// ConstructAlterTableSplit is part of the exec.Factory interface. SPLIT AT is
// gated both by the schema-change feature flag and, for secondary tenants, by
// a dedicated cluster setting.
func (ef *execFactory) ConstructAlterTableSplit(
    index cat.Index, input exec.Node, expiration tree.TypedExpr,
) (exec.Node, error) {
    execCfg := ef.planner.ExecCfg()
    if err := checkSchemaChangeEnabled(
        ef.ctx,
        execCfg,
        "ALTER TABLE/INDEX SPLIT AT",
    ); err != nil {
        return nil, err
    }
    if err := sqlclustersettings.RequireSystemTenantOrClusterSetting(execCfg.Codec, execCfg.Settings, SecondaryTenantSplitAtEnabled); err != nil {
        return nil, err
    }

    // Evaluate the optional split expiration eagerly at planning time.
    expirationTime, err := parseExpirationTime(ef.ctx, ef.planner.EvalContext(), expiration)
    if err != nil {
        return nil, err
    }

    return &splitNode{
        tableDesc:           index.Table().(*optTable).desc,
        index:               index.(*optIndex).idx,
        singleInputPlanNode: singleInputPlanNode{input.(planNode)},
        expirationTime:      expirationTime,
    }, nil
}
// ConstructAlterTableUnsplit is part of the exec.Factory interface.
func (ef *execFactory) ConstructAlterTableUnsplit(
    index cat.Index, input exec.Node,
) (exec.Node, error) {
    // UNSPLIT AT is gated by the schema-change feature flag.
    err := checkSchemaChangeEnabled(ef.ctx, ef.planner.ExecCfg(), "ALTER TABLE/INDEX UNSPLIT AT")
    if err != nil {
        return nil, err
    }
    node := &unsplitNode{
        tableDesc:           index.Table().(*optTable).desc,
        index:               index.(*optIndex).idx,
        singleInputPlanNode: singleInputPlanNode{input.(planNode)},
    }
    return node, nil
}
// ConstructAlterTableUnsplitAll is part of the exec.Factory interface.
func (ef *execFactory) ConstructAlterTableUnsplitAll(index cat.Index) (exec.Node, error) {
    // UNSPLIT ALL is gated by the schema-change feature flag.
    err := checkSchemaChangeEnabled(ef.ctx, ef.planner.ExecCfg(), "ALTER TABLE/INDEX UNSPLIT ALL")
    if err != nil {
        return nil, err
    }
    node := &unsplitAllNode{
        tableDesc: index.Table().(*optTable).desc,
        index:     index.(*optIndex).idx,
    }
    return node, nil
}
// ConstructAlterTableRelocate is part of the exec.Factory interface. Note
// that, unlike split/unsplit, this path performs no feature-flag check.
func (ef *execFactory) ConstructAlterTableRelocate(
    index cat.Index, input exec.Node, relocateSubject tree.RelocateSubject,
) (exec.Node, error) {
    return &relocateNode{
        subjectReplicas:     relocateSubject,
        tableDesc:           index.Table().(*optTable).desc,
        index:               index.(*optIndex).idx,
        singleInputPlanNode: singleInputPlanNode{input.(planNode)},
    }, nil
}
// ConstructAlterRangeRelocate is part of the exec.Factory interface. The
// store-ID expressions are evaluated at execution time, not here.
func (ef *execFactory) ConstructAlterRangeRelocate(
    input exec.Node,
    relocateSubject tree.RelocateSubject,
    toStoreID tree.TypedExpr,
    fromStoreID tree.TypedExpr,
) (exec.Node, error) {
    return &relocateRange{
        singleInputPlanNode: singleInputPlanNode{input.(planNode)},
        subjectReplicas:     relocateSubject,
        toStoreID:           toStoreID,
        fromStoreID:         fromStoreID,
    }, nil
}
// ConstructControlJobs is part of the exec.Factory interface. The optional
// reason expression is evaluated eagerly and must be NULL or a string.
func (ef *execFactory) ConstructControlJobs(
    command tree.JobCommand, input exec.Node, reason tree.TypedExpr,
) (exec.Node, error) {
    reasonDatum, err := eval.Expr(ef.ctx, ef.planner.EvalContext(), reason)
    if err != nil {
        return nil, err
    }

    reasonStr := ""
    if reasonDatum != tree.DNull {
        strDatum, ok := reasonDatum.(*tree.DString)
        if !ok {
            return nil, errors.Errorf("expected string value for the reason")
        }
        reasonStr = string(*strDatum)
    }

    node := &controlJobsNode{
        singleInputPlanNode: singleInputPlanNode{input.(planNode)},
        desiredStatus:       jobCommandToDesiredStatus[command],
        reason:              reasonStr,
    }
    return node, nil
}
// ConstructControlSchedules is part of the exec.Factory interface.
// (The comment previously named ConstructControlJobs by mistake.)
func (ef *execFactory) ConstructControlSchedules(
    command tree.ScheduleCommand, input exec.Node,
) (exec.Node, error) {
    return &controlSchedulesNode{
        singleInputPlanNode: singleInputPlanNode{input.(planNode)},
        command:             command,
    }, nil
}
// ConstructShowCompletions is part of the exec.Factory interface.
func (ef *execFactory) ConstructShowCompletions(command *tree.ShowCompletions) (exec.Node, error) {
    return &completionsNode{
        n: command,
    }, nil
}
// ConstructCancelQueries is part of the exec.Factory interface. ifExists
// suppresses errors for query IDs that do not exist.
func (ef *execFactory) ConstructCancelQueries(input exec.Node, ifExists bool) (exec.Node, error) {
    return &cancelQueriesNode{
        singleInputPlanNode: singleInputPlanNode{input.(planNode)},
        ifExists:            ifExists,
    }, nil
}
// ConstructCancelSessions is part of the exec.Factory interface. ifExists
// suppresses errors for session IDs that do not exist.
func (ef *execFactory) ConstructCancelSessions(input exec.Node, ifExists bool) (exec.Node, error) {
    return &cancelSessionsNode{
        singleInputPlanNode: singleInputPlanNode{input.(planNode)},
        ifExists:            ifExists,
    }, nil
}
// ConstructCreateStatistics is part of the exec.Factory interface. When a
// WHERE constraint is supplied, it is pre-resolved here into index spans for
// a partial-stats collection.
func (ef *execFactory) ConstructCreateStatistics(
    cs *tree.CreateStats, table cat.Table, index cat.Index, whereConstraint *constraint.Constraint,
) (exec.Node, error) {
    if err := featureflag.CheckEnabled(
        ef.ctx,
        ef.planner.ExecCfg(),
        featureStatsEnabled,
        "ANALYZE/CREATE STATISTICS",
    ); err != nil {
        return nil, err
    }

    var whereSpans roachpb.Spans
    var whereIndexID descpb.IndexID
    if whereConstraint != nil {
        tabDesc := table.(*optTable).desc
        idx := index.(*optIndex).idx
        whereIndexID = idx.GetID()
        var sb span.Builder
        sb.InitAllowingExternalRowData(
            ef.planner.EvalContext(), ef.planner.ExecCfg().Codec, tabDesc, idx,
        )
        spans, err := sb.SpansFromConstraint(whereConstraint, span.NoopSplitter())
        if err != nil {
            return nil, err
        }
        whereSpans = spans
    }

    // Don't run as a job if we are inside an EXPLAIN / EXPLAIN ANALYZE. That will
    // allow us to get insight into the actual execution.
    runAsJob := !ef.isExplain && ef.planner.instrumentation.ShouldUseJobForCreateStats()

    return &createStatsNode{
        CreateStats:  *cs,
        p:            ef.planner,
        runAsJob:     runAsJob,
        whereSpans:   whereSpans,
        whereIndexID: whereIndexID,
    }, nil
}
// ConstructExplain is part of the exec.Factory interface. It re-plans the
// statement through a nested factory flagged as isExplain, then wraps the
// result in the node matching the requested explain mode.
func (ef *execFactory) ConstructExplain(
    options *tree.ExplainOptions,
    stmtType tree.StatementReturnType,
    buildFn exec.BuildPlanForExplainFn,
) (exec.Node, error) {
    if options.Flags[tree.ExplainFlagEnv] {
        return nil, errors.New("ENV only supported with (OPT) option")
    }

    // Build the plan to explain with a nested, explain-flagged factory.
    explainFactory := &execFactory{
        ctx:       ef.ctx,
        planner:   ef.planner,
        isExplain: true,
    }
    plan, err := buildFn(explainFactory)
    if err != nil {
        return nil, err
    }

    switch options.Mode {
    case tree.ExplainVec:
        inner := plan.(*explain.Plan).WrappedPlan.(*planComponents)
        return &explainVecNode{options: options, plan: *inner}, nil
    case tree.ExplainDDL:
        inner := plan.(*explain.Plan).WrappedPlan.(*planComponents)
        return &explainDDLNode{options: options, plan: *inner}, nil
    }

    flags := explain.MakeFlags(options)
    if ef.planner.execCfg.TestingKnobs.DeterministicExplain {
        // Scrub volatile values so test output is stable.
        flags.Deflake = explain.DeflakeVolatile
    }
    return &explainPlanNode{
        options: options,
        flags:   flags,
        plan:    plan.(*explain.Plan),
    }, nil
}
// ConstructCall is part of the exec.Factory interface. It plans a CALL of the
// given stored procedure.
//
// Receiver renamed from `e` to `ef` for consistency with every other method
// on execFactory in this file.
func (ef *execFactory) ConstructCall(proc *tree.RoutineExpr) (exec.Node, error) {
    return &callNode{proc: proc}, nil
}
// renderBuilder encapsulates the code to build a renderNode.
type renderBuilder struct {
    // r is the render node under construction.
    r *renderNode
    // res is the node to hand back to the caller (currently always r).
    res planNode
}

// init initializes the renderNode with render expressions.
// The node initially mirrors the input's columns; setOutput replaces them.
func (rb *renderBuilder) init(n exec.Node, reqOrdering exec.OutputOrdering) {
    p := n.(planNode)
    rb.r = &renderNode{
        singleInputPlanNode: singleInputPlanNode{p},
        columns:             planColumns(p),
    }
    rb.r.reqOrdering = ReqOrdering(reqOrdering)
    rb.res = rb.r
}

// setOutput sets the output of the renderNode. exprs is the list of render
// expressions, and columns is the list of information about the expressions,
// including their names, types, and so on. They must be the same length.
func (rb *renderBuilder) setOutput(exprs tree.TypedExprs, columns colinfo.ResultColumns) {
    rb.r.render = exprs
    rb.r.columns = columns
}
// makeColList returns a list of table column interfaces. Columns are
// included if their ordinal position in the table schema is in the cols set;
// the result preserves table (ordinal) order.
func makeColList(table cat.Table, cols exec.TableColumnOrdinalSet) []catalog.Column {
    tab := table.(optCatalogTableInterface)
    out := make([]catalog.Column, 0, cols.Len())
    for ord, total := 0, table.ColumnCount(); ord < total; ord++ {
        if cols.Contains(ord) {
            out = append(out, tab.getCol(ord))
        }
    }
    return out
}
// makeColList2 is similar to makeColList, but it takes two sets of ordinals and
// allocates a single slice which is split into two.
//
// Both result slices alias the same backing array: listA uses a full slice
// expression to clamp its capacity to lenA, so appends to it can never spill
// into listB's region, which starts at offset lenA (with capacity lenB).
func makeColList2(
    table cat.Table, a, b exec.TableColumnOrdinalSet,
) ([]catalog.Column, []catalog.Column) {
    tab := table.(optCatalogTableInterface)
    lenA, lenB := a.Len(), b.Len()
    cols := make([]catalog.Column, 0, lenA+lenB)
    listA, listB := cols[:0:lenA], cols[lenA:lenA]
    for i, n := 0, table.ColumnCount(); i < n; i++ {
        col := tab.getCol(i)
        // A column present in both sets is appended to both lists.
        if a.Contains(i) {
            listA = append(listA, col)
        }
        if b.Contains(i) {
            listB = append(listB, col)
        }
    }
    return listA, listB
}
// makePublicToReturnColumnIndexMapping returns a map from the ordinals
// of the table's public columns to ordinals in the returnColDescs slice.
//
// More precisely, for 0 <= i < len(tableDesc.PublicColumns()):
// result[i] = j such that returnColDescs[j].ID is the ID of
// the i'th public column, or
// -1 if the i'th public column is not found in returnColDescs.
//
// This is a thin wrapper over row.ColMapping restricted to public columns.
func makePublicToReturnColumnIndexMapping(
    tableDesc catalog.TableDescriptor, returnCols []catalog.Column,
) []int {
    return row.ColMapping(tableDesc.PublicColumns(), returnCols)
}
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import os.path
from datetime import datetime, timedelta
# Gate for the "simple" MWS integration tests: they require a real
# Merchant/SellerId in the environment; without it every test is skipped.
simple = os.environ.get('MWS_MERCHANT', None)
if not simple:
    print("""
Please set the MWS_MERCHANT environmental variable
to your Merchant or SellerId to enable MWS tests.
""")
advanced = False
isolator = True
if __name__ == "__main__":
    # When run as a script, prefer the local boto checkout (three levels up)
    # over any installed copy.
    devpath = os.path.relpath(os.path.join('..', '..', '..'),
                              start=os.path.dirname(__file__))
    sys.path = [devpath] + sys.path
    # Equivalent to bool(simple): "advanced" mode is enabled whenever a
    # merchant ID is configured AND we are executing directly.
    advanced = simple and True or False
    if advanced:
        print('>>> advanced MWS tests; using local boto sources')
class MWSTestCase(unittest.TestCase):
    """Live integration tests against Amazon MWS.

    Every test talks to the real service and is skipped unless both
    ``simple`` (the MWS_MERCHANT env var) and ``isolator`` are truthy.
    """

    def setUp(self):
        # A fresh connection per test; debug=0 silences wire logging.
        self.mws = MWSConnection(Merchant=simple, debug=0)

    @unittest.skipUnless(simple and isolator, "skipping simple test")
    def test_feedlist(self):
        # Smoke test: the call must simply not raise.
        self.mws.get_feed_submission_list()

    @unittest.skipUnless(simple and isolator, "skipping simple test")
    def test_inbound_status(self):
        response = self.mws.get_inbound_service_status()
        status = response.GetServiceStatusResult.Status
        self.assertIn(status, ('GREEN', 'GREEN_I', 'YELLOW', 'RED'))

    @property
    def marketplace(self):
        """First marketplace this merchant participates in (cached)."""
        try:
            return self._marketplace
        except AttributeError:
            response = self.mws.list_marketplace_participations()
            result = response.ListMarketplaceParticipationsResult
            self._marketplace = result.ListMarketplaces.Marketplace[0]
            # Re-enters the property; the second pass hits the cache above.
            return self.marketplace

    @property
    def marketplace_id(self):
        return self.marketplace.MarketplaceId

    @unittest.skipUnless(simple and isolator, "skipping simple test")
    def test_marketplace_participations(self):
        response = self.mws.list_marketplace_participations()
        result = response.ListMarketplaceParticipationsResult
        self.assertTrue(result.ListMarketplaces.Marketplace[0].MarketplaceId)

    @unittest.skipUnless(simple and isolator, "skipping simple test")
    def test_get_product_categories_for_asin(self):
        # NOTE(review): asserts live catalog data for a fixed ASIN; the
        # expected category IDs may drift over time.
        asin = '144930544X'
        response = self.mws.get_product_categories_for_asin(
            MarketplaceId=self.marketplace_id,
            ASIN=asin)
        self.assertEqual(len(response._result.Self), 3)
        categoryids = [x.ProductCategoryId for x in response._result.Self]
        self.assertSequenceEqual(categoryids, ['285856', '21', '491314'])

    @unittest.skipUnless(simple and isolator, "skipping simple test")
    def test_list_matching_products(self):
        response = self.mws.list_matching_products(
            MarketplaceId=self.marketplace_id,
            Query='boto')
        products = response._result.Products
        self.assertTrue(len(products))

    @unittest.skipUnless(simple and isolator, "skipping simple test")
    def test_get_matching_product(self):
        asin = 'B001UDRNHO'
        response = self.mws.get_matching_product(
            MarketplaceId=self.marketplace_id,
            ASINList=[asin])
        attributes = response._result[0].Product.AttributeSets.ItemAttributes
        self.assertEqual(attributes[0].Label, 'Serengeti')

    @unittest.skipUnless(simple and isolator, "skipping simple test")
    def test_get_matching_product_for_id(self):
        asins = ['B001UDRNHO', '144930544X']
        response = self.mws.get_matching_product_for_id(
            MarketplaceId=self.marketplace_id,
            IdType='ASIN',
            IdList=asins)
        self.assertEqual(len(response._result), 2)
        for result in response._result:
            self.assertEqual(len(result.Products.Product), 1)

    @unittest.skipUnless(simple and isolator, "skipping simple test")
    def test_get_lowest_offer_listings_for_asin(self):
        asin = '144930544X'
        response = self.mws.get_lowest_offer_listings_for_asin(
            MarketplaceId=self.marketplace_id,
            ItemCondition='New',
            ASINList=[asin])
        listings = response._result[0].Product.LowestOfferListings
        self.assertTrue(len(listings.LowestOfferListing))

    @unittest.skipUnless(simple and isolator, "skipping simple test")
    def test_list_inventory_supply(self):
        # Query inventory changes over the trailing 30 days.
        asof = (datetime.today() - timedelta(days=30)).isoformat()
        response = self.mws.list_inventory_supply(QueryStartDateTime=asof,
                                                  ResponseGroup='Basic')
        self.assertTrue(hasattr(response._result, 'InventorySupplyList'))
# Run the suite when executed directly (after the sys.path tweak above).
if __name__ == "__main__":
    unittest.main()
import { test } from '../../test';

// Runtime test fixture: the harness renders the sample's component and
// asserts the DOM matches this HTML snapshot (whitespace-normalized by the
// harness — presumably; confirm against the test helper).
export default test({
	html: `
		<div>
			Hello
			<p>bar</p>
			<p>foo</p>
		</div>
	`
});
import { defineConfig } from 'tsup'

// Build config for the browser (Play CDN-style) bundle: a single minified
// IIFE with every dependency inlined and CSS imported as raw text.
export default defineConfig({
	format: ['iife'],
	clean: true,
	minify: true,
	entry: ['src/index.ts'],
	// Inline all dependencies into the bundle.
	noExternal: [/.*/],
	loader: {
		'.css': 'text',
	},
	define: {
		'process.env.NODE_ENV': '"production"',
		'process.env.FEATURES_ENV': '"stable"',
	},
	esbuildPlugins: [
		{
			// Replace the intellisense module with inert stubs so editor-only
			// APIs don't bloat the browser bundle.
			name: 'patch-intellisense-apis',
			setup(build) {
				build.onLoad({ filter: /intellisense.ts$/ }, () => {
					return {
						contents: `
export function getClassList() { return [] }
export function getVariants() { return [] }
export function canonicalizeCandidates() { return [] }
`,
					}
				})
			},
		},
	],
})
# Copyright 2017 Telstra Open Source
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import errno
import json
import time
import threading
import subprocess
import pyroute2
from kilda.traffexam import context as context_module
from kilda.traffexam import exc
from kilda.traffexam import model
from kilda.traffexam import system
class Abstract(system.NSIPDBMixin, context_module.ContextConsumer):
    """Base class for pooled namespace services.

    Keeps a keyed pool of created items and serializes create/delete with a
    lock. Subclasses implement ``_create``/``_delete``/``key``.
    """

    def __init__(self, context):
        super().__init__(context)
        # key(item) -> item; guarded by self._lock for mutations.
        self._pool = {}
        self._lock = threading.Lock()

    def create(self, subject):
        """Create *subject* via the subclass hook and register it.

        Any failure in the hook is wrapped in ServiceCreateError.
        """
        with self._lock:
            try:
                item = self._create(subject)
            except Exception as e:
                raise exc.ServiceCreateError(self, subject) from e
            self._pool[self.key(item)] = item
        return item

    def list(self):
        # Snapshot; not synchronized with concurrent create/delete.
        return tuple(self._pool.values())

    def lookup(self, key):
        """Return the pooled item for *key* or raise ServiceLookupError."""
        try:
            item = self._pool[key]
        except KeyError:
            raise exc.ServiceLookupError(self, key) from None
        return item

    def delete(self, key, ignore_missing=False):
        """Remove and tear down the item for *key*.

        The item is popped before ``_delete`` runs; on failure it is put
        back into the pool so state stays consistent.
        """
        with self._lock:
            try:
                subject = self._pool.pop(key)
            except KeyError:
                if not ignore_missing:
                    raise exc.ServiceLookupError(self, key) from None
                return
            try:
                self._delete(subject)
            except Exception as e:
                # Roll back the pop so a failed teardown is retryable.
                self._pool[key] = subject
                raise exc.ServiceDeleteError(self, key, subject) from e

    def _create(self, subject):
        raise NotImplementedError

    def _delete(self, subject):
        raise NotImplementedError

    def key(self, subject):
        raise NotImplementedError

    def get_gw_iface(self):
        # Namespace-side end of the shared veth pair.
        return self.context.shared_registry.fetch(system.VEthPair).ns
class VLANService(Abstract):
    """Manages VLAN sub-interfaces on top of the gateway veth end."""

    def key(self, subject):
        return subject.tag

    def _create(self, subject):
        tag = self.key(subject)
        ifname = self.make_iface_name(tag)
        ip = self.get_ipdb()
        # The IPDB transaction both creates the vlan interface and brings it
        # up; attribute reads below must use the committed (.ro) view.
        with ip.create(
                kind='vlan', ifname=ifname, vlan_id=tag,
                link=self.get_gw_iface()) as iface:
            iface.up()
        iface = ip.interfaces[ifname].ro
        subject.set_iface(model.NetworkIface(
            ifname, index=iface.index, vlan_tag=tag))
        return subject

    def _delete(self, subject):
        tag = self.key(subject)
        ifname = self.make_iface_name(tag)
        with self.get_ipdb().interfaces[ifname] as iface:
            iface.remove()

    @staticmethod
    def make_iface_name(tag):
        # e.g. tag 42 -> "vlan.42"
        return 'vlan.{}'.format(tag)
class IpAddressService(Abstract):
    """Manages IP addresses assigned to interfaces inside the namespace."""

    def key(self, subject):
        return subject.idnr

    def _create(self, subject):
        # Default to the gateway end of the veth pair when no interface was
        # specified explicitly.
        if subject.iface is None:
            subject.iface = model.NetworkIface(self.get_gw_iface())
        iface_key = subject.iface.get_ipdb_key()
        with self.get_ipdb().interfaces[iface_key] as iface:
            iface.add_ip(subject.address, mask=subject.prefix)
        return subject

    def _delete(self, subject):
        iface_key = subject.iface.get_ipdb_key()
        with self.get_ipdb().interfaces[iface_key] as iface:
            iface.del_ip(subject.address, mask=subject.prefix)
class EndpointService(Abstract):
    """Manages iperf3 endpoints: consumers (servers) and producers (clients).

    Each endpoint is an iperf3 subprocess whose JSON report and stderr are
    captured into per-endpoint files in the context work directory.
    """

    def key(self, subject):
        return subject.idnr

    def get_report(self, key):
        """Return ``(report_dict, stderr_text)`` for a finished endpoint.

        Returns None while the iperf3 process is still running.
        """
        entity = self.lookup(key)
        proc = entity.proc
        if proc.poll() is None:
            return None

        out = []
        for path in (
                self.make_report_file_name(entity),
                self.make_error_file_name(entity)):
            with open(str(path), 'rt') as stream:
                out.append(stream.read())

        report, error = out
        report = json.loads(report)
        return report, error

    def _create(self, subject):
        # Dispatch on endpoint kind; see model.ConsumerEndpoint /
        # model.ProducerEndpoint.
        if isinstance(subject, model.ConsumerEndpoint):
            self._create_consumer(subject)
        elif isinstance(subject, model.ProducerEndpoint):
            self._create_producer(subject)
        else:
            raise ValueError('Unsupported payload {!r}'.format(subject))
        return subject

    def _delete(self, subject):
        # Remove the captured output files (best effort).
        for file in (
                self.make_report_file_name(subject),
                self.make_error_file_name(subject)):
            try:
                file.unlink()
            except FileNotFoundError:
                pass

        # Escalate from SIGTERM (up to 3 attempts) to SIGKILL.
        try:
            for attempt in range(3):
                if subject.proc.poll() is not None:
                    break
                subject.proc.terminate()
                time.sleep(1)
            else:
                subject.proc.kill()
        except OSError as e:
            # ESRCH: process already gone — nothing to do.
            if e.errno != errno.ESRCH:
                raise
        subject.proc.wait()

        if isinstance(subject, model.ConsumerEndpoint):
            subject.bind_address.free_port(subject.bind_port)

    def _create_consumer(self, subject):
        subject.bind_port = subject.bind_address.alloc_port()

        cmd = self.make_cmd_common_part(subject)
        cmd += [
            '--server',
            '--one-off',
            '--port={}'.format(subject.bind_port)]
        self.run_iperf(subject, cmd)

    def _create_producer(self, subject):
        cmd = self.make_cmd_common_part(subject)
        # NOTE(review): '--interval=1' is repeated here and in
        # make_cmd_common_part; harmless duplication, kept for compatibility.
        cmd += [
            '--client={}'.format(subject.remote_address.address),
            '--port={}'.format(subject.remote_address.port),
            '--bandwidth={}'.format(subject.bandwidth * 1024),
            '--time={}'.format(subject.time),
            '--interval=1',
            '--udp']
        self.run_iperf(subject, cmd)

    def make_cmd_common_part(self, subject):
        """Build the iperf3 command prefix, executed inside the netns."""
        cmd = [
            'ip', 'netns', 'exec', self.context.make_network_namespace_name(),
            'iperf3', '--json', '--interval=1']
        if subject.bind_address is not None:
            cmd.append('--bind={}'.format(subject.bind_address.address))
        return cmd

    def run_iperf(self, subject, cmd):
        """Spawn iperf3 with stdout/stderr redirected into report files.

        The parent's file objects are closed immediately after Popen — the
        child keeps its own duplicated descriptors — fixing a file-descriptor
        leak in the previous version, which never closed them.
        """
        with open(str(self.make_report_file_name(subject)), 'wb') as report, \
                open(str(self.make_error_file_name(subject)), 'wb') as err:
            proc = subprocess.Popen(cmd, stdout=report, stderr=err)
        subject.set_proc(proc)
        self.context.children.add(proc)

    def make_report_file_name(self, subject):
        # JSON report captured from iperf3 stdout.
        return self.context.path('{}.json'.format(subject.idnr))

    def make_error_file_name(self, subject):
        # Raw stderr from iperf3.
        return self.context.path('{}.err'.format(subject.idnr))
class Adapter(object):
    # Facade bundling the per-resource services that share one context.
    def __init__(self, context):
        self.address = IpAddressService(context)
        self.vlan = VLANService(context)
        self.endpoint = EndpointService(context)
from rest_framework import status
from rest_framework.test import APITestCase
from .models import Empresa, Calificacion
from .views import *
class EmpresaRESTTests(APITestCase):
    """REST-interface tests for the Empresa endpoint."""

    def test_crear_empresa(self):
        # POSTing a valid payload must create exactly one Empresa.
        payload = {"nombre": "test", "ciudad": "ciudadtest", "sector": "sectortest"}
        response = self.client.post("/empresas/", payload, format="json")
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(Empresa.objects.get().nombre, "test")
        print("Creada empresa correctamente con interfaz REST")

    def test_mostrar_empresas(self):
        # Seed two companies, then check the raw JSON listing.
        for nombre, ciudad, sector in (
                ("test", "ciudadtest", "sectortest"),
                ("test2", "ciudadtest2", "sectortest2")):
            Empresa(nombre=nombre, ciudad=ciudad, sector=sector).save()
        response = self.client.get("/empresas/")
        self.assertEqual(
            response.content,
            b'[{"nombre":"test","ciudad":"ciudadtest","sector":"sectortest"},'
            b'{"nombre":"test2","ciudad":"ciudadtest2","sector":"sectortest2"}]')
        print("Listado de empresas realizado con éxito mediante interfaz REST")
class CalificacionRESTTest(APITestCase):
    """REST-interface test for listing the Calificacion entries of an Empresa."""

    def test_mostrar_calificaciones(self):
        # One company with two gradings; the detail endpoint must list both.
        empresa = Empresa(nombre="test", ciudad="ciudadtest", sector="sectortest")
        empresa.save()
        for alumno, nota in (("alumtest", 10), ("alum2test", 0)):
            Calificacion(alumno=alumno, calificacion=nota, empresa=empresa).save()
        response = self.client.get("/empresas/1/")
        self.assertEqual(
            response.content,
            b'[{"alumno":"alumtest","calificacion":10,"empresa":1},'
            b'{"alumno":"alum2test","calificacion":0,"empresa":1}]')
        print("Listado de calificacion de una empresa exitoso con interfaz REST")
# Create your tests here. | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
"""
Templates for automating the design of different wafer styles.
.. note::
Copyright 2009-2012 Lucas Heitzmann Gabrielli
Copyright 2013 Andrew G. Mark
gdsCAD (based on gdspy) is released under the terms of the GNU GPL
"""
# TODO: Make it more pythonic, create separate classes for cells, blocks etc. to make it easier to read
import string
from operator import itemgetter
import networkx as nx
import numpy as np
from descartes.patch import PolygonPatch
from .core import Cell, Path, Boundary
from .shapes import Circle, Label, LineLabel
from shapely.affinity import rotate as rotateshape
from shapely.affinity import translate as translateshape
from shapely.geometry import Polygon, Point, LineString, box
# Colour palette used by the debug plots below (hex RGB strings).
RED = '#F0421D'
ORANGE = '#F0AC1D'
GREEN = '#1DF042'
BLUE = '#1DF0AC'
BLACK = '#000000'
# Helper function:
# Given two points from a line, returns a cell containing a dashed line connecting the two points
def dashed_line(pt1, pt2, dashlength, width, layer):
    """Return a Cell named 'DASHLINE' holding a dashed line from pt1 to pt2.

    Dash breakpoints are spaced ``dashlength`` apart along the segment; an
    odd breakpoint count is closed with the segment's endpoint so that
    breakpoints always pair up into (start, end) dashes.
    """
    segment = LineString((pt1, pt2))
    distances = np.arange(0, segment.length, dashlength).tolist()
    if len(distances) % 2 == 1:
        # Odd number of breakpoints: terminate the last dash at the endpoint.
        distances.append(segment.length)
    # Interpolate the breakpoints along the segment and group them in pairs.
    breakpoints = [segment.interpolate(d).xy for d in distances]
    pairs = np.reshape(breakpoints, (-1, 2, 2))
    cell = Cell('DASHLINE')
    cell.add([Path(list(pair), width=width, layer=layer) for pair in pairs])
    return cell
class Wafer_TriangStyle(Cell):
"""
Wafer style for [111] wafers consisting of triangular blocks of patterned features.
:param name: The name of the new wafer cell
:param wafer_r: the radius of the wafer in um
:param cells: a list of cells that will be tiled to fill each blocks
the cells will be cycled until all blocks are filled.
:param block_gap: the gap between the triangular blocks
:param cell_gap: the gap between the square cells within each block
:param trisize: in um, length of triangle sides
:param cellsize: size of each cell within a block
:param MCIterations: Number of monte carlo iterations used to find optimal position of cells within the blocks
:param doMCSearch: Whether or not to optimize the placement of the square cells within each triangular block
:param block_gap: the distance to leave between blocks
    :param symmetric_chips: makes the up-facing and down-facing chips symmetric by rotating them 180 degrees. However for direction-sensitive devices (ex: branched structures) the 180 degree rotation is undesirable.
:returns: A new wafer ``Cell``
Spacing between cells in a block is determined automatically based on the cell
bounding box, or by using the attribute cell.spacing if it is available.
"""
# the placement of the wafer alignment points
align_pts = None
    def __init__(self,
                 name,
                 cells=None,
                 wafer_r=25.5e3,
                 trisize=10e3,
                 cellsize=2e3,
                 block_gap=0.,
                 cell_gap=200.,
                 doMCSearch=True,
                 MCIterations=30,  # Small square cells
                 doMCBlockSearch=True,
                 MCBlockIterations=50,  # Large triangular blocks
                 mkWidth=10,
                 cellsAtEdges=False,
                 symmetric_chips=True):
        # NOTE(review): ``mkWidth`` is accepted but never stored or used in
        # this constructor — confirm whether it should feed add_dicing_marks.
        Cell.__init__(self, name)
        self.wafer_r = wafer_r
        self.trisize = trisize
        self.cellsize = cellsize
        self.block_gap = block_gap
        self.cell_gap = cell_gap
        self.doMCSearch = doMCSearch
        self.MCIterations = MCIterations
        self.doMCBlockSearch = doMCBlockSearch
        self.MCBlockIterations = MCBlockIterations
        # Create a circle shape with the radius of the wafer
        circ = Point(0., 0.)
        self.waferShape = circ.buffer(wafer_r)
        self.blockOffset = (0, 0)
        # ``cells`` must be assigned before _cell_layers() inspects it.
        self.cells = cells
        self.cell_layers = self._cell_layers()
        self._label = None
        # Caches filled in later by _place_blocks()/getCellLattice().
        self.upCellLattice = []
        self.downCellLattice = []
        self.upCenters = []
        self.downCenters = []
        self.upTris = []
        self.downTris = []
        self.cellsAtEdges = cellsAtEdges
        self.symmetric_chips = symmetric_chips
def _cell_layers(self):
"""
A list of all active layers in ``cells``
"""
cell_layers = set()
for c in self.cells:
if isinstance(c, Cell):
cell_layers |= set(c.get_layers())
else:
for s in c:
cell_layers |= set(s.get_layers())
return list(cell_layers)
def add_aligment_marks(self, layers):
"""
Create alignment marks on all active layers
"""
if not (type(layers) == list): layers = [layers]
d_layers = self.cell_layers
# styles=['B' if i%2 else 'B' for i in range(len(d_layers))]
# am = AlignmentMarks(styles, d_layers)
am = Cell('CONT_ALGN')
# Define dimensions of the alignment cross
t = 200. # Thickness
t /= 2.
h = 2000. # Height
w = 2000. # Width
crosspts = [
(-t, t), (-t, h), (t, h), (t, t), (w, t), (w, -t), (t, -t), (t, -h),
(-t, -h), (-t, -t), (-w, -t), (-w, t)]
# Create shapely polygon for later calculation
crossPolygon = Polygon(crosspts)
crossPolygons = []
for pt in self.align_pts:
crossPolygons.extend([
translateshape(crossPolygon, xoff=pt[0], yoff=pt[1])])
# TODO: Replace these two loops with a single loop, looping over an array of block objects
# TODO: Make the deleting more efficient by using del for multiple indexes?
i_del = []
# Loop over all triangular blocks
for i, tri in enumerate(self.upTris):
for poly in crossPolygons: # Loop over all alignment crosses
if poly.intersects(tri) or poly.within(tri) or poly.contains(
tri):
# If conflict is detected, remove that triangular block
i_del.append(i)
print(('up:' + str(self.upTris[i].centroid.xy)))
self.upTris = [tri for i, tri in enumerate(self.upTris) if i not in i_del]
# Repeat for down-facing triangles
i_del = []
for i, tri in enumerate(self.downTris):
for poly in crossPolygons:
if poly.intersects(tri) or poly.within(tri) or poly.contains(
tri):
# If conflict is detected, remove that triangular block
i_del.append(i)
print(('down:' + str(self.downTris[i].centroid.xy)))
self.downTris = [tri for i, tri in enumerate(self.downTris) if i not in i_del]
# Refresh the centers of the remaining triangles
self.upCenters = [list(zip(*tri.centroid.xy)[0]) for tri in self.upTris]
self.downCenters = [list(zip(*tri.centroid.xy)[0])
for tri in self.downTris]
for l in layers: # Add marker to all layers
cross = Boundary(crosspts, layer=l) # Create gdsCAD shape
am.add(cross)
mblock = Cell('WAF_ALGN_BLKS')
mblock.add(am)
for pt in self.align_pts:
self.add(mblock, origin=pt)
    def add_orientation_text(self, layers):
        """
        Create Orientation Label

        Renders each (text, position) pair from ``self.o_text`` on every
        layer in ``layers``.
        NOTE(review): ``self.o_text`` is not assigned in __init__ here; it
        is presumably set by a caller or subclass — confirm before use.
        """
        if not (type(layers) == list): layers = [layers]
        tblock = Cell('WAF_ORI_TEXT')
        for l in layers:
            for (t, pt) in list(self.o_text.items()):
                txt = Label(t, 1000, layer=l)
                bbox = txt.bounding_box
                txt.translate(-np.mean(bbox, 0))  # Center text around origin
                txt.translate(np.array(pt))
                tblock.add(txt)
        self.add(tblock)
    def add_dicing_marks(self, layers, mkWidth=100):
        """
        Create dicing marks

        For every block-lattice point inside the wafer, draws the three
        families of dicing lines (horizontal and rotated +/-60 degrees),
        clipped to the wafer circle, on every layer.  Returns the lattice
        points used.
        NOTE(review): this method also plots the lattice via pylab as a
        side effect — presumably leftover debugging.
        """
        if not (type(layers) == list): layers = [layers]
        # Define the array and wafer parameters
        gap = self.block_gap
        wafer_r = self.wafer_r
        sl_tri = self.trisize
        sl_lattice = sl_tri + gap / np.tan(np.deg2rad(30))
        h_lattice = np.sqrt(3.) / 2. * sl_lattice
        # Create the lattice of the "up" facing triangles
        points = self.createPtLattice(2. * wafer_r, sl_lattice / 2., h_lattice)
        points = [np.array(elem) for elem in points]
        points = points + np.array(
            [-sl_lattice / 2., 0])  # Shift lattice so we can cleave the wafer at y=0
        points = points + np.array(self.blockOffset)  # Shift by point from MC search (if applicable)
        # Keep only lattice points inside the wafer disc.
        points = [
            point
            for point in points
            if (Point(point).distance(Point(0, 0)) < wafer_r)
        ]
        import pylab as plt
        # Plot points (debug)
        x, y = list(zip(*points))
        plt.plot(x, y, 'ko')
        # Create a lineshape of the boundary of the circle
        c = self.waferShape.boundary
        # Set: unordered, with unique entries — de-duplicates shared lines.
        dicinglines = set()
        # For each point in the lattice, create three lines (one along each direction)
        for x, y in points:
            l0 = LineString([(-4. * wafer_r, y), (4. * wafer_r, y)])
            l1 = rotateshape(l0, 60, origin=(x, y))
            l2 = rotateshape(l0, -60, origin=(x, y))
            # See where these lines intersect the wafer outline
            i0 = c.intersection(l0)
            i1 = c.intersection(l1)
            i2 = c.intersection(l2)
            # Round the two intersection points so equal lines hash equally.
            p0s = tuple(map(tuple, np.round((i0.geoms[0].coords[0], i0.geoms[
                1].coords[0]))))
            p1s = tuple(map(tuple, np.round((i1.geoms[0].coords[0], i1.geoms[
                1].coords[0]))))
            p2s = tuple(map(tuple, np.round((i2.geoms[0].coords[0], i2.geoms[
                1].coords[0]))))
            # Add these points to a unique unordered set
            dicinglines.add(p0s)
            dicinglines.add(p1s)
            dicinglines.add(p2s)
        # The set now holds unique point pairs used to make the dicing marks.
        dmarks = Cell('DIC_MRKS')
        for l in layers:
            for p1, p2 in dicinglines:
                dicingline = Path([p1, p2], width=mkWidth, layer=l)
                dmarks.add(dicingline)
        self.add(dmarks)
        return points
def add_wafer_outline(self, layers):
"""
Create Wafer Outline
"""
if not (type(layers) == list): layers = [layers]
outline = Cell('WAF_OLINE')
for l in layers:
circ = Circle((0, 0), self.wafer_r, 100, layer=l)
outline.add(circ)
self.add(outline)
    # Gets an optimized list of points where the cells will then be projected within each block
    def getCellLattice(self, cellsize=2000):
        """Return the lattice of cell-center points inside one block.

        With ``doMCSearch`` enabled, tries ``MCIterations`` random vertical
        seed offsets and keeps the layout fitting the most cells (ties go to
        the seed closest to the wafer center).  Also draws a debug plot of
        the chosen layout.
        NOTE(review): if no iteration yields any cells, ``best`` stays
        ``[0, 0, 0, 0]`` and ``cells = best[2]`` is the integer 0, which
        crashes the plotting loop below — confirm inputs always fit cells.
        """
        iterations = self.MCIterations
        ycelloffset = self.cell_gap / 3.5  # Arbitrary, change by trial and error
        if self.doMCSearch:
            best = [0, 0, 0, 0]
            # Iterates many times to find the best fit
            for i in range(iterations):
                # Random seed point
                rndpt = (0, np.random.randint(-cellsize, cellsize))
                # Make cells around this point
                cells = self.makeCells(startpt=rndpt, cellsize=cellsize)
                if not cells:
                    continue
                centroidDist = np.array([cell.centroid.xy for cell in cells]).squeeze()
                if len(centroidDist.shape) == 2:
                    centroidDist = centroidDist.mean(0)
                if len(cells) > best[1]:
                    best = [rndpt, len(cells), cells, centroidDist]
                elif len(cells) == best[1]:
                    # Choose the one that is closer to the center of the wafer
                    if np.sqrt(rndpt[0] ** 2 + rndpt[1] ** 2) < np.sqrt(best[0][0] ** 2 + best[0][1] ** 2):
                        best = [rndpt, len(cells), cells, centroidDist]
            # Choose the best configuration (fits the most cells and is closest to centroid)
            cells = best[2]
        else:
            cells = self.makeCells(cellsize=2000)
        sl_tri = self.trisize
        h_tri = np.sqrt(3.) / 2. * sl_tri
        gap = self.block_gap
        # --- Debug plot of the block and its cells ---
        from matplotlib import pyplot
        fig = pyplot.figure(1, dpi=90)
        ax = fig.add_subplot(111)
        ax.grid()
        ax.axis('equal')
        block = Polygon([
            [-sl_tri / 2., -h_tri / 3.], [sl_tri / 2., -h_tri / 3.],
            [0, 2. * h_tri / 3.], [-sl_tri / 2., -h_tri / 3.]
        ])
        block = translateshape(block, yoff=h_tri / 3. + gap / 2.)
        block = translateshape(block, xoff=self.blockOffset[0],
                               yoff=self.blockOffset[1])  # TODO: plot output not working properly because of this?
        patch = PolygonPatch(block,
                             facecolor="#{0:0{1}X}".format(np.random.randint(0, 16777215), 6),
                             edgecolor=BLACK,
                             alpha=0.3,
                             zorder=2)
        ax.add_patch(patch)
        ax.plot(block.exterior.coords.xy[0], block.exterior.coords.xy[1], 'k-')
        for cell in cells:
            cell = translateshape(cell, yoff=h_tri / 3. + gap / 2. + ycelloffset)
            cell = translateshape(cell, xoff=self.blockOffset[0],
                                  yoff=self.blockOffset[1])  # TODO: plot output not working properly because of this?
            patch = PolygonPatch(cell,
                                 facecolor="#{0:0{1}X}".format(np.random.randint(0, 16777215), 6),
                                 edgecolor='k',
                                 alpha=0.3,
                                 zorder=2)
            ax.add_patch(patch)
        # Convert cells to lattice of points
        cellLattice = np.array([list(zip(*cell.centroid.xy))[0] for cell in cells])
        cellLattice = cellLattice + np.array([0, ycelloffset])
        return cellLattice
# Make takes square cells and sees how many can be fit into a triangular block
def makeCells(self, cellsize=2000, startpt=(0, 0)):
gap = self.cell_gap
# Define the parameters of our shapes
if self.cellsAtEdges:
sl_tri = self.trisize * 1.5 # Only needed if you want to put cells very close the edge of triangle chip
else:
sl_tri = self.trisize
h_tri = np.sqrt(3.) / 2. * sl_tri
# Create the triangular block
block = Polygon([
[-sl_tri / 2., -h_tri / 3.], [sl_tri / 2., -h_tri / 3.],
[0, 2. * h_tri / 3.], [-sl_tri / 2., -h_tri / 3.]
])
# Make a square cell
cell = box(-cellsize / 2., -cellsize / 2., cellsize / 2., cellsize / 2.)
# Make a lattice for the cells
# lattice = self.createPtLattice(sl_tri, cellsize / 2. + gap / 2.,cellsize/2. + gap)
lattice = self.createPtLattice(sl_tri, (cellsize + gap) / 2., (cellsize + gap) * np.sqrt(3.) / 2.)
lattice = lattice + np.array(startpt)
lattice = [
pt for pt in lattice if Point(pt).within(block)
] # Keep only points within triangular block
# Use the lattice of points to translate the cell all over the block
cells = [translateshape(cell, xoff=x, yoff=y) for x, y in lattice]
# Keep only the cells that are fully within the block
cells = [f for f in cells if f.within(block)]
return cells
    def build_and_add_blocks(self):
        """
        Create blocks and add them to the wafer Cell

        Builds one 'upblock' Cell with the pattern cells placed on the
        per-block lattice and stamps it at every up-triangle center.  With
        ``symmetric_chips`` the same block is reused rotated 180 degrees for
        the down triangles; otherwise a vertically mirrored lattice is built.
        NOTE(review): ``cellsize=2000`` is hard-coded here although
        ``self.cellsize`` exists — confirm this is intentional.
        """
        self.upCellLattice = self.getCellLattice(cellsize=2000)
        # Create a cell for the triangular blocks
        block_up = Cell('upblock')
        for x, y in self.upCellLattice:
            block_up.add(self.cells, origin=(x, y))
        # Take each point in block lattice and make a copy of the block in that location
        for x, y in self.upCenters:
            self.add(block_up, origin=(x, y))
        if self.symmetric_chips:
            for x, y in self.downCenters:
                self.add(block_up, origin=(x, y), rotation=180)
        else:
            # Mirror the lattice vertically for the down-facing blocks.
            self.downCellLattice = np.array(self.upCellLattice) * np.array([1, -1])
            block_down = Cell('downblock')
            for x, y in self.downCellLattice:
                block_down.add(self.cells, origin=(x, y))
            for x, y in self.downCenters:
                self.add(block_down, origin=(x, y))
    def plotTriangles(self, tris):
        """Debug helper: draw the wafer disc, triangle centers and outlines."""
        from matplotlib import pyplot
        # NOTE: this matplotlib Circle shadows the gdsCAD Circle import, but
        # only within this function's scope.
        from matplotlib.patches import Circle
        fig = pyplot.figure(1, dpi=90)
        ax = fig.add_subplot(111)
        ax.grid()
        # Draw the wafer
        circle = Circle(
            (0, 0),
            self.wafer_r,
            facecolor="#{0:0{1}X}".format(np.random.randint(0, 16777215), 6),
            edgecolor=BLACK,
            alpha=1)
        ax.add_patch(circle)
        tricenters = [tri.centroid.xy for tri in tris]
        x, y = list(zip(*tricenters))
        ax.plot(x, y, 'bo')
        # Draw all the triangles
        for i, item in enumerate(tris):
            x, y = item.exterior.coords.xy
            ax.plot(x, y, 'k-')
            patch = PolygonPatch(item,
                                 facecolor="#{0:0{1}X}".format(np.random.randint(0, 16777215), 6),
                                 edgecolor=BLACK,
                                 alpha=0.5,
                                 zorder=2)
            ax.add_patch(patch)
        ax.axis('equal')
def makeTriang(self, xs, ys, s, orient):
h = np.sqrt(3.) / 2. * s
ps = []
for x, y in zip(xs, ys):
if orient == "up":
p0 = [x - s / 2., y - h / 3.]
p1 = [x, y + 2. * h / 3.]
p2 = [x + s / 2., y - h / 3.]
else:
p0 = [x - s / 2., y + h / 3.]
p1 = [x, y - 2. * h / 3.]
p2 = [x + s / 2., y + h / 3.]
ps.append(Polygon([p0, p1, p2]))
return ps
def createPtLattice(self, size, xgap, ygap):
G = nx.Graph(directed=False)
G.add_node((0, 0))
for n in range(int(size / min([xgap, ygap]))):
for (q, r) in G.nodes():
G.add_edge((q, r), (q - xgap, r - ygap))
G.add_edge((q, r), (q + xgap, r + ygap))
G.add_edge((q, r), (q - xgap, r + ygap))
G.add_edge((q, r), (q + xgap, r - ygap))
uniquepts = set(tuple(map(tuple, np.round(list(G.node.keys()), 10))))
return list(map(np.array, uniquepts)) # Return only unique points
    def makeBlocks(self, trisize, startpt=(0, 0)):
        """Tile the wafer with up/down triangular blocks.

        Returns ``(upTris, downTris)``: shapely triangles lying fully inside
        the wafer circle.  ``startpt`` shifts the whole lattice (used by the
        Monte-Carlo block search).
        NOTE(review): the ``trisize`` parameter is ignored —
        ``self.trisize`` is used instead; callers happen to pass the same
        value, so behaviour is unchanged.
        """
        gap = self.block_gap
        wafer_r = self.wafer_r
        sl_tri = self.trisize  # Sidelength of the triangular blocks
        h_tri = np.sqrt(3.) / 2. * sl_tri  # Height of triangular blocks
        sl_lattice = sl_tri + gap / np.tan(
            np.deg2rad(30)
        )  # Sidelength of the block lattice (including the gaps between blocks)
        h_lattice = np.sqrt(
            3.) / 2. * sl_lattice  # Height of the lattice triangles
        # Create the lattice of the "up" facing triangles
        points = self.createPtLattice(2. * wafer_r, sl_lattice / 2., h_lattice)
        points = points + np.array([
            0, h_tri / 3. + gap / 2.
        ])  # Shift lattice so we can cleave the wafer at y=0
        points = points + np.array(startpt)  # Shift lattice by starting point if doing an MC search
        # Create the lattice of "down" facing triangles by shifting previous lattice
        points2 = points + np.array([sl_lattice / 2., h_lattice / 3])
        x, y = list(zip(*points))
        x2, y2 = list(zip(*points2))
        tris1 = self.makeTriang(np.array(x), np.array(y), sl_tri, "up")
        tris2 = self.makeTriang(np.array(x2), np.array(y2), sl_tri, "down")
        # Keep only triangles entirely inside the wafer disc.
        wafer = self.waferShape
        upTris = [triangle for triangle in tris1 if triangle.within(wafer)]
        downTris = [triangle for triangle in tris2 if triangle.within(wafer)]
        return upTris, downTris
def _place_blocks(self):
"""
Create the list of valid block sites based on block size and wafer diam.
"""
sl_tri = self.trisize # Sidelength of the triangular blocks
h_tri = np.sqrt(3.) / 2. * sl_tri # Height of triangular blocks
if self.doMCBlockSearch:
best = [0, 0, 0, 0]
# Iterates many times to find the best fit
for i in range(self.MCBlockIterations):
# Random seed point
# rndpt = (np.random.randint(-sl_tri/2., sl_tri/2.), np.random.randint(-h_tri/2., h_tri/2.))
rndpt = (0, np.random.randint(-h_tri, 0))
# Make cells around this point
upTris, downTris = self.makeBlocks(sl_tri, startpt=rndpt)
NTris = (len(upTris) + len(downTris))
if NTris > best[1]:
centroidDist = np.array([tri.centroid.xy for tri in upTris + downTris]).squeeze().mean(0)
centroidDist = np.sqrt(centroidDist[0] ** 2 + centroidDist[1] ** 2)
# centroidDist = abs(rndpt[1])
best = [rndpt, NTris, (upTris, downTris), centroidDist]
elif NTris == best[1]:
# Choose the pattern that is most centered on the wafer
centroidDist = np.array([tri.centroid.xy for tri in upTris + downTris]).squeeze().mean(0)
centroidDist = np.sqrt(centroidDist[0] ** 2 + centroidDist[1] ** 2)
# centroidDist = abs(rndpt[1])
# print centroidDist
if centroidDist < best[3]:
best = [rndpt, NTris, (upTris, downTris), centroidDist]
# print("Current: {:f}, Best {:f}").format(NTris,best[1])
# Choose the best configuration (fits the most cells)
self.upTris, self.downTris = best[2]
self.blockOffset = best[0]
else:
self.upTris, self.downTris = self.makeBlocks(sl_tri)
self.blockOffset = (0, 0)
# Debugging
self.plotTriangles(self.downTris + self.upTris)
# Find the centers of the triangles
self.upCenters = [list(zip(*tri.centroid.xy)[0]) for tri in self.upTris]
self.downCenters = [list(zip(*tri.centroid.xy)[0]) for tri in self.downTris]
# %%
sl_lattice = self.trisize + self.block_gap / np.tan(np.deg2rad(30))
h_lattice = np.sqrt(3.) / 2. * sl_lattice
base = h_lattice
# Create label for each block (taken from templates._placeblocks)
# String prefixes to associate with each row/column index
x1s, y1s = set(), set()
for tri in self.upTris:
x1s.add(
np.round(tri.centroid.x, 8)
) # In x use centroid as reference, in y use lower bound so up and down triangles give almost the same value
y1s.add(base * round(float(tri.bounds[1]) / base))
# Create dictionary of up and down triangles
self.orientrows = dict(list(zip(y1s, ["up" for i, y in enumerate(y1s)])))
# Create dictionary of up and down triangles
x2s, y2s = set(), set()
for tri in self.downTris:
x2s.add(np.round(tri.centroid.x, 8))
y2s.add(base * round(float(tri.bounds[1]) / base))
self.orientrows.update(dict(list(zip(y2s, ["down" for i, y in enumerate(y2s)
]))))
x1s.update(x2s)
xs = sorted(list(x1s))
self.blockcols = dict(list(zip(xs, [
string.uppercase[i] for i, x in enumerate(xs)
])))
y1s.update(y2s)
ys = sorted(list(y1s))
self.blockrows = dict(list(zip(ys, [str(i) for i, y in enumerate(ys)])))
# Square cell labels ex: "A", "B", "C"...
def add_cellLabels(self, layers, center=False):
if not (type(layers) == list): layers = [layers]
cellLattice = sorted(self.upCellLattice,
key=itemgetter(1, 0)) # Sort the array first
celllabelsUp = Cell('CellLabelsUp')
h = self.cellsize
vOffsetFactor = 1.
txtSize = 200
for i, pt in enumerate(cellLattice):
cellid = string.uppercase[i]
celllabel = Cell('LBL_F_' + cellid)
for l in layers:
txt = Label(cellid, txtSize, layer=l)
bbox = txt.bounding_box
offset = np.array(pt)
txt.translate(-np.mean(bbox, 0)) # Center text around origin
txt.translate(offset) # Translate it to bottom of wafer
celllabel.add(txt)
if center:
celllabelsUp.add(celllabel) # Middle of cell
else:
celllabelsUp.add(celllabel, origin=(
0, -h / 2. * vOffsetFactor + np.mean(bbox, 0)[1])) # Bottom of cell
for tri in self.upTris:
self.add(celllabelsUp, origin=tri.centroid)
cellLattice = sorted(self.downCellLattice,
key=itemgetter(1, 0),
reverse=True)
celllabelsDown = Cell('CellLabelsDown')
h = self.cellsize
for i, pt in enumerate(cellLattice):
cellid = string.uppercase[i]
celllabel = Cell('LBL_F_' + cellid)
for l in layers:
txt = Label(cellid, txtSize, layer=l)
bbox = txt.bounding_box
offset = np.array(pt)
txt.translate(-np.mean(bbox, 0)) # Center text around origin
if self.symmetric_chips:
txt.rotate(180)
txt.translate(offset) # Translate it to bottom of wafer
celllabel.add(txt)
if center:
celllabelsDown.add(celllabel) # Middle of cell
else:
celllabelsDown.add(celllabel,
origin=(0, -h / 2. * vOffsetFactor + np.mean(bbox, 0)[1])) # Bottom of cell
for tri in self.downTris:
self.add(celllabelsDown, origin=tri.centroid)
    # Triangular block labels ex: "A0", "B0", "C0"...
    def add_blockLabels(self, layers, center=False):
        """Label every triangular block with its column letter + row number.

        Row/column lookup tables come from _place_blocks().  Down-facing
        blocks get the text rotated 180 degrees for symmetric chips.
        NOTE(review): up blocks use cell names 'LBL_B_<id>' while down
        blocks use 'LBL_<id>' — looks like an unintended inconsistency, but
        renaming would change the emitted GDS cell names, so it is left
        as-is.
        """
        if not (type(layers) == list): layers = [layers]
        vOffsetFactor = 1.
        blocklabelsUp = Cell('BlockLabelsUp')
        h = self.upTris[0].bounds[3] - self.upTris[0].bounds[1]
        sl_lattice = self.trisize + self.block_gap / np.tan(np.deg2rad(30))
        h_lattice = np.sqrt(3.) / 2. * sl_lattice
        base = h_lattice
        for tri in self.upTris:
            # Row/column ids keyed the same way _place_blocks() built them.
            lbl_col = self.blockcols[np.round(tri.centroid.x, 8)]
            lbl_row = self.blockrows[base * round(float(tri.bounds[1]) / base)]
            blockid = str(lbl_col) + str(lbl_row)
            blocklabel = Cell('LBL_B_' + blockid)
            for l in layers:
                txt = Label(blockid, 1000, layer=l)
                bbox = txt.bounding_box
                offset = np.array(tri.centroid)
                txt.translate(-np.mean(bbox, 0))  # Center text around origin
                txt.translate(offset)  # Translate it to the block position
                blocklabel.add(txt)
                blocklabelsUp.add(blocklabel)
        if center:
            self.add(blocklabelsUp)
        else:
            self.add(blocklabelsUp, origin=(0, h / 2. * vOffsetFactor))
        blocklabelsDown = Cell('BlockLabelsDown')
        for tri in self.downTris:
            lbl_col = self.blockcols[np.round(tri.centroid.x, 8)]
            lbl_row = self.blockrows[base * round(float(tri.bounds[1]) / base)]
            blockid = str(lbl_col) + str(lbl_row)
            blocklabel = Cell('LBL_' + blockid)
            for l in layers:
                txt = Label(blockid, 1000, layer=l)
                bbox = txt.bounding_box
                offset = np.array(tri.centroid)
                txt.translate(-np.mean(bbox, 0))  # Center text around origin
                if self.symmetric_chips:
                    txt.rotate(180)
                txt.translate(offset)  # Translate it to the block position
                blocklabel.add(txt)
                blocklabelsDown.add(blocklabel)
        if center:
            self.add(blocklabelsDown)
        else:
            self.add(blocklabelsDown, origin=(0, -h / 2. * vOffsetFactor))
    def add_sub_dicing_ticks(self, length, thickness, layers):
        """Add short tick marks for sub-dicing each block into smaller chips.

        The tick pattern is built once, relative to an up-facing triangle's
        centroid, then stamped at every down triangle as-is and at every up
        triangle rotated 180 degrees.  Only the first layer in ``layers`` is
        used for the tick geometry.
        """
        if not (type(layers) == list): layers = [layers]
        l = layers[0]
        # Reference geometry from the first up triangle (all are congruent).
        _h = self.upTris[0].bounds[3] - self.upTris[0].bounds[1]
        _w = self.upTris[0].bounds[2] - self.upTris[0].bounds[0]
        y_bottom = self.upTris[0].bounds[1]
        y_centroid = self.upTris[0].centroid.y
        offset = y_centroid - y_bottom
        mark = Path([(0, 0), (0, -length)], width=thickness, layer=l)
        mark_cell = Cell('SubDicingTick')
        mark_cell.add(mark)
        tri_sub_dMarks = Cell('TriSubDMarks')
        # Ticks along the two slanted edges (apex and mid-edge positions).
        tri_sub_dMarks.add(mark_cell, rotation=30, origin=(0, offset))
        tri_sub_dMarks.add(mark_cell, rotation=-30, origin=(0, offset))
        tri_sub_dMarks.add(mark_cell, rotation=30, origin=(_w / 4., offset - _h / 2.))
        tri_sub_dMarks.add(mark_cell, rotation=90, origin=(_w / 4., offset - _h / 2.))
        tri_sub_dMarks.add(mark_cell, rotation=-30, origin=(-_w / 4., offset - _h / 2.))
        tri_sub_dMarks.add(mark_cell, rotation=-90, origin=(-_w / 4., offset - _h / 2.))
        # Horizontal marks
        # This is a mess... should fix it later. Past Martin says sorry...
        tri_sub_dMarks.add(mark_cell, rotation=-90, origin=(_w * 3. / 8. - 300., offset - _h / 4. - _h / 20.))
        tri_sub_dMarks.add(mark_cell, rotation=90, origin=(-_w * 3. / 8. + 300., offset - _h / 4. - _h / 20.))
        tri_sub_dMarks.add(mark_cell, rotation=-90, origin=(_w * 1. / 8. + 300., offset - _h * 3. / 4. + _h / 20.))
        tri_sub_dMarks.add(mark_cell, rotation=90, origin=(-_w * 1. / 8. - 300., offset - _h * 3. / 4. + _h / 20.))
        # Stamp the tick group on every block (rotated for up triangles).
        for tri in self.downTris:
            tri_center = np.array(tri.centroid)
            self.add(tri_sub_dMarks, origin=tri_center)
        for tri in self.upTris:
            tri_center = np.array(tri.centroid)
            self.add(tri_sub_dMarks, origin=tri_center, rotation=180)
    def add_waferLabel(self, label, layers, pos=None):
        """
        Create a label

        Places ``label`` near the bottom of the wafer (or at ``pos`` if
        given) on every layer, inside a dedicated sub-cell that is emptied
        and reused on subsequent calls.
        """
        if not (type(layers) == list): layers = [layers]
        if self._label is None:
            self._label = Cell(self.name + '_LBL')
            self.add(self._label)
        else:
            # Reuse the existing label cell: drop its previous contents.
            self._label.elements = []
        offset = np.array([0, -self.wafer_r + self.block_gap]) if pos is None else np.array(pos)
        labelsize = 1000.
        for l in layers:
            txt = LineLabel(label, labelsize, style='romand', line_width=labelsize / 20., layer=l)
            bbox = txt.bounding_box
            txt.translate(-np.mean(bbox, 0))  # Center text around origin
            txt.translate(offset)  # Translate it to bottom of wafer
            self._label.add(txt)
# TODO: create a square cell helper class? Do not confuse with Cell class of gdsCAD
class Blocks(Cell):
    """
    Block object in the form of a triangle, facing either up or down
    """
    # TODO: add the inner and outer (triangle+gap) polygons to this block for easier use later
    def __init__(self, side_len, orient, name, center=[0., 0.]):
        # NOTE(review): mutable default argument for ``center``; safe only
        # as long as it is never mutated in place.
        super(Blocks, self).__init__(name)
        self.center = center
        self.orient = orient  # "up" or "down"
        self.side_len = side_len
        self.height = np.sqrt(3.) / 2. * side_len  # equilateral height
        self.ptList = self.calcPts()
        self.polygon = Polygon(self.ptList)
    def calcPts(self):
        # Corner coordinates of the up- or down-facing triangle whose
        # centroid sits at ``self.center``.
        x, y = self.center
        h = self.height
        s = self.side_len
        if self.orient == "up":
            p0 = [x - s / 2., y - h / 3.]
            p1 = [x, y + 2. * h / 3.]
            p2 = [x + s / 2., y - h / 3.]
        else:
            p0 = [x - s / 2., y + h / 3.]
            p1 = [x, y - 2. * h / 3.]
            p2 = [x + s / 2., y + h / 3.]
        ptsList = [p0, p1, p2]
        return ptsList
# Copyright 2014 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import misc
from tempest.tests import base
@misc.singleton
class TestFoo(object):
    # Singleton guinea pig: the decorator must return the same instance on
    # every "construction", so state set via increment() is shared.
    count = 0
    def increment(self):
        # ``self.count += 1`` reads the class attribute on first use, then
        # shadows it with an instance attribute.
        self.count += 1
        return self.count
@misc.singleton
class TestBar(object):
    # Second singleton class: must be a distinct instance from TestFoo.
    count = 0
    def increment(self):
        self.count += 1
        return self.count
class TestMisc(base.TestCase):
    # NOTE: find_test_caller() identifies callers by stack-frame *function
    # name* (setUp / tearDown / setUpClass / ...), so the inner helper
    # functions below must keep exactly those names.
    def test_singleton(self):
        # Two TestFoo() calls must yield the very same object (shared
        # count), while a different singleton class is a distinct instance.
        test = TestFoo()
        self.assertEqual(0, test.count)
        self.assertEqual(1, test.increment())
        test2 = TestFoo()
        self.assertEqual(1, test.count)
        self.assertEqual(1, test2.count)
        self.assertEqual(test, test2)
        test3 = TestBar()
        self.assertNotEqual(test, test3)
    def test_find_test_caller_test_case(self):
        # Calling it from here should give us the method we're in.
        self.assertEqual('TestMisc:test_find_test_caller_test_case',
                         misc.find_test_caller())
    def test_find_test_caller_setup_self(self):
        # With a ``self`` argument the class name is reported too.
        def setUp(self):
            return misc.find_test_caller()
        self.assertEqual('TestMisc:setUp', setUp(self))
    def test_find_test_caller_setup_no_self(self):
        # Without ``self`` there is no class context -> empty class part.
        def setUp():
            return misc.find_test_caller()
        self.assertEqual(':setUp', setUp())
    def test_find_test_caller_setupclass_cls(self):
        def setUpClass(cls):  # noqa
            return misc.find_test_caller()
        self.assertEqual('TestMisc:setUpClass', setUpClass(self.__class__))
    def test_find_test_caller_teardown_self(self):
        def tearDown(self):
            return misc.find_test_caller()
        self.assertEqual('TestMisc:tearDown', tearDown(self))
    def test_find_test_caller_teardown_no_self(self):
        def tearDown():
            return misc.find_test_caller()
        self.assertEqual(':tearDown', tearDown())
    def test_find_test_caller_teardown_class(self):
        def tearDownClass(cls):  # noqa
            return misc.find_test_caller()
        self.assertEqual('TestMisc:tearDownClass',
                         tearDownClass(self.__class__))
# Copyright (C) 2015 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2015 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2015 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import apps
from django.utils.translation import ugettext_lazy as _
from taiga.base.fields import JsonField
from taiga.base.api.serializers import ValidationError
from taiga.base.api.serializers import ModelSerializer
from . import models
######################################################
# Custom Attribute Serializer
#######################################################
class BaseCustomAttributeSerializer(ModelSerializer):
    # Shared serializer for per-project custom attribute definitions.
    # Subclasses only need to set Meta.model.
    class Meta:
        read_only_fields = ('id',)
        exclude = ('created_date', 'modified_date')

    def _validate_integrity_between_project_and_name(self, attrs, source):
        """
        Check the name is not duplicated in the project. Check when:
            - create a new one
            - update the name
            - update the project (move to another project)
        """
        data_id = attrs.get("id", None)
        data_name = attrs.get("name", None)
        data_project = attrs.get("project", None)

        # On update, fall back to the stored values for any field not
        # present in the incoming payload.
        # NOTE(review): `or` fallback treats falsy incoming values (e.g. an
        # empty name) the same as "absent" — confirm this is intended.
        if self.object:
            data_id = data_id or self.object.id
            data_name = data_name or self.object.name
            data_project = data_project or self.object.project

        # Uniqueness is scoped per (project, name); exclude self on update.
        model = self.Meta.model
        qs = (model.objects.filter(project=data_project, name=data_name)
                           .exclude(id=data_id))
        if qs.exists():
            raise ValidationError(_("Already exists one with the same name."))

        return attrs

    # Both the name and project fields trigger the same integrity check,
    # since changing either can create a duplicate.
    def validate_name(self, attrs, source):
        return self._validate_integrity_between_project_and_name(attrs, source)

    def validate_project(self, attrs, source):
        return self._validate_integrity_between_project_and_name(attrs, source)
class UserStoryCustomAttributeSerializer(BaseCustomAttributeSerializer):
    # Custom attribute definitions attached to user stories.
    class Meta(BaseCustomAttributeSerializer.Meta):
        model = models.UserStoryCustomAttribute
class TaskCustomAttributeSerializer(BaseCustomAttributeSerializer):
    # Custom attribute definitions attached to tasks.
    class Meta(BaseCustomAttributeSerializer.Meta):
        model = models.TaskCustomAttribute
class IssueCustomAttributeSerializer(BaseCustomAttributeSerializer):
    # Custom attribute definitions attached to issues.
    class Meta(BaseCustomAttributeSerializer.Meta):
        model = models.IssueCustomAttribute
######################################################
# Custom Attributes Values Serializers
######################################################
class BaseCustomAttributesValuesSerializer(ModelSerializer):
    attributes_values = JsonField(source="attributes_values", label="attributes values")
    # Hooks that concrete subclasses must set: the custom-attribute model to
    # validate keys against, and the name of the field pointing to the
    # container object (user story / task / issue).
    _custom_attribute_model = None
    _container_field = None

    class Meta:
        exclude = ("id",)

    def validate_attributes_values(self, attrs, source):
        # values must be a dict
        data_values = attrs.get("attributes_values", None)
        if self.object:
            data_values = (data_values or self.object.attributes_values)

        # NOTE(review): `type(...) is not dict` rejects dict subclasses,
        # unlike isinstance(); presumably intentional strictness — confirm.
        if type(data_values) is not dict:
            raise ValidationError(_("Invalid content. It must be {\"key\": \"value\",...}"))

        # Values keys must be in the container object project: resolve the
        # project from the incoming container, else from the stored object.
        data_container = attrs.get(self._container_field, None)
        if data_container:
            project_id = data_container.project_id
        elif self.object:
            project_id = getattr(self.object, self._container_field).project_id
        else:
            project_id = None

        # Every key of the dict must be the id of a custom attribute defined
        # in that project; a count mismatch means at least one unknown key.
        values_ids = list(data_values.keys())
        qs = self._custom_attribute_model.objects.filter(project=project_id,
                                                         id__in=values_ids)
        if qs.count() != len(values_ids):
            raise ValidationError(_("It contain invalid custom fields."))

        return attrs
class UserStoryCustomAttributesValuesSerializer(BaseCustomAttributesValuesSerializer):
    _custom_attribute_model = models.UserStoryCustomAttribute
    # NOTE(review): _container_model is set only on this subclass and is not
    # read by the visible base class — confirm whether it is still used.
    _container_model = "userstories.UserStory"
    _container_field = "user_story"

    class Meta(BaseCustomAttributesValuesSerializer.Meta):
        model = models.UserStoryCustomAttributesValues
class TaskCustomAttributesValuesSerializer(BaseCustomAttributesValuesSerializer, ModelSerializer):
    # NOTE(review): inheriting ModelSerializer again is redundant (the base
    # class already extends it) but harmless.
    _custom_attribute_model = models.TaskCustomAttribute
    _container_field = "task"

    class Meta(BaseCustomAttributesValuesSerializer.Meta):
        model = models.TaskCustomAttributesValues
class IssueCustomAttributesValuesSerializer(BaseCustomAttributesValuesSerializer, ModelSerializer):
    # NOTE(review): the extra ModelSerializer base is redundant (already
    # inherited through the base class) but harmless.
    _custom_attribute_model = models.IssueCustomAttribute
    _container_field = "issue"

    class Meta(BaseCustomAttributesValuesSerializer.Meta):
        model = models.IssueCustomAttributesValues
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.cli.command;
import java.util.EnumSet;
import java.util.Set;
import org.jspecify.annotations.Nullable;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.springframework.boot.cli.command.core.HelpCommand;
import org.springframework.boot.cli.command.core.HintCommand;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
import static org.mockito.BDDMockito.then;
import static org.mockito.BDDMockito.willThrow;
import static org.mockito.Mockito.lenient;
/**
* Tests for {@link CommandRunner}.
*
* @author Phillip Webb
* @author Dave Syer
*/
@ExtendWith(MockitoExtension.class)
class CommandRunnerTests {

	private CommandRunner commandRunner;

	@Mock
	@SuppressWarnings("NullAway.Init")
	private Command regularCommand;

	@Mock
	@SuppressWarnings("NullAway.Init")
	private Command anotherCommand;

	// Records which CommandRunner hooks were invoked during a test run.
	private final Set<Call> calls = EnumSet.noneOf(Call.class);

	// Context ClassLoader captured in setup() and restored in close().
	private ClassLoader loader;

	@AfterEach
	void close() {
		Thread.currentThread().setContextClassLoader(this.loader);
		// runAndHandleErrors("--debug") may set this property; clear it so
		// tests stay independent of each other.
		System.clearProperty("debug");
	}

	@BeforeEach
	void setup() {
		this.loader = Thread.currentThread().getContextClassLoader();
		// Anonymous subclass: record each hook invocation before delegating
		// to the real implementation.
		this.commandRunner = new CommandRunner("spring") {

			@Override
			protected void showUsage() {
				CommandRunnerTests.this.calls.add(Call.SHOW_USAGE);
				super.showUsage();
			}

			@Override
			protected boolean errorMessage(@Nullable String message) {
				CommandRunnerTests.this.calls.add(Call.ERROR_MESSAGE);
				return super.errorMessage(message);
			}

			@Override
			protected void printStackTrace(Exception ex) {
				CommandRunnerTests.this.calls.add(Call.PRINT_STACK_TRACE);
				super.printStackTrace(ex);
			}

		};
		// lenient(): not every test exercises every stub.
		lenient().doReturn("another").when(this.anotherCommand).getName();
		lenient().doReturn("command").when(this.regularCommand).getName();
		lenient().doReturn("A regular command").when(this.regularCommand).getDescription();
		this.commandRunner.addCommand(this.regularCommand);
		this.commandRunner.addCommand(new HelpCommand(this.commandRunner));
		this.commandRunner.addCommand(new HintCommand(this.commandRunner));
	}

	@Test
	void runWithoutArguments() {
		assertThatExceptionOfType(NoArgumentsException.class).isThrownBy(this.commandRunner::run);
	}

	@Test
	void runCommand() throws Exception {
		this.commandRunner.run("command", "--arg1", "arg2");
		then(this.regularCommand).should().run("--arg1", "arg2");
	}

	@Test
	void missingCommand() {
		assertThatExceptionOfType(NoSuchCommandException.class).isThrownBy(() -> this.commandRunner.run("missing"));
	}

	@Test
	void appArguments() throws Exception {
		this.commandRunner.runAndHandleErrors("command", "--", "--debug", "bar");
		// Everything after "--" is forwarded to the command untouched.
		then(this.regularCommand).should().run("--", "--debug", "bar");
		// When handled by the command itself it shouldn't cause the system property to be
		// set
		assertThat(System.getProperty("debug")).isNull();
	}

	@Test
	void handlesSuccess() {
		int status = this.commandRunner.runAndHandleErrors("command");
		assertThat(status).isZero();
		assertThat(this.calls).isEmpty();
	}

	@Test
	void handlesNoSuchCommand() {
		int status = this.commandRunner.runAndHandleErrors("missing");
		assertThat(status).isOne();
		assertThat(this.calls).containsOnly(Call.ERROR_MESSAGE);
	}

	@Test
	void handlesRegularExceptionWithMessage() throws Exception {
		willThrow(new RuntimeException("With Message")).given(this.regularCommand).run();
		int status = this.commandRunner.runAndHandleErrors("command");
		assertThat(status).isOne();
		// A message is available, so no stack trace is printed.
		assertThat(this.calls).containsOnly(Call.ERROR_MESSAGE);
	}

	@Test
	void handlesRegularExceptionWithoutMessage() throws Exception {
		willThrow(new RuntimeException()).given(this.regularCommand).run();
		int status = this.commandRunner.runAndHandleErrors("command");
		assertThat(status).isOne();
		// No message: the runner falls back to printing the stack trace too.
		assertThat(this.calls).containsOnly(Call.ERROR_MESSAGE, Call.PRINT_STACK_TRACE);
	}

	@Test
	void handlesExceptionWithDashDashDebug() throws Exception {
		willThrow(new RuntimeException()).given(this.regularCommand).run();
		int status = this.commandRunner.runAndHandleErrors("command", "--debug");
		// "--debug" consumed by the runner sets the system property.
		assertThat(System.getProperty("debug")).isEqualTo("true");
		assertThat(status).isOne();
		assertThat(this.calls).containsOnly(Call.ERROR_MESSAGE, Call.PRINT_STACK_TRACE);
	}

	@Test
	void exceptionMessages() {
		assertThat(new NoSuchCommandException("name").getMessage())
			.isEqualTo("'name' is not a valid command. See 'help'.");
	}

	@Test
	void help() throws Exception {
		this.commandRunner.run("help", "command");
		then(this.regularCommand).should().getHelp();
	}

	@Test
	void helpNoCommand() {
		assertThatExceptionOfType(NoHelpCommandArgumentsException.class)
			.isThrownBy(() -> this.commandRunner.run("help"));
	}

	@Test
	void helpUnknownCommand() {
		assertThatExceptionOfType(NoSuchCommandException.class)
			.isThrownBy(() -> this.commandRunner.run("help", "missing"));
	}

	// Hooks whose invocation the anonymous CommandRunner subclass records.
	private enum Call {

		SHOW_USAGE, ERROR_MESSAGE, PRINT_STACK_TRACE

	}

}
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates a toy v1 saved model for testing."""
import shutil
from absl import app
from absl import flags
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils
# Destination directory for the generated SavedModel.
flags.DEFINE_string('saved_model_path', '', 'Path to save the model to.')

FLAGS = flags.FLAGS
def main(argv):
  """Builds and saves a v1 SavedModel whose outputs cover many dtypes.

  The model has no inputs; its single serving signature returns one tensor
  per covered dtype, each read from a variable of that dtype.

  Args:
    argv: Command-line arguments; only the program name is accepted.

  Raises:
    app.UsageError: If extra command-line arguments are supplied.
  """
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  # Start from a clean directory; ignore_errors keeps the first run (when
  # the output path does not exist yet) from failing.
  shutil.rmtree(FLAGS.saved_model_path, ignore_errors=True)

  # One (name, dtype, initial value) entry per dtype to cover. The previous
  # version reused the unsigned dtypes for the i8..i64 variables (a
  # copy-paste slip); the signed dtypes are used here so the signed integer
  # types are actually covered.
  specs = [
      ('bf16', dtypes.bfloat16, 1),
      ('f16', dtypes.float16, 1),
      ('f32', dtypes.float32, 1),
      ('f64', dtypes.float64, 1),
      ('ui8', dtypes.uint8, 1),
      ('ui16', dtypes.uint16, 1),
      ('ui32', dtypes.uint32, 1),
      ('ui64', dtypes.uint64, 1),
      ('i1', dtypes.bool, True),
      ('i8', dtypes.int8, 1),
      ('i16', dtypes.int16, 1),
      ('i32', dtypes.int32, 1),
      ('i64', dtypes.int64, 1),
      ('complex64', dtypes.complex64, 1),
      ('complex128', dtypes.complex128, 1),
      ('string', dtypes.string, 'str'),
  ]
  tf_vars = [
      variables.Variable(name=name, dtype=dtype, initial_value=value)
      for name, dtype, value in specs
  ]

  sess = session.Session()
  sess.run(variables.global_variables_initializer())

  sm_builder = builder.SavedModelBuilder(FLAGS.saved_model_path)

  # Expose each variable's value as a signature output named 'r_<name>'.
  outputs = {
      'r_' + name: utils.build_tensor_info(var.read_value())
      for (name, _, _), var in zip(specs, tf_vars)
  }
  toy_signature = (
      signature_def_utils.build_signature_def(
          outputs=outputs,
          method_name=signature_constants.PREDICT_METHOD_NAME))

  sm_builder.add_meta_graph_and_variables(
      sess, [tag_constants.SERVING],
      signature_def_map={
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: toy_signature,
      },
      strip_default_attrs=True)
  sm_builder.save()
if __name__ == '__main__':
  # absl handles flag parsing before dispatching to main().
  app.run(main)
# Scan an Apple header file, generating a Python file of generator calls.
import sys
from bgenlocations import TOOLBOXDIR, BGENDIR
sys.path.append(BGENDIR)
from scantools import Scanner_OSX
# Names used to derive generated file names (Python 2 bgen tooling).
LONG = "CoreGraphics"
SHORT = "cg"
# Typenames treated as objects: a function whose first InMode argument has
# one of these types becomes a method of that object.
OBJECTS = ("CGContextRef",
           )
# ADD object typenames here
def main():
    # Scan CGContext.h and generate the cggen.py generator script plus the
    # CoreGraphics.py definitions file under the toolbox directory.
    # NOTE: Python 2 only (print statement, execfile, exec statement).
    input = [
        "CGContext.h",
    ]
    output = SHORT + "gen.py"
    defsoutput = TOOLBOXDIR + LONG + ".py"
    scanner = MyScanner(input, output, defsoutput)
    scanner.scan()
    scanner.gentypetest(SHORT+"typetest.py")
    scanner.close()
    # Sanity-check the generated definitions by executing them.
    print "=== Testing definitions output code ==="
    execfile(defsoutput, {}, {})
    print "=== Done scanning and generating, now importing the generated code... ==="
    exec "import " + SHORT + "support"
    print "=== Done. It's up to you to compile it now! ==="
class MyScanner(Scanner_OSX):
    # Customizes the generic OSX header scanner for CoreGraphics.

    def destination(self, type, name, arglist):
        # Decide whether a scanned function becomes a plain Function or a
        # Method on one of the OBJECTS types (based on its first argument).
        classname = "Function"
        listname = "functions"
        if arglist:
            t, n, m = arglist[0]
            if t in OBJECTS and m == "InMode":
                classname = "Method"
                listname = t + "_methods"
            # Special case for the silly first AllocatorRef argument
            if t == 'CFAllocatorRef' and m == 'InMode' and len(arglist) > 1:
                t, n, m = arglist[1]
                if t in OBJECTS and m == "InMode":
                    classname = "MethodSkipArg1"
                    listname = t + "_methods"
        return classname, listname

    def writeinitialdefs(self):
        self.defsfile.write("def FOUR_CHAR_CODE(x): return x\n")

    def makeblacklistnames(self):
        # Functions that must not be wrapped (refcounting is handled by the
        # Python object lifetime instead).
        return [
            "CGContextRetain",
            "CGContextRelease",
            ]

    def makegreylist(self):
        return []

    def makeblacklisttypes(self):
        # Argument/return types the generator cannot (yet) handle.
        return [
            "float_ptr",
            "CGRect_ptr",
            "CGPoint_ptr",
            "CGColorSpaceRef",
            "CGColorRenderingIntent",
            "CGFontRef",
#            "char_ptr",
            "CGGlyph_ptr",
            "CGImageRef",
            "CGPDFDocumentRef",
            ]

    def makerepairinstructions(self):
        # Collapse (char*, size_t) pairs into a single InBuffer argument.
        return [
            ([("char_ptr", "cstring", "InMode"), ("size_t", "length", "InMode")],
             [("InBuffer", "*", "*")]),
#            ([("char_ptr", "name", "InMode"),],
#             [("CCCCC", "*", "*")]),
            ]
if __name__ == "__main__":
    main()
#! /usr/bin/python3
# -*- coding: iso-8859-15 -*-
# *------------------------------------------------------------------
# * axl_zeep.py
# *
# * Cisco AXL Python
# *
# * Copyright (C) 2021 Carlos Sanz <carlos.sanzpenas@gmail.com>
# *
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# *------------------------------------------------------------------
# *
# Import Modules
from lxml import etree
from requests import Session
from requests.auth import HTTPBasicAuth
from zeep import Client, Settings, Plugin, xsd
from zeep.transports import Transport
from zeep.cache import SqliteCache
from zeep.plugins import HistoryPlugin
from zeep.exceptions import Fault
from prettytable import PrettyTable
from configobj import ConfigObj
import getopt
import logging
import sys
import platform
import time
import uuid
import os
import csv
import urllib3
import json
import pprint
class PrettyLog():
    """Lazy pretty-printing wrapper for log arguments.

    Formatting only happens when __repr__ is invoked, so wrapping an object
    for a suppressed log level costs almost nothing.
    """

    def __init__(self, obj):
        # Keep a reference to the wrapped object; no copy is made.
        self.obj = obj

    def __repr__(self):
        formatted = pprint.pformat(self.obj)
        return formatted
# Argumentos pasados por linea de comandos
def parse_command_line(args):
    """Parses CLI options and selects the ConfigObj configuration file.

    Sets the module globals `element_config_file` and `cspconfigfile`.
    If no -c/--config-file option is given, lists the *.cfg files under
    conf/ and either auto-selects the only one or asks interactively.

    NOTE(review): return types are inconsistent — True, False, or a
    {'Status', 'Detail'} dict depending on the path taken; the caller only
    truth-tests the result, so this works, but consider unifying.
    """
    logger.debug('Ha entrado en la funcion parse_command_line()')
    global element_config_file
    global cspconfigfile
    try:
        # Accept -h/--help, -c/--config-file and -f/--csv-file.
        opts, args = getopt.getopt(args[1:],"hc:f:",["help", "config-file=", "csv-file="])
    except getopt.GetoptError as err:
        print (str(err))
        logger.info(get_usage())
        sys.exit(2)
    """
    * options:
    *   -c, --config-file <Config file>
    """
    for option, args in opts:
        if option in ("-h", "--help"):
            logger.debug('Mostrando la Ayuda')
            logger.info(get_usage())
            sys.exit()
        elif option in ("-c", "--config-file"):
            logger.debug('Se ha pasado un fichero de configuracion')
            element_config_file = 'conf/' + args
            logger.info('Ha seleccionado el fichero de configuracion: %s' % (element_config_file))
            cspconfigfile = ConfigObj(element_config_file)
    # No configuration file was passed as a script argument: discover one.
    if(element_config_file==None):
        logger.info(get_usage())
        csp_table_file=PrettyTable(['id', 'Filename'])
        csp_table_id=0
        csp_dir = 'conf/'
        csp_file = []
        # Collect every *.cfg file in conf/ into a selectable table.
        logger.debug('Buscamos todos los archivos *.cfg del directorio conf/')
        for file in os.listdir(csp_dir):
            if file.endswith(".cfg"):
                csp_file.append(file)
                csp_table_file.add_row([csp_table_id,file])
                csp_table_id += 1
        logger.debug('El numero de ficheros de configuracion es: %d',csp_table_id)
        # With exactly one config file, use it; otherwise prompt the user to
        # pick one from the table.
        if csp_table_id == 1:
            element_config_file = csp_dir + csp_file[0]
            logger.info('Ha seleccionado el fichero de configuracion: %s' % (element_config_file))
            cspconfigfile = ConfigObj(element_config_file)
            return {'Status':True,'Detail': element_config_file}
        else:
            print (csp_table_file)
            # Interactive prompt; no validation that input is numeric, so a
            # non-numeric answer raises ValueError at int() below.
            csp_file_config = input('Seleccione el archivo de configuracion: ')
            if int(csp_file_config) > csp_table_id - 1:
                logger.error('Ha seleccionado un fichero erroneo')
                return False
            else:
                element_config_file = csp_dir + csp_file[int(csp_file_config)]
                logger.info('Ha seleccionado el fichero de configuracion: %s' % (element_config_file))
                cspconfigfile = ConfigObj(element_config_file)
                return {'Status':True,'Detail': element_config_file}
    return True
# Help function
def get_usage():
    # Returns the one-line usage string (callers log or print it).
    logger.debug('Ha entrado en la funcion get_usage()')
    return "Uso: -c <Config file>"
# This class lets you view the incoming and outgoing http headers and/or XML
class MyLoggingPlugin(Plugin):
    # zeep plugin: dumps every incoming (ingress) and outgoing (egress)
    # SOAP envelope as pretty-printed XML to stdout.
    def ingress(self, envelope, http_headers, operation):
        print(etree.tostring(envelope, pretty_print=True))
        return envelope, http_headers

    def egress(self, envelope, http_headers, operation, binding_options):
        print(etree.tostring(envelope, pretty_print=True))
        return envelope, http_headers
# Funcion para crear el cliente SOAP que atacara a Cisco Unified Communications Manager
def client_soap(config_file):
    """Builds a zeep SOAP service proxy bound to the CUCM AXL endpoint.

    NOTE(review): the `config_file` parameter is unused; connection
    parameters are read from the module-global `cspconfigfile` instead.
    Exits the process on failure; sets the module-global `history` plugin.
    """
    logger.debug('Ha entrado en la funcion client_soap()')
    csp_cmserver = cspconfigfile['CUCM']['server']
    csp_username = cspconfigfile['CUCM']['user']
    csp_password = cspconfigfile['CUCM']['pass']
    csp_version = cspconfigfile['CUCM']['version']
    # Build a file:// URL to the local AXL WSDL for the configured version;
    # Windows paths need forward slashes.
    if platform.system() == 'Windows':
        logger.debug('El sistema operativo es: %s' % (platform.system()))
        wsdl = 'file://' + os.getcwd().replace ("\\","//") + '//Schema//CUCM//' + csp_version + '//AXLAPI.wsdl'
    else:
        logger.debug('El sistema operativo es: %s' % (platform.system()))
        wsdl = 'file://' + os.getcwd() + '/Schema/CUCM/' + csp_version + '/AXLAPI.wsdl'
    csp_location = 'https://' + csp_cmserver + '/axl/'
    logger.debug('El valor de csp_cmserver es: %s' % (csp_cmserver))
    logger.debug('El valor de csp_username es: %s' % (csp_username))
    logger.debug('El valor de csp_version es: %s' % (csp_version))
    logger.debug('El valor de csp_location es: %s' % (csp_location))
    logger.debug('El valor de wsdl es: %s' % (wsdl))
    # history shows http_headers
    global history
    history = HistoryPlugin()
    # The first step is to create a SOAP client session
    session = Session()
    # We avoid certificate verification by default, but you can uncomment and set
    # your certificate here, and comment out the False setting
    #session.verify = CERT
    session.verify = False
    session.auth = HTTPBasicAuth(csp_username, csp_password)
    transport = Transport(session=session, timeout=10, cache=SqliteCache())
    # strict=False is not always necessary, but it allows zeep to parse imperfect XML
    settings = Settings(strict=False, xml_huge_tree=True)
    try:
        csp_soap_client = Client(wsdl,
                                 settings=settings,
                                 transport=transport,
                                 plugins=[MyLoggingPlugin(),history],
                                 )
        service = csp_soap_client.create_service("{http://www.cisco.com/AXLAPIService/}AXLAPIBinding", csp_location)
    # NOTE(review): bare except hides the real failure (e.g. KeyboardInterrupt);
    # consider catching Exception instead.
    except:
        logger.error('Se ha producido un error al crear el cliente soap')
        logger.debug(sys.exc_info())
        logger.error(sys.exc_info()[1])
        sys.exit()
    else:
        logger.info('Se ha creado el cliente SOAP.')
        return service
# Funcion para dar de alta una sede
def AltaSede(logger,csp_soap_client, cspconfigfile):
    '''
    Unimplemented stub: registering a new site ("alta de sede").
    The docstring is the entire body, so calling this is a no-op that
    returns None.
    # *------------------------------------------------------------------
    # * function AltaSede(logger,csp_soap_client, cspconfigfile):
    # *
    # * Copyright (C) 2021 Carlos Sanz <carlos.sanzpenas@gmail.com>
    # *
    # *------------------------------------------------------------------
    '''
# Main Function
# Main Function
if __name__=='__main__':
    # File logging: one uniquely-named file per run under Log/.
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)-25s %(name)s [%(process)d]: %(levelname)-8s %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        filename='Log/' + time.strftime("%Y%m%d-%H%M%S-") + str(uuid.uuid4()) + '.log',
                        filemode='w',
                        )
    # Silence the InsecureRequestWarning caused by session.verify = False.
    urllib3.disable_warnings()
    element_config_file = None
    history = None
    logger = logging.getLogger('cisco.cucm.axl.zeep')
    logger.setLevel(logging.DEBUG)
    # Mirror logs to the console with a more compact format.
    console = logging.StreamHandler()
    #formatter = logging.Formatter('%(asctime)-25s %(name)s [%(process)d]: %(levelname)-8s %(message)s')
    formatter = logging.Formatter('%(asctime)-22s | %(filename)s:%(lineno)-4s | %(levelname)-9s | %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    console.setFormatter(formatter)
    # NOTE(review): this rebinds the handler's setLevel attribute instead of
    # calling it — probably meant console.setLevel(logging.DEBUG); confirm.
    console.setLevel=logger.setLevel
    logging.getLogger('').addHandler(console)
    logger.info('Estamos usando Python v%s' % (platform.python_version()))
    '''
    logger.debug('This is a debug message %s' % (variable))
    logger.info('This is an info message')
    logger.warning('This is a warning message')
    logger.error('This is an error message')
    logger.critical('This is a critical error message')
    '''
    # Parse CLI arguments / select a configuration file (may prompt).
    if not parse_command_line(sys.argv):
        logger.error("Error in parsing arguments")
        sys.exit(1)
    logger.info('Se ha seleccionado el cliente: %s' % (cspconfigfile['INFO']['customer'].upper()))
    # Build the SOAP service proxy from the selected configuration.
    service = client_soap(element_config_file)
    '''
    Codigo para verificar que esta funcionando la conexion SOAP con el CUCM
    soap_data = {
        'userid': 'enrique.sacido'
    }
    try:
        user_resp = service.getUser(**soap_data)
    except Fault as err:
        logger.error('Se ha producido un error en la consulta SOAP: %s' % format(err))
        logger.debug(sys.exc_info())
        logger.error(sys.exc_info()[1])
        sys.exit()
    else:
        logger.info('getUser Response:\n %s' % user_resp)
        logger.debug('HTTP Last Send:\n %s' % PrettyLog(history.last_sent))
        logger.debug('HTTP Last Received:\n %s' % PrettyLog(history.last_received))
    '''
    #CiscoCustomer.Customer(logger, csp_soap_client,cspconfigfile)
    #Customer(logger, csp_soap_client,cspconfigfile)
    logger.info('Se cerrara el programa')
    sys.exit()
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.contrib.python.checks.tasks.checkstyle.plugin_test_base import \
CheckstylePluginTestBase
from pants.contrib.python.checks.tasks.checkstyle.print_statements import PrintStatements
class PrintStatementsTest(CheckstylePluginTestBase):
    # Exercises the PrintStatements checkstyle plugin: Python 2 print
    # *statements* are flagged (T607); print *functions* are not.
    plugin_type = PrintStatements

    def test_print_override(self):
        # print() calls and a method literally named "print" are fine when
        # print_function is imported.
        statement = """
        from __future__ import print_function
        print("I do what I want")

        class Foo(object):
          def print(self):
            "I can do this because it's not a reserved word."
        """
        self.assertNoNits(statement)

    def test_print_function(self):
        statement = """
        print("I do what I want")
        """
        self.assertNoNits(statement)

    def test_print_statement(self):
        # Subscript syntax after print parses as a Python 2 print statement,
        # which the plugin reports as nit T607.
        statement = """
        print["I do what I want"]
        """
        self.assertNit(statement, 'T607')
#!/usr/bin/env python
"""Common credentials classes and constructors."""
from __future__ import print_function
import datetime
import json
import os
import threading
import httplib2
import oauth2client
import oauth2client.client
import oauth2client.gce
import oauth2client.locked_file
import oauth2client.multistore_file
import oauth2client.service_account
from oauth2client import tools # for gflags declarations
from six.moves import http_client
from six.moves import urllib
from apitools.base.py import exceptions
from apitools.base.py import util
try:
import gflags
FLAGS = gflags.FLAGS
except ImportError:
FLAGS = None
# Public API of this module.
__all__ = [
    'CredentialsFromFile',
    'GaeAssertionCredentials',
    'GceAssertionCredentials',
    'GetCredentials',
    'GetUserinfo',
    'ServiceAccountCredentials',
    'ServiceAccountCredentialsFromFile',
]
# Lock when accessing the cache file to avoid resource contention.
cache_file_lock = threading.Lock()
def SetCredentialsCacheFileLock(lock):
    # Replace the module-level cache-file lock (e.g. with a
    # multiprocessing lock when several processes share the cache).
    global cache_file_lock  # pylint: disable=global-statement
    cache_file_lock = lock
# List of additional methods we use when attempting to construct
# credentials. Users can register their own methods here, which we try
# before the defaults.
_CREDENTIALS_METHODS = []
def _RegisterCredentialsMethod(method, position=None):
    """Register a new method for fetching credentials.

    This new method should be a function with signature:
        client_info, **kwds -> Credentials or None

    This method can be used as a decorator, unless position needs to
    be supplied.

    Note that method must *always* accept arbitrary keyword arguments.

    Args:
        method: New credential-fetching method.
        position: (default: None) Where in the list of methods to
            add this; if None, we append. In all but rare cases,
            this should be either 0 or None.

    Returns:
        method, for use as a decorator.
    """
    registry = _CREDENTIALS_METHODS
    # None means "append"; otherwise clamp the index to the list length so
    # an oversized position degrades to an append as well.
    insert_at = len(registry) if position is None else min(position, len(registry))
    registry.insert(insert_at, method)
    return method
def GetCredentials(package_name, scopes, client_id, client_secret, user_agent,
                   credentials_filename=None,
                   api_key=None,  # pylint: disable=unused-argument
                   client=None,  # pylint: disable=unused-argument
                   oauth2client_args=None,
                   **kwds):
    """Attempt to get credentials, using an oauth dance as the last resort.

    Tries each registered method in _CREDENTIALS_METHODS first (in order),
    then falls back to the file-based flow in CredentialsFromFile.

    Args:
        package_name: Used to build a default user agent when none is given.
        scopes: Iterable of OAuth scopes; normalized before use.
        client_id / client_secret: OAuth client identity.
        user_agent: HTTP user agent; defaults to '<package_name>-generated/0.1'.
        credentials_filename: Token cache path; defaults to ~/.apitools.token.
        api_key, client: Accepted but unused here (may be consumed by
            registered methods through **kwds conventions elsewhere).
        oauth2client_args: Extra args forwarded to CredentialsFromFile.
        **kwds: Passed through to each registered credentials method.

    Raises:
        exceptions.CredentialsError: If no method produced credentials.
    """
    scopes = util.NormalizeScopes(scopes)
    client_info = {
        'client_id': client_id,
        'client_secret': client_secret,
        'scope': ' '.join(sorted(scopes)),
        'user_agent': user_agent or '%s-generated/0.1' % package_name,
    }
    # First match wins: registered methods are tried in registration order.
    for method in _CREDENTIALS_METHODS:
        credentials = method(client_info, **kwds)
        if credentials is not None:
            return credentials
    credentials_filename = credentials_filename or os.path.expanduser(
        '~/.apitools.token')
    credentials = CredentialsFromFile(credentials_filename, client_info,
                                      oauth2client_args=oauth2client_args)
    if credentials is not None:
        return credentials
    raise exceptions.CredentialsError('Could not create valid credentials')
def ServiceAccountCredentialsFromFile(
        service_account_name, private_key_filename, scopes,
        service_account_kwargs=None):
    # Convenience wrapper: read the private key from disk and delegate to
    # ServiceAccountCredentials.
    with open(private_key_filename) as key_file:
        return ServiceAccountCredentials(
            service_account_name, key_file.read(), scopes,
            service_account_kwargs=service_account_kwargs)
def ServiceAccountCredentials(service_account_name, private_key, scopes,
                              service_account_kwargs=None):
    # Build signed-JWT assertion credentials for a service account from an
    # in-memory private key.
    service_account_kwargs = service_account_kwargs or {}
    scopes = util.NormalizeScopes(scopes)
    return oauth2client.client.SignedJwtAssertionCredentials(
        service_account_name, private_key, scopes, **service_account_kwargs)
def _EnsureFileExists(filename):
"""Touches a file; returns False on error, True on success."""
if not os.path.exists(filename):
old_umask = os.umask(0o177)
try:
open(filename, 'a+b').close()
except OSError:
return False
finally:
os.umask(old_umask)
return True
def _GceMetadataRequest(relative_url, use_metadata_ip=False):
    """Request the given url from the GCE metadata service.

    Args:
        relative_url: Path under computeMetadata/v1/ to fetch.
        use_metadata_ip: If True, address the service by its fixed link-local
            IP instead of the metadata.google.internal hostname.

    Returns:
        The open urllib response object.

    Raises:
        exceptions.CommunicationError: If the metadata service is unreachable.
    """
    if use_metadata_ip:
        base_url = 'http://169.254.169.254/'
    else:
        base_url = 'http://metadata.google.internal/'
    url = base_url + 'computeMetadata/v1/' + relative_url
    # Extra header requirement can be found here:
    # https://developers.google.com/compute/docs/metadata
    headers = {'Metadata-Flavor': 'Google'}
    request = urllib.request.Request(url, headers=headers)
    # Empty ProxyHandler: the metadata server must be reached directly,
    # never through a configured HTTP proxy.
    opener = urllib.request.build_opener(urllib.request.ProxyHandler({}))
    try:
        response = opener.open(request)
    except urllib.error.URLError as e:
        raise exceptions.CommunicationError(
            'Could not reach metadata service: %s' % e.reason)
    return response
class GceAssertionCredentials(oauth2client.gce.AppAssertionCredentials):
"""Assertion credentials for GCE instances."""
def __init__(self, scopes=None, service_account_name='default', **kwds):
    """Initializes the credentials instance.

    Args:
        scopes: The scopes to get. If None, whatever scopes that are
            available to the instance are used.
        service_account_name: The service account to retrieve the scopes
            from.
        **kwds: Additional keyword args. If 'cache_filename' is present,
            resolved scopes are cached there to avoid re-querying the
            metadata server on later runs.
    """
    # If there is a connectivity issue with the metadata server,
    # detection calls may fail even if we've already successfully
    # identified these scopes in the same execution. However, the
    # available scopes don't change once an instance is created,
    # so there is no reason to perform more than one query.
    self.__service_account_name = service_account_name
    cached_scopes = None
    cache_filename = kwds.get('cache_filename')
    if cache_filename:
        cached_scopes = self._CheckCacheFileForMatch(
            cache_filename, scopes)

    # Cache miss (or no cache): ask the metadata server, then persist the
    # answer for next time.
    scopes = cached_scopes or self._ScopesFromMetadataServer(scopes)

    if cache_filename and not cached_scopes:
        self._WriteCacheFile(cache_filename, scopes)

    super(GceAssertionCredentials, self).__init__(scopes, **kwds)
@classmethod
def Get(cls, *args, **kwds):
try:
return cls(*args, **kwds)
except exceptions.Error:
return None
def _CheckCacheFileForMatch(self, cache_filename, scopes):
"""Checks the cache file to see if it matches the given credentials.
Args:
cache_filename: Cache filename to check.
scopes: Scopes for the desired credentials.
Returns:
List of scopes (if cache matches) or None.
"""
creds = { # Credentials metadata dict.
'scopes': sorted(list(scopes)) if scopes else None,
'svc_acct_name': self.__service_account_name,
}
with cache_file_lock:
if _EnsureFileExists(cache_filename):
locked_file = oauth2client.locked_file.LockedFile(
cache_filename, 'r+b', 'rb')
try:
locked_file.open_and_lock()
cached_creds_str = locked_file.file_handle().read()
if cached_creds_str:
# Cached credentials metadata dict.
cached_creds = json.loads(cached_creds_str)
if (creds['svc_acct_name'] ==
cached_creds['svc_acct_name']):
if (creds['scopes'] in
(None, cached_creds['scopes'])):
scopes = cached_creds['scopes']
finally:
locked_file.unlock_and_close()
return scopes
def _WriteCacheFile(self, cache_filename, scopes):
"""Writes the credential metadata to the cache file.
This does not save the credentials themselves (CredentialStore class
optionally handles that after this class is initialized).
Args:
cache_filename: Cache filename to check.
scopes: Scopes for the desired credentials.
"""
with cache_file_lock:
if _EnsureFileExists(cache_filename):
locked_file = oauth2client.locked_file.LockedFile(
cache_filename, 'r+b', 'rb')
try:
locked_file.open_and_lock()
if locked_file.is_locked():
creds = { # Credentials metadata dict.
'scopes': sorted(list(scopes)),
'svc_acct_name': self.__service_account_name}
locked_file.file_handle().write(
json.dumps(creds, encoding='ascii'))
# If it's not locked, the locking process will
# write the same data to the file, so just
# continue.
finally:
locked_file.unlock_and_close()
def _ScopesFromMetadataServer(self, scopes):
if not util.DetectGce():
raise exceptions.ResourceUnavailableError(
'GCE credentials requested outside a GCE instance')
if not self.GetServiceAccount(self.__service_account_name):
raise exceptions.ResourceUnavailableError(
'GCE credentials requested but service account '
'%s does not exist.' % self.__service_account_name)
if scopes:
scope_ls = util.NormalizeScopes(scopes)
instance_scopes = self.GetInstanceScopes()
if scope_ls > instance_scopes:
raise exceptions.CredentialsError(
'Instance did not have access to scopes %s' % (
sorted(list(scope_ls - instance_scopes)),))
else:
scopes = self.GetInstanceScopes()
return scopes
def GetServiceAccount(self, account):
relative_url = 'instance/service-accounts'
response = _GceMetadataRequest(relative_url)
response_lines = [line.rstrip('/\n\r')
for line in response.readlines()]
return account in response_lines
def GetInstanceScopes(self):
relative_url = 'instance/service-accounts/{0}/scopes'.format(
self.__service_account_name)
response = _GceMetadataRequest(relative_url)
return util.NormalizeScopes(scope.strip()
for scope in response.readlines())
def _refresh(self, do_request):
"""Refresh self.access_token.
This function replaces AppAssertionCredentials._refresh, which
does not use the credential store and is therefore poorly
suited for multi-threaded scenarios.
Args:
do_request: A function matching httplib2.Http.request's signature.
"""
# pylint: disable=protected-access
oauth2client.client.OAuth2Credentials._refresh(self, do_request)
# pylint: enable=protected-access
def _do_refresh_request(self, unused_http_request):
"""Refresh self.access_token by querying the metadata server.
If self.store is initialized, store acquired credentials there.
"""
relative_url = 'instance/service-accounts/{0}/token'.format(
self.__service_account_name)
try:
response = _GceMetadataRequest(relative_url)
except exceptions.CommunicationError:
self.invalid = True
if self.store:
self.store.locked_put(self)
raise
content = response.read()
try:
credential_info = json.loads(content)
except ValueError:
raise exceptions.CredentialsError(
'Could not parse response as JSON: %s' % content)
self.access_token = credential_info['access_token']
if 'expires_in' in credential_info:
expires_in = int(credential_info['expires_in'])
self.token_expiry = (
datetime.timedelta(seconds=expires_in) +
datetime.datetime.utcnow())
else:
self.token_expiry = None
self.invalid = False
if self.store:
self.store.locked_put(self)
@classmethod
def from_json(cls, json_data):
data = json.loads(json_data)
kwargs = {}
if 'cache_filename' in data.get('kwargs', []):
kwargs['cache_filename'] = data['kwargs']['cache_filename']
credentials = GceAssertionCredentials(scopes=[data['scope']],
**kwargs)
if 'access_token' in data:
credentials.access_token = data['access_token']
if 'token_expiry' in data:
credentials.token_expiry = datetime.datetime.strptime(
data['token_expiry'], oauth2client.client.EXPIRY_FORMAT)
if 'invalid' in data:
credentials.invalid = data['invalid']
return credentials
@property
def serialization_data(self):
raise NotImplementedError(
'Cannot serialize credentials for GCE service accounts.')
# TODO(craigcitro): Currently, we can't even *load*
# `oauth2client.appengine` without being on appengine, because of how
# it handles imports. Fix that by splitting that module into
# GAE-specific and GAE-independent bits, and guarding imports.
class GaeAssertionCredentials(oauth2client.client.AssertionCredentials):

    """Assertion credentials for Google App Engine apps."""

    def __init__(self, scopes, **kwds):
        """Initializes App Engine assertion credentials.

        Args:
          scopes: Scopes for the desired credentials.
          **kwds: Additional keyword args passed to AssertionCredentials.

        Raises:
          exceptions.ResourceUnavailableError: if not running on App Engine.
        """
        if not util.DetectGae():
            # Fixed copy-pasted message: this class is for GAE, not GCE.
            raise exceptions.ResourceUnavailableError(
                'GAE credentials requested outside a GAE instance')
        self._scopes = list(util.NormalizeScopes(scopes))
        super(GaeAssertionCredentials, self).__init__(None, **kwds)

    @classmethod
    def Get(cls, *args, **kwds):
        """Returns credentials, or None if construction raises an
        apitools error (e.g. not running on App Engine)."""
        try:
            return cls(*args, **kwds)
        except exceptions.Error:
            return None

    @classmethod
    def from_json(cls, json_data):
        """Reconstructs credentials from their JSON serialization."""
        data = json.loads(json_data)
        return GaeAssertionCredentials(data['_scopes'])

    def _refresh(self, _):
        """Refresh self.access_token.

        Args:
          _: (ignored) A function matching httplib2.Http.request's signature.
        """
        # Imported lazily: only available inside the App Engine runtime.
        from google.appengine.api import app_identity
        try:
            token, _ = app_identity.get_access_token(self._scopes)
        except app_identity.Error as e:
            raise exceptions.CredentialsError(str(e))
        self.access_token = token
def _GetRunFlowFlags(args=None):
    """Parses the argparse flags needed by oauth2client's run_flow.

    Args:
      args: Optional list of argument strings; defaults to sys.argv.

    Returns:
      An argparse.Namespace with the flags run_flow expects.
    """
    # There's one rare situation where gsutil will not have argparse
    # available, but doesn't need anything depending on argparse anyway,
    # since they're bringing their own credentials. So we just allow this
    # to fail with an ImportError in those cases.
    #
    # TODO(craigcitro): Move this import back to the top when we drop
    # python 2.6 support (eg when gsutil does).
    import argparse
    parser = argparse.ArgumentParser(parents=[tools.argparser])
    # Get command line argparse flags.
    flags, _ = parser.parse_known_args(args=args)
    # Allow `gflags` and `argparse` to be used side-by-side: any gflags
    # values present override what argparse parsed.
    for flag_name in ('auth_host_name', 'auth_host_port'):
        if hasattr(FLAGS, flag_name):
            setattr(flags, flag_name, getattr(FLAGS, flag_name))
    if hasattr(FLAGS, 'auth_local_webserver'):
        flags.noauth_local_webserver = (not FLAGS.auth_local_webserver)
    return flags
# TODO(craigcitro): Switch this from taking a path to taking a stream.
def CredentialsFromFile(path, client_info, oauth2client_args=None):
    """Read credentials from a file.

    Loads stored OAuth2 credentials from `path`; if none are stored (or
    they are invalid), runs the interactive oauth2client flow and stores
    the result.
    """
    # The multistore is keyed on (client_id, user_agent, scope).
    credential_store = oauth2client.multistore_file.get_credential_storage(
        path,
        client_info['client_id'],
        client_info['user_agent'],
        client_info['scope'])
    if hasattr(FLAGS, 'auth_local_webserver'):
        FLAGS.auth_local_webserver = False
    credentials = credential_store.get()
    if credentials is None or credentials.invalid:
        print('Generating new OAuth credentials ...')
        for _ in range(20):
            # If authorization fails, we want to retry, rather than let this
            # cascade up and get caught elsewhere. If users want out of the
            # retry loop, they can ^C.
            try:
                flow = oauth2client.client.OAuth2WebServerFlow(**client_info)
                flags = _GetRunFlowFlags(args=oauth2client_args)
                # run_flow also saves the credentials into credential_store.
                credentials = tools.run_flow(flow, credential_store, flags)
                break
            except (oauth2client.client.FlowExchangeError, SystemExit) as e:
                # Here SystemExit is "no credential at all", and the
                # FlowExchangeError is "invalid" -- usually because
                # you reused a token.
                print('Invalid authorization: %s' % (e,))
            except httplib2.HttpLib2Error as e:
                print('Communication error: %s' % (e,))
                raise exceptions.CredentialsError(
                    'Communication error creating credentials: %s' % e)
    # NOTE(review): if all 20 attempts fail with FlowExchangeError, this
    # falls through and returns the original None/invalid credentials --
    # confirm callers handle that.
    return credentials
# TODO(craigcitro): Push this into oauth2client.
def GetUserinfo(credentials, http=None):  # pylint: disable=invalid-name
    """Get the userinfo associated with the given credentials.

    This is dependent on the token having either the userinfo.email or
    userinfo.profile scope for the given token.

    Args:
      credentials: (oauth2client.client.Credentials) incoming credentials
      http: (httplib2.Http, optional) http instance to use

    Returns:
      The email address for this token, or None if the required scopes
      aren't available.
    """
    http = http or httplib2.Http()
    query = urllib.parse.urlencode({'access_token': credentials.access_token})
    url = '?'.join(('https://www.googleapis.com/oauth2/v2/tokeninfo', query))
    # We ignore communication woes here (i.e. SSL errors, socket
    # timeout), as handling these should be done in a common location.
    response, content = http.request(url)
    if response.status == http_client.BAD_REQUEST:
        # The token may have expired; refresh once and retry.
        credentials.refresh(http)
        response, content = http.request(url)
    return json.loads(content or '{}')  # Save ourselves from an empty reply.
@_RegisterCredentialsMethod
def _GetServiceAccountCredentials(
        client_info, service_account_name=None, service_account_keyfile=None,
        service_account_json_keyfile=None, **unused_kwds):
    """Builds service-account credentials from a JSON or P12 keyfile.

    Returns None when no service-account arguments were supplied.
    """
    # P12-style accounts require the name and the keyfile together.
    if bool(service_account_name) != bool(service_account_keyfile):
        raise exceptions.CredentialsError(
            'Service account name or keyfile provided without the other')
    scopes = client_info['scope'].split()
    user_agent = client_info['user_agent']
    if service_account_json_keyfile:
        with open(service_account_json_keyfile) as keyfile:
            service_account_info = json.load(keyfile)
        account_type = service_account_info.get('type')
        if account_type != oauth2client.client.SERVICE_ACCOUNT:
            raise exceptions.CredentialsError(
                'Invalid service account credentials: %s' % (
                    service_account_json_keyfile,))
        # pylint: disable=protected-access
        return oauth2client.service_account._ServiceAccountCredentials(
            service_account_id=service_account_info['client_id'],
            service_account_email=service_account_info['client_email'],
            private_key_id=service_account_info['private_key_id'],
            private_key_pkcs8_text=service_account_info['private_key'],
            scopes=scopes, user_agent=user_agent)
        # pylint: enable=protected-access
    if service_account_name is not None:
        credentials = ServiceAccountCredentialsFromFile(
            service_account_name, service_account_keyfile, scopes,
            service_account_kwargs={'user_agent': user_agent})
        if credentials is not None:
            return credentials
@_RegisterCredentialsMethod
def _GetGaeServiceAccount(client_info, **unused_kwds):
    """Returns App Engine service-account credentials, or None."""
    requested_scopes = client_info['scope'].split(' ')
    return GaeAssertionCredentials.Get(scopes=requested_scopes)
@_RegisterCredentialsMethod
def _GetGceServiceAccount(client_info, **unused_kwds):
    """Returns GCE metadata-server credentials, or None."""
    requested_scopes = client_info['scope'].split(' ')
    return GceAssertionCredentials.Get(scopes=requested_scopes)
@_RegisterCredentialsMethod
def _GetApplicationDefaultCredentials(
        client_info, skip_application_default_credentials=False,
        **unused_kwds):
    """Returns Application Default Credentials from well-known files, if
    they can satisfy the requested scopes; otherwise None."""
    scopes = client_info['scope'].split()
    if skip_application_default_credentials:
        return None
    gc = oauth2client.client.GoogleCredentials
    with cache_file_lock:
        try:
            # pylint: disable=protected-access
            # We've already done our own check for GAE/GCE
            # credentials, we don't want to pay for checking again.
            credentials = gc._implicit_credentials_from_files()
        except oauth2client.client.ApplicationDefaultCredentialsError:
            return None
    # If we got back a non-service account credential, we need to use
    # a heuristic to decide whether or not the application default
    # credential will work for us. We assume that if we're requesting
    # cloud-platform, our scopes are a subset of cloud scopes, and the
    # ADC will work.
    cloud_platform = 'https://www.googleapis.com/auth/cloud-platform'
    if isinstance(credentials, gc) and cloud_platform not in scopes:
        return None
    return credentials
import warnings
from sqlalchemy.orm import create_session
from sqlalchemy import select, func
from zkpylons import model
from zkpylons.tests import TestBase, monkeypatch
class ModelTest(TestBase):
    """Base class for all data model domain object tests."""

    def setUp(self):
        super(ModelTest, self).setUp()
        # A fresh session per test keeps state from leaking between tests.
        self.dbsession = create_session()

    def tearDown(self):
        self.dbsession.close()
        super(ModelTest, self).tearDown()

    def check_empty_session(self):
        """Check that the database was left empty after the test"""
        leftovers = self.dbsession.query(self.domain).select()
        self.assertEqual([], leftovers)
class CRUDModelTestGenerator(type):
    """Monkeypatching metaclass for data model test classes.

    Generates test methods in the target class from its class attributes,
    reducing the boilerplate needed for common model tests.
    """

    def __init__(cls, name, bases, classdict):
        type.__init__(cls, name, bases, classdict)
        # Base classes (not named Test*) are never patched.
        if not name.startswith('Test'):
            return
        # Only classes declaring a `domain` get a generated CRUD test.
        if 'domain' in classdict:
            monkeypatch(cls, 'test_crud', 'crud')
class CRUDModelTest(ModelTest):
    """Base class for testing the data model classes.

    Derived classes should set the following attributes:

    ``domain`` is the class (not an instance) that is having its API
    tested.

    ``samples`` is a list of dictionaries of attributes to use when
    creating test model objects.

    ``mangles`` is a dictionary mapping attributes to functions, for
    attributes that are modified by the model object so that the value
    returned is not the same as the one set. Set the function to
    something that mangles the value as you expect, and the test will
    check that the returned result is correct.

    An example using this base class follows.

    class TestSomeModel(CRUDModelTest):
        model = model.core.User
        samples = [dict(name='testguy',
                        email_address='test@example.org',
                        password='test')]
        mangles = dict(password=lambda p: md5.new(p).hexdigest())
    """
    # Metaclass wires `crud` up as `test_crud` on Test* subclasses.
    __metaclass__ = CRUDModelTestGenerator
    def additional(self, obj):
        """Perform additional modifications to the model object before saving.

        Derived classes can override this to set up dependent objects for CRUD
        tests.
        """
        return obj
    def crud(self):
        # Creates each sample object, saves and reloads it, verifies every
        # sampled attribute, then deletes it and checks the DB is empty.
        #
        # """Test CRUD operations on data model object.
        # This test creates an object of the data model, checks that it was
        # inserted into the database, and then deletes it.  We don't bother
        # testing 'update' because it's assumed that SQLAlchemy provides
        # this for us already.  We only want to test that our class behaves
        # the way we expect it (i.e. contains the data we want, and any
        # property methods do the right thing).
        # Set the attributes for this model object in the ``samples`` class
        # variable.
        # If an attribute goes through a mangle process, list it in the
        # ``mangles`` dictionary, keyed on the attribute name, and make
        # the value on that key a callable that mangles the sample
        # data as expected.
        # For example,
        # class TestSomeModel(ModelTest):
        #     domain = model.SomeModel
        #     samples = [dict(password='test')]
        #     mangles = dict(password=lambda p: md5.new(p).hexdigest())
        # """
        self.failIf(len(self.samples) < 1,
                    "not enough sample data, stranger")
        for sample in self.samples:
            # FIXME: add an inspecty thing to check we're setting only
            # function parameters, possibly raising errors if there are
            # sample datas without parameters matching.
            # instantiating model
            o = self.domain(**sample)
            # perform additional operations
            o = self.additional(o)
            print "pending:", self.dbsession.dirty
            # committing to db
            # NOTE(review): save()/clear()/get() are the pre-0.5 SQLAlchemy
            # session API -- this module targets that old API.
            self.dbsession.save(o)
            self.dbsession.flush()
            oid = o.id
            # clear the session, invalidating o
            self.dbsession.clear()
            del o
            # check it's in the database
            print "crud, object is:", self.domain
            print "object oid is:", oid
            o = self.dbsession.get(self.domain, oid)
            self.failIfEqual(None, o, "object not in database")
            # checking attributes
            for key in sample.keys():
                # test each attribute
                self.check_attribute(o, key, sample[key])
            # deleting object
            self.dbsession.delete(o)
            print "pending delete:", self.dbsession.deleted
            print "dirty:", self.dbsession.dirty
            self.dbsession.flush()
        # checking db
        self.check_empty_session()
        self.dbsession.close()
    def check_attribute(self, obj, key, value):
        """Check that the attribute has the correct value.

        ``obj`` is the model class being tested.

        ``key`` is the name of the attribute being tested.

        ``value`` is the expected value of the attribute.

        This function checks the test's ``mangles`` class dictionary to
        modify the ``value`` if necessary.
        """
        print "testing %s.%s is %s" % (obj.__class__.__name__, key, value)
        if hasattr(self, 'mangles'):
            if key in self.mangles.keys():
                # Apply the declared mangle so we compare post-mangle values.
                value = self.mangles[key](value)
        result = getattr(obj, key)
        self.assertEqual(value, result,
            "unexpected value on attribute '%s': expected '%r', got '%r'" % (key, value, result))
class TableTestGenerator(type):
    """Monkeypatching metaclass for table schema test classes.

    Rewrites the target class at creation time, generating test methods
    from its declared attributes so table tests need almost no code.
    """

    def __init__(mcs, name, bases, classdict):
        type.__init__(mcs, name, bases, classdict)
        # Leave base classes (not named Test*) untouched.
        if not name.startswith('Test'):
            return
        if 'table' not in classdict:
            warnings.warn("no table attribute found in %s" % name, stacklevel=2)
            return
        monkeypatch(mcs, 'test_insert', 'insert')
        # Optional checks appear as plural class attributes.
        for check in ('not_nullable', 'unique'):
            if check + 's' in classdict:
                monkeypatch(mcs, 'test_' + check, check)
class TableTest(TestBase):
    """Base class for testing the database schema.

    Derived classes should set the following attributes:

    ``table`` is a string containing the name of the table being tested,
    scoped relative to the module ``zkpylons.model``.

    ``samples`` is a list of dictionaries of columns and their values to use
    when inserting a row into the table.

    ``not_nullables`` is a list of column names that must not be undefined
    in the table.

    ``uniques`` is a list of column names that must uniquely identify
    the object.

    An example using this base class:

    class TestSomeTable(TableTest):
        table = 'module.SomeTable'
        samples = [dict(name='testguy', email_address='test@example.org')]
        not_nullables = ['name']
        uniques = ['name', 'email_address']
    """
    # Metaclass generates test_insert/test_not_nullable/test_unique.
    __metaclass__ = TableTestGenerator
    def check_empty_table(self):
        """Check that the database was left empty after the test"""
        query = select([func.count(self.table.c.id)])
        result = query.execute()
        self.assertEqual(0, result.fetchone()[0])
    def insert(self):
        # Inserts each sample row, verifies every column round-trips, then
        # re-inserts all samples to prove they can coexist (needed before
        # uniqueness tests make sense), and finally empties the table.
        #"""Test insertion of sample data
        #
        #Insert a row into the table, check that it was
        #inserted into the database, and then delete it.
        #
        #Set the attributes for this model object in the ``attrs`` class
        #variable.
        #"""
        self.failIf(len(self.samples) < 1, "not enough sample data, stranger")
        for sample in self.samples:
            print "testing insert of sample data:", sample
            query = self.table.insert()
            query.execute(sample)
            for key in sample.keys():
                col = getattr(self.table.c, key)
                query = select([col])
                result = query.execute()
                row = result.fetchone()
                print "row:", row
                self.assertEqual(sample[key], row[0])
            # Wipe between samples so the single-row select above is valid.
            self.table.delete().execute()
        # do this again to make sure the test data is all able to go into
        # the db, so that we know it's good to do uniqueness tests, for example
        for sample in self.samples:
            query = self.table.insert()
            query.execute(sample)
        # get the count of rows
        query = select([func.count(self.table.c.id)])
        result = query.execute()
        # check that it's the same length as the sample data
        self.assertEqual(len(self.samples), result.fetchone()[0])
        # ok, delete it
        self.table.delete().execute()
        self.check_empty_table()
    def not_nullable(self):
        # Inserts the first sample with each required column omitted in turn
        # and expects the database layer to raise.
        #"""Check that certain columns of a table are not nullable.
        #
        #Specify the ``not_nullables`` class variable with a list of column names
        #that must not be null, and this method will insert into the table rows
        #with each set to null and test for an exception from the database layer.
        #"""
        self.failIf(len(self.samples) < 1, "not enough sample data, stranger")
        for col in self.not_nullables:
            print "TEST: testing that %s is not nullable" % col
            # construct an attribute dictionary without the 'not null' attribute
            coldata = {}
            coldata.update(self.samples[0])
            del coldata[col]
            # create the model object
            print coldata
            query = self.table.insert()
            self.assertRaisesAny(query.execute, coldata)
        self.table.delete().execute()
        self.check_empty_table()
    def unique(self):
        """Check that certain attributes of a model object are unique.

        Specify the ``uniques`` class variable with a list of attributes
        that must be unique, and this method will create two copies of the
        model object with that attribute the same and test for an exception
        from the database layer.
        """
        self.failIf(len(self.samples) < 2, "not enough sample data, stranger")
        for col in self.uniques:
            self.table.insert().execute(self.samples[0])
            # Second row copies sample[1] but collides on the unique column.
            attr = {}
            attr.update(self.samples[1])
            attr[col] = self.samples[0][col]
            query = self.table.insert()
            self.assertRaisesAny(query.execute, attr)
            self.table.delete().execute()
        self.check_empty_table()
# Names re-exported to test modules doing `from ... import *`.
__all__ = ['TableTest',
           'ModelTest', 'CRUDModelTest',
           'model',
           ]
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Nginx, Inc.
*/
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_event.h>
#include <ngx_mail.h>
/* Forward declarations for the module's configuration constructors and
 * directive handlers defined below. */
static void *ngx_mail_core_create_main_conf(ngx_conf_t *cf);
static void *ngx_mail_core_create_srv_conf(ngx_conf_t *cf);
static char *ngx_mail_core_merge_srv_conf(ngx_conf_t *cf, void *parent,
    void *child);
static char *ngx_mail_core_server(ngx_conf_t *cf, ngx_command_t *cmd,
    void *conf);
static char *ngx_mail_core_listen(ngx_conf_t *cf, ngx_command_t *cmd,
    void *conf);
static char *ngx_mail_core_protocol(ngx_conf_t *cf, ngx_command_t *cmd,
    void *conf);
static char *ngx_mail_core_error_log(ngx_conf_t *cf, ngx_command_t *cmd,
    void *conf);
static char *ngx_mail_core_resolver(ngx_conf_t *cf, ngx_command_t *cmd,
    void *conf);
/* Directives understood inside the mail{} / server{} blocks; each entry
 * gives the name, allowed context and arity flags, handler, configuration
 * offset, and field offset. */
static ngx_command_t  ngx_mail_core_commands[] = {

    { ngx_string("server"),
      NGX_MAIL_MAIN_CONF|NGX_CONF_BLOCK|NGX_CONF_NOARGS,
      ngx_mail_core_server,
      0,
      0,
      NULL },

    { ngx_string("listen"),
      NGX_MAIL_SRV_CONF|NGX_CONF_1MORE,
      ngx_mail_core_listen,
      NGX_MAIL_SRV_CONF_OFFSET,
      0,
      NULL },

    { ngx_string("protocol"),
      NGX_MAIL_SRV_CONF|NGX_CONF_TAKE1,
      ngx_mail_core_protocol,
      NGX_MAIL_SRV_CONF_OFFSET,
      0,
      NULL },

    { ngx_string("timeout"),
      NGX_MAIL_MAIN_CONF|NGX_MAIL_SRV_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_msec_slot,
      NGX_MAIL_SRV_CONF_OFFSET,
      offsetof(ngx_mail_core_srv_conf_t, timeout),
      NULL },

    { ngx_string("server_name"),
      NGX_MAIL_MAIN_CONF|NGX_MAIL_SRV_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_str_slot,
      NGX_MAIL_SRV_CONF_OFFSET,
      offsetof(ngx_mail_core_srv_conf_t, server_name),
      NULL },

    { ngx_string("error_log"),
      NGX_MAIL_MAIN_CONF|NGX_MAIL_SRV_CONF|NGX_CONF_1MORE,
      ngx_mail_core_error_log,
      NGX_MAIL_SRV_CONF_OFFSET,
      0,
      NULL },

    { ngx_string("resolver"),
      NGX_MAIL_MAIN_CONF|NGX_MAIL_SRV_CONF|NGX_CONF_1MORE,
      ngx_mail_core_resolver,
      NGX_MAIL_SRV_CONF_OFFSET,
      0,
      NULL },

    { ngx_string("resolver_timeout"),
      NGX_MAIL_MAIN_CONF|NGX_MAIL_SRV_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_msec_slot,
      NGX_MAIL_SRV_CONF_OFFSET,
      offsetof(ngx_mail_core_srv_conf_t, resolver_timeout),
      NULL },

    { ngx_string("max_errors"),
      NGX_MAIL_MAIN_CONF|NGX_MAIL_SRV_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_num_slot,
      NGX_MAIL_SRV_CONF_OFFSET,
      offsetof(ngx_mail_core_srv_conf_t, max_errors),
      NULL },

      ngx_null_command
};
/* Mail-module context: configuration create/merge hooks for this module. */
static ngx_mail_module_t  ngx_mail_core_module_ctx = {
    NULL,                                  /* protocol */

    ngx_mail_core_create_main_conf,        /* create main configuration */
    NULL,                                  /* init main configuration */

    ngx_mail_core_create_srv_conf,         /* create server configuration */
    ngx_mail_core_merge_srv_conf           /* merge server configuration */
};
/* Public module descriptor registering the context and directives above. */
ngx_module_t  ngx_mail_core_module = {
    NGX_MODULE_V1,
    &ngx_mail_core_module_ctx,             /* module context */
    ngx_mail_core_commands,                /* module directives */
    NGX_MAIL_MODULE,                       /* module type */
    NULL,                                  /* init master */
    NULL,                                  /* init module */
    NULL,                                  /* init process */
    NULL,                                  /* init thread */
    NULL,                                  /* exit thread */
    NULL,                                  /* exit process */
    NULL,                                  /* exit master */
    NGX_MODULE_V1_PADDING
};
/* Allocates the mail{} main configuration and its server/listen arrays. */
static void *
ngx_mail_core_create_main_conf(ngx_conf_t *cf)
{
    ngx_mail_core_main_conf_t  *conf;

    conf = ngx_pcalloc(cf->pool, sizeof(ngx_mail_core_main_conf_t));
    if (conf == NULL) {
        return NULL;
    }

    /* one ngx_mail_core_srv_conf_t pointer per server{} block */
    if (ngx_array_init(&conf->servers, cf->pool, 4,
                       sizeof(ngx_mail_core_srv_conf_t *))
        != NGX_OK)
    {
        return NULL;
    }

    /* one ngx_mail_listen_t per "listen" directive across all servers */
    if (ngx_array_init(&conf->listen, cf->pool, 4,
                       sizeof(ngx_mail_listen_t))
        != NGX_OK)
    {
        return NULL;
    }

    return conf;
}
/* Allocates a per-server{} configuration with all values left unset so
 * merging can detect what the configuration file actually provided. */
static void *
ngx_mail_core_create_srv_conf(ngx_conf_t *cf)
{
    ngx_mail_core_srv_conf_t  *conf;

    conf = ngx_pcalloc(cf->pool, sizeof(ngx_mail_core_srv_conf_t));
    if (conf == NULL) {
        return NULL;
    }

    /*
     * set by ngx_pcalloc():
     *
     *     conf->protocol = NULL;
     *     conf->error_log = NULL;
     */

    conf->timeout = NGX_CONF_UNSET_MSEC;
    conf->resolver_timeout = NGX_CONF_UNSET_MSEC;
    conf->max_errors = NGX_CONF_UNSET_UINT;
    conf->resolver = NGX_CONF_UNSET_PTR;

    /* remember where this server{} was declared, for later diagnostics */
    conf->file_name = cf->conf_file->file.name.data;
    conf->line = cf->conf_file->line;

    return conf;
}
/* Merges a server{} configuration with the enclosing mail{} defaults and
 * validates that a protocol was determined for the server. */
static char *
ngx_mail_core_merge_srv_conf(ngx_conf_t *cf, void *parent, void *child)
{
    ngx_mail_core_srv_conf_t *prev = parent;
    ngx_mail_core_srv_conf_t *conf = child;

    ngx_conf_merge_msec_value(conf->timeout, prev->timeout, 60000);
    ngx_conf_merge_msec_value(conf->resolver_timeout, prev->resolver_timeout,
                              30000);

    ngx_conf_merge_uint_value(conf->max_errors, prev->max_errors, 5);

    ngx_conf_merge_str_value(conf->server_name, prev->server_name, "");

    /* no server_name configured anywhere: fall back to the host name */
    if (conf->server_name.len == 0) {
        conf->server_name = cf->cycle->hostname;
    }

    /* protocol must be set explicitly or inferred from the listen port */
    if (conf->protocol == NULL) {
        ngx_log_error(NGX_LOG_EMERG, cf->log, 0,
                      "unknown mail protocol for server in %s:%ui",
                      conf->file_name, conf->line);
        return NGX_CONF_ERROR;
    }

    if (conf->error_log == NULL) {
        if (prev->error_log) {
            conf->error_log = prev->error_log;
        } else {
            conf->error_log = &cf->cycle->new_log;
        }
    }

    ngx_conf_merge_ptr_value(conf->resolver, prev->resolver, NULL);

    return NGX_CONF_OK;
}
/* Handler for the "server" directive: creates per-server configurations
 * for every mail module, registers the server, and parses the server{}
 * block contents with server-level context. */
static char *
ngx_mail_core_server(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
    char                       *rv;
    void                       *mconf;
    ngx_uint_t                  m;
    ngx_conf_t                  pcf;
    ngx_mail_module_t          *module;
    ngx_mail_conf_ctx_t        *ctx, *mail_ctx;
    ngx_mail_core_srv_conf_t   *cscf, **cscfp;
    ngx_mail_core_main_conf_t  *cmcf;

    ctx = ngx_pcalloc(cf->pool, sizeof(ngx_mail_conf_ctx_t));
    if (ctx == NULL) {
        return NGX_CONF_ERROR;
    }

    /* the main_conf array is shared with the enclosing mail{} context */
    mail_ctx = cf->ctx;
    ctx->main_conf = mail_ctx->main_conf;

    /* the server{}'s srv_conf */

    ctx->srv_conf = ngx_pcalloc(cf->pool, sizeof(void *) * ngx_mail_max_module);
    if (ctx->srv_conf == NULL) {
        return NGX_CONF_ERROR;
    }

    /* let every mail module create its server configuration */
    for (m = 0; cf->cycle->modules[m]; m++) {
        if (cf->cycle->modules[m]->type != NGX_MAIL_MODULE) {
            continue;
        }

        module = cf->cycle->modules[m]->ctx;

        if (module->create_srv_conf) {
            mconf = module->create_srv_conf(cf);
            if (mconf == NULL) {
                return NGX_CONF_ERROR;
            }

            ctx->srv_conf[cf->cycle->modules[m]->ctx_index] = mconf;
        }
    }

    /* the server configuration context */

    cscf = ctx->srv_conf[ngx_mail_core_module.ctx_index];
    cscf->ctx = ctx;

    cmcf = ctx->main_conf[ngx_mail_core_module.ctx_index];

    cscfp = ngx_array_push(&cmcf->servers);
    if (cscfp == NULL) {
        return NGX_CONF_ERROR;
    }

    *cscfp = cscf;

    /* parse inside server{} */

    /* save/restore cf around the recursive parse of the block body */
    pcf = *cf;
    cf->ctx = ctx;
    cf->cmd_type = NGX_MAIL_SRV_CONF;

    rv = ngx_conf_parse(cf, NULL);

    *cf = pcf;

    /* a server{} without any "listen" directive is a hard error */
    if (rv == NGX_CONF_OK && !cscf->listen) {
        ngx_log_error(NGX_LOG_EMERG, cf->log, 0,
                      "no \"listen\" is defined for server in %s:%ui",
                      cscf->file_name, cscf->line);
        return NGX_CONF_ERROR;
    }

    return rv;
}
/* Handler for the "listen" directive: parses the address/port plus all
 * optional parameters, infers the protocol from the port when needed,
 * and records one ngx_mail_listen_t per resolved address, rejecting
 * duplicates. */
static char *
ngx_mail_core_listen(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
    ngx_mail_core_srv_conf_t  *cscf = conf;

    ngx_str_t                  *value, size;
    ngx_url_t                   u;
    ngx_uint_t                  i, n, m;
    ngx_mail_listen_t          *ls, *als, *nls;
    ngx_mail_module_t          *module;
    ngx_mail_core_main_conf_t  *cmcf;

    cscf->listen = 1;

    value = cf->args->elts;

    ngx_memzero(&u, sizeof(ngx_url_t));

    /* first argument is the address[:port] to listen on */
    u.url = value[1];
    u.listen = 1;

    if (ngx_parse_url(cf->pool, &u) != NGX_OK) {
        if (u.err) {
            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                               "%s in \"%V\" of the \"listen\" directive",
                               u.err, &u.url);
        }

        return NGX_CONF_ERROR;
    }

    cmcf = ngx_mail_conf_get_module_main_conf(cf, ngx_mail_core_module);

    ls = ngx_array_push(&cmcf->listen);
    if (ls == NULL) {
        return NGX_CONF_ERROR;
    }

    ngx_memzero(ls, sizeof(ngx_mail_listen_t));

    /* socket defaults before parameter parsing */
    ls->backlog = NGX_LISTEN_BACKLOG;
    ls->rcvbuf = -1;
    ls->sndbuf = -1;
    ls->ctx = cf->ctx;

#if (NGX_HAVE_INET6)
    ls->ipv6only = 1;
#endif

    /* no explicit "protocol" directive: infer it from the listen port */
    if (cscf->protocol == NULL) {
        for (m = 0; cf->cycle->modules[m]; m++) {
            if (cf->cycle->modules[m]->type != NGX_MAIL_MODULE) {
                continue;
            }

            module = cf->cycle->modules[m]->ctx;

            if (module->protocol == NULL) {
                continue;
            }

            for (i = 0; module->protocol->port[i]; i++) {
                if (module->protocol->port[i] == u.port) {
                    cscf->protocol = module->protocol;
                    break;
                }
            }
        }
    }

    /* parse the optional listen parameters */
    for (i = 2; i < cf->args->nelts; i++) {

        if (ngx_strcmp(value[i].data, "bind") == 0) {
            ls->bind = 1;
            continue;
        }

        if (ngx_strncmp(value[i].data, "backlog=", 8) == 0) {
            ls->backlog = ngx_atoi(value[i].data + 8, value[i].len - 8);
            ls->bind = 1;

            if (ls->backlog == NGX_ERROR || ls->backlog == 0) {
                ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                                   "invalid backlog \"%V\"", &value[i]);
                return NGX_CONF_ERROR;
            }

            continue;
        }

        if (ngx_strncmp(value[i].data, "rcvbuf=", 7) == 0) {
            size.len = value[i].len - 7;
            size.data = value[i].data + 7;

            ls->rcvbuf = ngx_parse_size(&size);
            ls->bind = 1;

            if (ls->rcvbuf == NGX_ERROR) {
                ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                                   "invalid rcvbuf \"%V\"", &value[i]);
                return NGX_CONF_ERROR;
            }

            continue;
        }

        if (ngx_strncmp(value[i].data, "sndbuf=", 7) == 0) {
            size.len = value[i].len - 7;
            size.data = value[i].data + 7;

            ls->sndbuf = ngx_parse_size(&size);
            ls->bind = 1;

            if (ls->sndbuf == NGX_ERROR) {
                ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                                   "invalid sndbuf \"%V\"", &value[i]);
                return NGX_CONF_ERROR;
            }

            continue;
        }

        if (ngx_strncmp(value[i].data, "ipv6only=o", 10) == 0) {
#if (NGX_HAVE_INET6 && defined IPV6_V6ONLY)
            /* the prefix match above leaves only the "n"/"ff" suffix */
            if (ngx_strcmp(&value[i].data[10], "n") == 0) {
                ls->ipv6only = 1;

            } else if (ngx_strcmp(&value[i].data[10], "ff") == 0) {
                ls->ipv6only = 0;

            } else {
                ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                                   "invalid ipv6only flags \"%s\"",
                                   &value[i].data[9]);
                return NGX_CONF_ERROR;
            }

            ls->bind = 1;
            continue;
#else
            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                               "ipv6only is not supported "
                               "on this platform");
            return NGX_CONF_ERROR;
#endif
        }

        if (ngx_strcmp(value[i].data, "ssl") == 0) {
#if (NGX_MAIL_SSL)
            ngx_mail_ssl_conf_t  *sslcf;

            sslcf = ngx_mail_conf_get_module_srv_conf(cf, ngx_mail_ssl_module);

            sslcf->listen = 1;
            /* remember where ssl was enabled, for later diagnostics */
            sslcf->file = cf->conf_file->file.name.data;
            sslcf->line = cf->conf_file->line;

            ls->ssl = 1;

            continue;
#else
            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                               "the \"ssl\" parameter requires "
                               "ngx_mail_ssl_module");
            return NGX_CONF_ERROR;
#endif
        }

        if (ngx_strncmp(value[i].data, "so_keepalive=", 13) == 0) {

            if (ngx_strcmp(&value[i].data[13], "on") == 0) {
                ls->so_keepalive = 1;

            } else if (ngx_strcmp(&value[i].data[13], "off") == 0) {
                ls->so_keepalive = 2;

            } else {

#if (NGX_HAVE_KEEPALIVE_TUNABLE)
                u_char     *p, *end;
                ngx_str_t   s;

                /* parse "keepidle:keepintvl:keepcnt"; each part optional */
                end = value[i].data + value[i].len;
                s.data = value[i].data + 13;

                p = ngx_strlchr(s.data, end, ':');
                if (p == NULL) {
                    p = end;
                }

                if (p > s.data) {
                    s.len = p - s.data;

                    ls->tcp_keepidle = ngx_parse_time(&s, 1);
                    if (ls->tcp_keepidle == (time_t) NGX_ERROR) {
                        goto invalid_so_keepalive;
                    }
                }

                s.data = (p < end) ? (p + 1) : end;

                p = ngx_strlchr(s.data, end, ':');
                if (p == NULL) {
                    p = end;
                }

                if (p > s.data) {
                    s.len = p - s.data;

                    ls->tcp_keepintvl = ngx_parse_time(&s, 1);
                    if (ls->tcp_keepintvl == (time_t) NGX_ERROR) {
                        goto invalid_so_keepalive;
                    }
                }

                s.data = (p < end) ? (p + 1) : end;

                if (s.data < end) {
                    s.len = end - s.data;

                    ls->tcp_keepcnt = ngx_atoi(s.data, s.len);
                    if (ls->tcp_keepcnt == NGX_ERROR) {
                        goto invalid_so_keepalive;
                    }
                }

                /* at least one of the three fields must be present */
                if (ls->tcp_keepidle == 0 && ls->tcp_keepintvl == 0
                    && ls->tcp_keepcnt == 0)
                {
                    goto invalid_so_keepalive;
                }

                ls->so_keepalive = 1;

#else

                ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                                   "the \"so_keepalive\" parameter accepts "
                                   "only \"on\" or \"off\" on this platform");
                return NGX_CONF_ERROR;

#endif
            }

            ls->bind = 1;

            continue;

#if (NGX_HAVE_KEEPALIVE_TUNABLE)
        invalid_so_keepalive:

            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                               "invalid so_keepalive value: \"%s\"",
                               &value[i].data[13]);
            return NGX_CONF_ERROR;
#endif
        }

        if (ngx_strcmp(value[i].data, "proxy_protocol") == 0) {
            ls->proxy_protocol = 1;
            continue;
        }

        ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                           "invalid parameter \"%V\"", &value[i]);
        return NGX_CONF_ERROR;
    }

    /* register every distinct resolved address of the URL */
    for (n = 0; n < u.naddrs; n++) {

        /* skip addresses equal to one already handled in this loop */
        for (i = 0; i < n; i++) {
            if (ngx_cmp_sockaddr(u.addrs[n].sockaddr, u.addrs[n].socklen,
                                 u.addrs[i].sockaddr, u.addrs[i].socklen, 1)
                == NGX_OK)
            {
                goto next;
            }
        }

        /* first address reuses ls; further addresses get their own entry */
        if (n != 0) {
            nls = ngx_array_push(&cmcf->listen);
            if (nls == NULL) {
                return NGX_CONF_ERROR;
            }

            *nls = *ls;

        } else {
            nls = ls;
        }

        nls->sockaddr = u.addrs[n].sockaddr;
        nls->socklen = u.addrs[n].socklen;
        nls->addr_text = u.addrs[n].name;
        nls->wildcard = ngx_inet_wildcard(nls->sockaddr);

        /* reject an address:port already claimed by an earlier listen */
        als = cmcf->listen.elts;

        for (i = 0; i < cmcf->listen.nelts - 1; i++) {

            if (ngx_cmp_sockaddr(als[i].sockaddr, als[i].socklen,
                                 nls->sockaddr, nls->socklen, 1)
                != NGX_OK)
            {
                continue;
            }

            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                               "duplicate \"%V\" address and port pair",
                               &nls->addr_text);
            return NGX_CONF_ERROR;
        }

    next:
        continue;
    }

    return NGX_CONF_OK;
}
/*
 * Handler for the "protocol" directive: selects the mail protocol for the
 * enclosing server{} block by matching the single directive argument against
 * the protocol registered by each mail module.  Fails with NGX_CONF_ERROR
 * when no module claims the given name.
 */
static char *
ngx_mail_core_protocol(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
    ngx_mail_core_srv_conf_t  *cscf = conf;

    ngx_uint_t          i;
    ngx_str_t          *args;
    ngx_mail_module_t  *mod;

    args = cf->args->elts;

    /* cf->cycle->modules is a NULL-terminated module list */
    for (i = 0; cf->cycle->modules[i] != NULL; i++) {

        if (cf->cycle->modules[i]->type != NGX_MAIL_MODULE) {
            continue;
        }

        mod = cf->cycle->modules[i]->ctx;

        if (mod->protocol == NULL) {
            continue;
        }

        if (ngx_strcmp(mod->protocol->name.data, args[1].data) != 0) {
            continue;
        }

        cscf->protocol = mod->protocol;

        return NGX_CONF_OK;
    }

    ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                       "unknown protocol \"%V\"", &args[1]);

    return NGX_CONF_ERROR;
}
/*
 * Handler for the "error_log" directive: parsing of the log file and level
 * arguments is delegated to the shared ngx_log_set_log() helper, which
 * stores the result in the server configuration's error_log chain.
 */
static char *
ngx_mail_core_error_log(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
ngx_mail_core_srv_conf_t *cscf = conf;
return ngx_log_set_log(cf, &cscf->error_log);
}
/*
 * Handler for the "resolver" directive: configures the DNS resolver used by
 * the mail module.  "resolver off" explicitly disables resolving; otherwise
 * the remaining arguments are handed to ngx_resolver_create() as the
 * nameserver list.  A second "resolver" directive in the same server{}
 * block is rejected as a duplicate.
 */
static char *
ngx_mail_core_resolver(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
    ngx_mail_core_srv_conf_t  *cscf = conf;

    ngx_str_t  *args = cf->args->elts;

    /* NGX_CONF_UNSET_PTR means no "resolver" directive has been seen yet */
    if (cscf->resolver != NGX_CONF_UNSET_PTR) {
        return "is duplicate";
    }

    if (ngx_strcmp(args[1].data, "off") == 0) {
        cscf->resolver = NULL;
        return NGX_CONF_OK;
    }

    cscf->resolver = ngx_resolver_create(cf, &args[1], cf->args->nelts - 1);

    return (cscf->resolver == NULL) ? NGX_CONF_ERROR : NGX_CONF_OK;
}
/*
 * Shared handler for capability-list directives: appends every directive
 * argument to the ngx_array_t located at cmd->offset inside the module's
 * configuration struct.  Presumably reused by the imap/pop3/smtp
 * capability directives -- confirm against the command tables.
 */
char *
ngx_mail_capabilities(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
char *p = conf;
ngx_str_t *c, *value;
ngx_uint_t i;
ngx_array_t *a;
/* cmd->offset addresses the target array within the conf struct */
a = (ngx_array_t *) (p + cmd->offset);
value = cf->args->elts;
/* value[0] is the directive name itself; copy the remaining arguments */
for (i = 1; i < cf->args->nelts; i++) {
c = ngx_array_push(a);
if (c == NULL) {
return NGX_CONF_ERROR;
}
*c = value[i];
}
return NGX_CONF_OK;
} | c | github | https://github.com/nginx/nginx | src/mail/ngx_mail_core_module.c
""" Defines classes and functions for working with Qt's rich text system.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import io
import os
import re
# System library imports
from IPython.external.qt import QtGui
# IPython imports
from IPython.utils import py3compat
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# A regular expression for an HTML paragraph with no content.
EMPTY_P_RE = re.compile(r'<p[^/>]*>\s*</p>')
# A regular expression for matching images in rich text HTML.
# Note that this is overly restrictive, but Qt's output is predictable...
IMG_RE = re.compile(r'<img src="(?P<name>[\d]+)" />')
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class HtmlExporter(object):
    """ A stateful HTML exporter for a Q(Plain)TextEdit.

    This class is designed for convenient user interaction.
    """

    def __init__(self, control):
        """ Creates an HtmlExporter for the given Q(Plain)TextEdit.
        """
        assert isinstance(control, (QtGui.QPlainTextEdit, QtGui.QTextEdit))
        self.control = control
        # Filename suggested in (and remembered between) save dialogs.
        self.filename = 'ipython.html'
        # Optional callable used to render <img> tags; see default_image_tag().
        self.image_tag = None
        # Tri-state: None = ask the user each time; True/False = remembered
        # answer to the "inline PNGs?" question.
        self.inline_png = None

    def export(self):
        """ Displays a dialog for exporting HTML generated by Qt's rich text
        system.

        Returns
        -------
        The name of the file that was saved, or None if no file was saved.
        """
        parent = self.control.window()
        dialog = QtGui.QFileDialog(parent, 'Save as...')
        dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
        filters = [
            'HTML with PNG figures (*.html *.htm)',
            'XHTML with inline SVG figures (*.xhtml *.xml)'
        ]
        dialog.setNameFilters(filters)
        if self.filename:
            dialog.selectFile(self.filename)
            # Preselect the XHTML filter when the remembered filename already
            # has an XML-ish extension.
            root,ext = os.path.splitext(self.filename)
            if ext.lower() in ('.xml', '.xhtml'):
                dialog.selectNameFilter(filters[-1])

        if dialog.exec_():
            self.filename = dialog.selectedFiles()[0]
            choice = dialog.selectedNameFilter()
            html = py3compat.cast_unicode(self.control.document().toHtml())

            # Configure the exporter.
            if choice.startswith('XHTML'):
                exporter = export_xhtml
            else:
                # If there are PNGs, decide how to export them.
                inline = self.inline_png
                if inline is None and IMG_RE.search(html):
                    # Build a one-off prompt: a message box with Inline /
                    # External buttons plus a "don't ask again" checkbox.
                    dialog = QtGui.QDialog(parent)
                    dialog.setWindowTitle('Save as...')
                    layout = QtGui.QVBoxLayout(dialog)
                    msg = "Exporting HTML with PNGs"
                    info = "Would you like inline PNGs (single large html " \
                        "file) or external image files?"
                    checkbox = QtGui.QCheckBox("&Don't ask again")
                    checkbox.setShortcut('D')
                    ib = QtGui.QPushButton("&Inline")
                    ib.setShortcut('I')
                    eb = QtGui.QPushButton("&External")
                    eb.setShortcut('E')
                    box = QtGui.QMessageBox(QtGui.QMessageBox.Question,
                                            dialog.windowTitle(), msg)
                    box.setInformativeText(info)
                    box.addButton(ib, QtGui.QMessageBox.NoRole)
                    box.addButton(eb, QtGui.QMessageBox.YesRole)
                    layout.setSpacing(0)
                    layout.addWidget(box)
                    layout.addWidget(checkbox)
                    dialog.setLayout(layout)
                    dialog.show()
                    reply = box.exec_()
                    dialog.hide()
                    # Presumably the "Inline" button (added first) yields
                    # reply 0 -- confirm against Qt's button-role mapping.
                    inline = (reply == 0)
                    if checkbox.checkState():
                        # Don't ask anymore; always use this choice.
                        self.inline_png = inline
                # Adapt export_html's 4-arg signature to the common
                # (html, filename, image_tag) exporter shape.
                exporter = lambda h, f, i: export_html(h, f, i, inline)

            # Perform the export!
            try:
                return exporter(html, self.filename, self.image_tag)
            except Exception as e:
                msg = "Error exporting HTML to %s\n" % self.filename + str(e)
                reply = QtGui.QMessageBox.warning(parent, 'Error', msg,
                    QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)

        # Dialog was cancelled, or the export failed.
        return None
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def export_html(html, filename, image_tag = None, inline = True):
    """ Export the contents of the ConsoleWidget as HTML.

    Parameters
    ----------
    html : unicode,
        A Python unicode string containing the Qt HTML to export.

    filename : str
        The file to be saved.

    image_tag : callable, optional (default None)
        Used to convert images. See ``default_image_tag()`` for information.

    inline : bool, optional [default True]
        If True, include images as inline PNGs. Otherwise, include them as
        links to external PNG files, mimicking web browsers' "Web Page,
        Complete" behavior.
    """
    tag = default_image_tag if image_tag is None else image_tag

    if inline:
        # Inline mode: no supporting-files directory is needed.
        path = None
    else:
        # External images live in a "<name>_files" directory alongside the
        # exported HTML file.
        path = os.path.splitext(filename)[0] + "_files"
        if os.path.isfile(path):
            raise OSError("%s exists, but is not a directory." % path)

    with io.open(filename, 'w', encoding='utf-8') as f:
        cleaned = fix_html(html)
        f.write(IMG_RE.sub(lambda match: tag(match, path = path, format = "png"),
                           cleaned))
def export_xhtml(html, filename, image_tag=None):
    """ Export the contents of the ConsoleWidget as XHTML with inline SVGs.

    Parameters
    ----------
    html : unicode,
        A Python unicode string containing the Qt HTML to export.

    filename : str
        The file to be saved.

    image_tag : callable, optional (default None)
        Used to convert images. See ``default_image_tag()`` for information.
    """
    tag = image_tag if image_tag is not None else default_image_tag

    with io.open(filename, 'w', encoding='utf-8') as f:
        # Hack to rewrite the opening <html> tag into an XHTML one -- note
        # that no check for valid XML is performed.
        offset = html.find("<html>")
        assert offset > -1, 'Invalid HTML string: no <html> tag.'
        xhtml = u'<html xmlns="http://www.w3.org/1999/xhtml">\n' + html[offset + 6:]
        xhtml = fix_html(xhtml)
        f.write(IMG_RE.sub(lambda match: tag(match, path=None, format="svg"),
                           xhtml))
def default_image_tag(match, path = None, format = "png"):
    """ Return (X)HTML mark-up for the image-tag given by match.

    This default implementation merely removes the image, and exists mostly
    for documentation purposes. More information than is present in the Qt
    HTML is required to supply the images.

    Parameters
    ----------
    match : re.SRE_Match
        A match to an HTML image tag as exported by Qt, with match.group("Name")
        containing the matched image ID.

    path : string|None, optional [default None]
        If not None, specifies a path to which supporting files may be written
        (e.g., for linked images). If None, all images are to be included
        inline.

    format : "png"|"svg", optional [default "png"]
        Format for returned or referenced images.
    """
    # Substituting the empty string simply strips the matched <img> tag
    # from the exported document.
    return u""
def fix_html(html):
""" Transforms a Qt-generated HTML string into a standards-compliant one.
Parameters
----------
html : unicode,
A Python unicode string containing the Qt HTML.
"""
# A UTF-8 declaration is needed for proper rendering of some characters
# (e.g., indented commands) when viewing exported HTML on a local system
# (i.e., without seeing an encoding declaration in an HTTP header).
# C.f. http://www.w3.org/International/O-charset for details.
# Insert the <meta> charset declaration immediately after <head>, but only
# when a <head> element is present at all.
offset = html.find('<head>')
if offset > -1:
html = (html[:offset+6]+
'\n<meta http-equiv="Content-Type" '+
'content="text/html; charset=utf-8" />\n'+
html[offset+6:])
# Replace empty paragraphs tags with line breaks.  EMPTY_P_RE matches
# <p ...></p> elements with no content; passing a compiled pattern to
# re.sub is equivalent to calling pattern.sub directly.
html = re.sub(EMPTY_P_RE, '<br/>', html)
return html | unknown | codeparrot/codeparrot-clean | |
// Helper file to include dispatched functions declaration:
//
// Usage:
// #define CV_CPU_SIMD_FILENAME "<filename>.simd.hpp"
// #define CV_CPU_DISPATCH_MODE AVX2
// #include "opencv2/core/private/cv_cpu_include_simd_declarations.hpp"
// #define CV_CPU_DISPATCH_MODE SSE2
// #include "opencv2/core/private/cv_cpu_include_simd_declarations.hpp"
// Silence MSVC's C4702 ("unreachable code") warning in optimized builds.
#ifndef CV_DISABLE_OPTIMIZATION
#ifdef _MSC_VER
#pragma warning(disable: 4702) // unreachable code
#endif
#endif
// Ask the included .simd.hpp for declarations only (no definitions).
#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
#define CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
#endif
// Wrap the declarations in a per-mode namespace (e.g. opt_AVX2) so each
// dispatch mode gets its own copy of the declared symbols.
#undef CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
#undef CV_CPU_OPTIMIZATION_NAMESPACE_END
#define CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN namespace __CV_CAT(opt_, CV_CPU_DISPATCH_MODE) {
#define CV_CPU_OPTIMIZATION_NAMESPACE_END }
#include CV_CPU_SIMD_FILENAME
// Reset the helper macros so this header can be included again with a
// different CV_CPU_DISPATCH_MODE.
#undef CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
#undef CV_CPU_OPTIMIZATION_NAMESPACE_END
#undef CV_CPU_DISPATCH_MODE | unknown | github | https://github.com/opencv/opencv | modules/core/include/opencv2/core/private/cv_cpu_include_simd_declarations.hpp
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.trait_types import Bool, Instance, Event, Int
from traits.traits import Color
# ============= standard library imports ========================
from datetime import datetime
# ============= local library imports ==========================
from pychron.loggable import Loggable
from pychron.pychron_constants import LIGHT_YELLOW
class Consoleable(Loggable):
    """Mixin that mirrors log messages (info/warning/heading) to an optional
    rich-text console display, in addition to normal Loggable logging.
    """

    # Not consulted anywhere in this class (see the commented-out condition
    # in info()) -- presumably read by subclasses or callers; confirm.
    use_message_colormapping = Bool

    # Lazily created console widget controller; see _console_display_default.
    console_display = Instance('pychron.displays.display.DisplayController')

    # Event trait: assigned a "color|message" string every time a message is
    # written, so listeners can mirror console output elsewhere.
    console_updated = Event

    # Appearance settings applied to the console display.
    console_bgcolor = LIGHT_YELLOW
    console_fontsize = Int(11)
    console_default_color = Color('black')

    def console_bind_preferences(self, prefid):
        """Bind the console appearance traits to the preference entries
        under ``prefid`` (bindings track future preference changes)."""
        from pychron.core.ui.preference_binding import color_bind_preference, bind_preference

        # NOTE(review): this binds '{}.bgcolor' while console_set_preferences
        # reads '{}.bg_color' -- confirm which preference key is correct.
        color_bind_preference(self, 'console_bgcolor', '{}.bgcolor'.format(prefid))
        color_bind_preference(self, 'console_default_color', '{}.textcolor'.format(prefid))

        bind_preference(self, 'console_fontsize', '{}.fontsize'.format(prefid))

    def console_set_preferences(self, preferences, prefid):
        """One-shot copy of the console appearance values out of a
        ``preferences`` object (no ongoing binding)."""
        from pychron.core.ui.preference_binding import set_preference, color_set_preference

        color_set_preference(preferences, self, 'console_bgcolor', '{}.bg_color'.format(prefid))
        color_set_preference(preferences, self, 'console_default_color', '{}.textcolor'.format(prefid))
        set_preference(preferences, self, 'console_fontsize', '{}.fontsize'.format(prefid), cast=int)

    def warning(self, msg, log=True, color=None, *args, **kw):
        """Log a warning and echo it (uppercased, red by default) to the
        console display.

        NOTE(review): the ``log`` argument is accepted but never consulted;
        the underlying logger is always called.
        """
        # The logger receives the original-case message; only the console
        # copy below is uppercased.
        super(Consoleable, self).warning(msg, *args, **kw)

        if color is None:
            color = 'red'

        msg = msg.upper()
        if self.console_display:
            self.console_display.add_text(msg, color=color)

        # Fire the console_updated event with a "color|message" payload.
        self.console_updated = '{}|{}'.format(color, msg)

    def heading(self, msg, decorate_chr='*', *args, **kw):
        """Log ``msg`` framed by decoration, e.g. "******* msg *******"."""
        d = decorate_chr * 7
        msg = '{} {} {}'.format(d, msg, d)
        self.info(msg)

    def info(self, msg, log=True, color=None, *args, **kw):
        """Log an info message and echo it, prefixed with an HH:MM:SS
        timestamp, to the console display.

        When ``log`` is False the message goes only to the console display
        and the console_updated event.
        """
        if color is None:  # or not self.use_message_colormapping:
            color = self.console_default_color

        if self.console_display:
            t = datetime.now().strftime('%H:%M:%S')
            # Note: the timestamped message is also what gets logged below.
            msg = '{} -- {}'.format(t, msg)
            self.console_display.add_text(msg, color=color)

        if log:
            super(Consoleable, self).info(msg, *args, **kw)

        self.console_updated = '{}|{}'.format(color, msg)

    def info_marker(self, char='=', color=None):
        """Write a horizontal marker line made of ``char`` to the console."""
        if color is None:
            color = self.console_default_color
        if self.console_display:
            self.console_display.add_marker(char, color=color)

    def info_heading(self, msg):
        """Log ``msg`` framed by blank lines and '=' marker lines."""
        self.info('')
        self.info_marker('=')
        self.info(msg)
        self.info_marker('=')
        self.info('')

    def _console_display_default(self):
        # Traits default initializer: build the display lazily with the
        # configured appearance values.
        from pychron.displays.display import DisplayController

        return DisplayController(
            bgcolor=self.console_bgcolor,
            font_size=self.console_fontsize,
            default_color=self.console_default_color,
            max_blocks=100)
# ============= EOF ============================================= | unknown | codeparrot/codeparrot-clean | ||
c-ares security
===============
This document is intended to provide guidance on how security vulnerabilities
should be handled in the c-ares project.
Publishing Information
----------------------
All known and public c-ares vulnerabilities will be listed on [the c-ares web
site](https://c-ares.org/vulns.html).
Security vulnerabilities should not be entered in the project's public bug
tracker unless the necessary configuration is in place to limit access to the
issue to only the reporter and the project's security team.
Vulnerability Handling
----------------------
The typical process for handling a new security vulnerability is as follows.
No information should be made public about a vulnerability until it is
formally announced at the end of this process. That means, for example that a
bug tracker entry must NOT be created to track the issue since that will make
the issue public and it should not be discussed on the project's public
mailing list. Also messages associated with any commits should not make any
reference to the security nature of the commit if done prior to the public
announcement.
- The person discovering the issue, the reporter, reports the vulnerability
privately to `c-ares-security@haxx.se`. That's an email alias that reaches a
handful of selected and trusted people.
- Messages that do not relate to the reporting or managing of an undisclosed
security vulnerability in c-ares are ignored and no further action is
required.
- A person in the security team sends an e-mail to the original reporter to
acknowledge the report.
- The security team investigates the report and either rejects it or accepts
it.
- If the report is rejected, the team writes to the reporter to explain why.
- If the report is accepted, the team writes to the reporter to let them
know it is accepted and that they are working on a fix.
- The security team discusses the problem, works out a fix, considers the
impact of the problem and suggests a release schedule. This discussion
should involve the reporter as much as possible.
- The release of the information should be "as soon as possible" and is most
often synced with an upcoming release that contains the fix. If the
reporter, or anyone else, thinks the next planned release is too far away
then a separate earlier release for security reasons should be considered.
- Write a security advisory draft about the problem that explains what the
problem is, its impact, which versions it affects, solutions or
workarounds, when the release is out and make sure to credit all
contributors properly.
- Request a CVE number from
[distros@openwall](http://oss-security.openwall.org/wiki/mailing-lists/distros)
when also informing and preparing them for the upcoming public security
vulnerability announcement - attach the advisory draft for information. Note
that 'distros' won't accept an embargo longer than 19 days.
- Update the "security advisory" with the CVE number.
- The security team commits the fix in a private branch. The commit message
should ideally contain the CVE number. This fix is usually also distributed
to the 'distros' mailing list to allow them to use the fix prior to the
public announcement.
- At the day of the next release, the private branch is merged into the master
branch and pushed. Once pushed, the information is accessible to the public
and the actual release should follow suit immediately afterwards.
- The project team creates a release that includes the fix.
- The project team announces the release and the vulnerability to the world in
the same manner we always announce releases. It gets sent to the c-ares
mailing list and the oss-security mailing list.
- The security web page on the web site should get the new vulnerability
mentioned.
C-ARES-SECURITY (at haxx dot se)
--------------------------------
Who is on this list? There are a couple of criteria you must meet, and then we
might ask you to join the list or you can ask to join it. It really isn't very
formal. We basically only require that you have a long-term presence in the
c-ares project and you have shown an understanding for the project and its way
of working. You must've been around for a good while and you should have no
plans in vanishing in the near future.
We do not make the list of participants public mostly because it tends to vary
somewhat over time and a list somewhere will only risk getting outdated. | unknown | github | https://github.com/nodejs/node | deps/cares/SECURITY.md |
# Copyright (C) 2005, 2006 Martin von Löwis
# Licensed to PSF under a Contributor Agreement.
# The bdist_wininst command proper
# based on bdist_wininst
"""
Implements the bdist_msi command.
"""
import sys, os
from distutils.core import Command
from distutils.dir_util import remove_tree
from distutils.sysconfig import get_python_version
from distutils.version import StrictVersion
from distutils.errors import DistutilsOptionError
from distutils.util import get_platform
from distutils import log
import msilib
from msilib import schema, sequence, text
from msilib import Directory, Feature, Dialog, add_data
class PyDialog(Dialog):
    """msilib Dialog with a fixed layout: controls at the top, a horizontal
    ruler near the bottom, and a row of back/next/cancel buttons below it.
    Optionally a bitmap at the left."""

    def __init__(self, *args, **kw):
        """PyDialog(database, name, x, y, w, h, attributes, title, first,
        default, cancel, bitmap=true)"""
        Dialog.__init__(self, *args)
        ruler = self.h - 36
        # Width the (currently disabled) side bitmap would occupy.
        bmwidth = 152 * ruler / 328
        #if kw.get("bitmap", True):
        #    self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin")
        self.line("BottomLine", 0, ruler, self.w, 0)

    def title(self, title):
        "Set the title text of the dialog at the top."
        # name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix,
        # text, in VerdanaBold10
        self.text("Title", 15, 10, 320, 60, 0x30003,
                  r"{\VerdanaBold10}%s" % title)

    def back(self, title, next, name = "Back", active = 1):
        """Add a back button with a given title, the tab-next button,
        its name in the Control table, possibly initially disabled.

        Return the button, so that events can be associated"""
        flags = 3 if active else 1  # Visible|Enabled vs. Visible only
        return self.pushbutton(name, 180, self.h - 27, 56, 17, flags,
                               title, next)

    def cancel(self, title, next, name = "Cancel", active = 1):
        """Add a cancel button with a given title, the tab-next button,
        its name in the Control table, possibly initially disabled.

        Return the button, so that events can be associated"""
        flags = 3 if active else 1  # Visible|Enabled vs. Visible only
        return self.pushbutton(name, 304, self.h - 27, 56, 17, flags,
                               title, next)

    def next(self, title, next, name = "Next", active = 1):
        """Add a Next button with a given title, the tab-next button,
        its name in the Control table, possibly initially disabled.

        Return the button, so that events can be associated"""
        flags = 3 if active else 1  # Visible|Enabled vs. Visible only
        return self.pushbutton(name, 236, self.h - 27, 56, 17, flags,
                               title, next)

    def xbutton(self, name, title, next, xpos):
        """Add a button with a given title, the tab-next button,
        its name in the Control table, giving its x position; the
        y-position is aligned with the other buttons.

        Return the button, so that events can be associated"""
        return self.pushbutton(name, int(self.w * xpos - 28), self.h - 27,
                               56, 17, 3, title, next)
class bdist_msi(Command):
description = "create a Microsoft Installer (.msi) binary distribution"
user_options = [('bdist-dir=', None,
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('target-version=', None,
"require a specific python version" +
" on the target system"),
('no-target-compile', 'c',
"do not compile .py to .pyc on the target system"),
('no-target-optimize', 'o',
"do not compile .py to .pyo (optimized)"
"on the target system"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('install-script=', None,
"basename of installation script to be run after"
"installation or before deinstallation"),
('pre-install-script=', None,
"Fully qualified filename of a script to be run before "
"any files are installed. This script need not be in the "
"distribution"),
]
boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
'skip-build']
all_versions = ['2.0', '2.1', '2.2', '2.3', '2.4',
'2.5', '2.6', '2.7', '2.8', '2.9',
'3.0', '3.1', '3.2', '3.3', '3.4',
'3.5', '3.6', '3.7', '3.8', '3.9']
other_version = 'X'
def initialize_options(self):
    """Reset every user option to its "unset" default before option
    parsing (standard distutils Command hook)."""
    # Options that default to "not specified".
    for attr in ('bdist_dir', 'plat_name', 'target_version', 'dist_dir',
                 'skip_build', 'install_script', 'pre_install_script',
                 'versions'):
        setattr(self, attr, None)
    # Boolean flags default to off.
    self.keep_temp = 0
    self.no_target_compile = 0
    self.no_target_optimize = 0
def finalize_options(self):
    """Validate options and fill in defaults (standard distutils hook).

    Derives bdist_dir from the parent 'bdist' command, resolves which
    Python versions the installer will target, and validates the
    install-script / pre-install-script options.
    """
    self.set_undefined_options('bdist', ('skip_build', 'skip_build'))

    if self.bdist_dir is None:
        # Stage the pseudo-install tree under <bdist_base>/msi.
        bdist_base = self.get_finalized_command('bdist').bdist_base
        self.bdist_dir = os.path.join(bdist_base, 'msi')

    short_version = get_python_version()
    # Extension modules are version-specific, so default the target to the
    # running interpreter's version.
    if (not self.target_version) and self.distribution.has_ext_modules():
        self.target_version = short_version

    if self.target_version:
        self.versions = [self.target_version]
        if not self.skip_build and self.distribution.has_ext_modules()\
           and self.target_version != short_version:
            # We cannot build extensions for a Python other than the one
            # running; prebuilt artifacts must be supplied via --skip-build.
            raise DistutilsOptionError(
                  "target version can only be %s, or the '--skip-build'"
                  " option must be specified" % (short_version,))
    else:
        # Pure-Python: offer every known version in the installer UI.
        self.versions = list(self.all_versions)

    self.set_undefined_options('bdist',
                               ('dist_dir', 'dist_dir'),
                               ('plat_name', 'plat_name'),
                               )

    if self.pre_install_script:
        raise DistutilsOptionError(
              "the pre-install-script feature is not yet implemented")

    if self.install_script:
        # install_script must be the basename of one of the distribution's
        # declared scripts.
        for script in self.distribution.scripts:
            if self.install_script == os.path.basename(script):
                break
        else:
            raise DistutilsOptionError(
                  "install_script '%s' not found in scripts"
                  % self.install_script)
    # Filled in by add_files() once the script's File-table key is known.
    self.install_script_key = None
def run(self):
    """Build (unless skipped), pseudo-install into bdist_dir, then create
    the .msi database and populate it with files, search actions, scripts
    and the installer UI."""
    if not self.skip_build:
        self.run_command('build')

    install = self.reinitialize_command('install', reinit_subcommands=1)
    install.prefix = self.bdist_dir
    install.skip_build = self.skip_build
    install.warn_dir = 0

    install_lib = self.reinitialize_command('install_lib')
    # we do not want to include pyc or pyo files
    install_lib.compile = 0
    install_lib.optimize = 0

    if self.distribution.has_ext_modules():
        # If we are building an installer for a Python version other
        # than the one we are currently running, then we need to ensure
        # our build_lib reflects the other Python version rather than ours.
        # Note that for target_version!=sys.version, we must have skipped the
        # build step, so there is no issue with enforcing the build of this
        # version.
        target_version = self.target_version
        if not target_version:
            assert self.skip_build, "Should have already checked this"
            target_version = sys.version[0:3]
        plat_specifier = ".%s-%s" % (self.plat_name, target_version)
        build = self.get_finalized_command('build')
        build.build_lib = os.path.join(build.build_base,
                                       'lib' + plat_specifier)

    log.info("installing to %s", self.bdist_dir)
    install.ensure_finalized()

    # avoid warning of 'install_lib' about installing
    # into a directory not in sys.path
    # NOTE(review): if install.run() raises, the temporary sys.path entry
    # is never removed -- consider a try/finally.
    sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))

    install.run()

    del sys.path[0]

    self.mkpath(self.dist_dir)
    fullname = self.distribution.get_fullname()
    installer_name = self.get_installer_filename(fullname)
    installer_name = os.path.abspath(installer_name)
    # Overwrite any previously built installer.
    if os.path.exists(installer_name): os.unlink(installer_name)

    metadata = self.distribution.metadata
    # Prefer author, fall back to maintainer, then a placeholder.
    author = metadata.author
    if not author:
        author = metadata.maintainer
    if not author:
        author = "UNKNOWN"
    version = metadata.get_version()
    # ProductVersion must be strictly numeric
    # XXX need to deal with prerelease versions
    sversion = "%d.%d.%d" % StrictVersion(version).version
    # Prefix ProductName with Python x.y, so that
    # it sorts together with the other Python packages
    # in Add-Remove-Programs (APR)
    fullname = self.distribution.get_fullname()
    if self.target_version:
        product_name = "Python %s %s" % (self.target_version, fullname)
    else:
        product_name = "Python %s" % (fullname)
    self.db = msilib.init_database(installer_name, schema,
                                   product_name, msilib.gen_uuid(),
                                   sversion, author)
    msilib.add_tables(self.db, sequence)
    # Optional Add/Remove Programs metadata properties.
    props = [('DistVersion', version)]
    email = metadata.author_email or metadata.maintainer_email
    if email:
        props.append(("ARPCONTACT", email))
    if metadata.url:
        props.append(("ARPURLINFOABOUT", metadata.url))
    if props:
        add_data(self.db, 'Property', props)

    # Populate the database: Python-location search, files, install
    # scripts, and the dialog-based UI, then flush everything to disk.
    self.add_find_python()
    self.add_files()
    self.add_scripts()
    self.add_ui()
    self.db.Commit()

    # Register the built artifact with the distribution (used e.g. by
    # the upload command).
    if hasattr(self.distribution, 'dist_files'):
        tup = 'bdist_msi', self.target_version or 'any', fullname
        self.distribution.dist_files.append(tup)

    if not self.keep_temp:
        remove_tree(self.bdist_dir, dry_run=self.dry_run)
def add_files(self):
    """Populate the MSI File/Directory tables from the staged install tree.

    One feature ("PythonX.Y") is created per target version, rooted at the
    per-version TARGETDIR property, plus an extra feature for a
    user-specified location (self.other_version).  The same on-disk tree
    is walked once per feature; after the first copy, files are recorded
    via the DuplicateFile table instead of being stored again.
    """
    db = self.db
    cab = msilib.CAB("distfiles")
    rootdir = os.path.abspath(self.bdist_dir)

    root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir")
    f = Feature(db, "Python", "Python", "Everything",
                0, 1, directory="TARGETDIR")

    items = [(f, root, '')]
    for version in self.versions + [self.other_version]:
        target = "TARGETDIR" + version
        name = default = "Python" + version
        desc = "Everything"
        if version is self.other_version:
            title = "Python from another location"
            # Level 2: hidden unless explicitly selected in the UI.
            level = 2
        else:
            title = "Python %s from registry" % version
            level = 1
        f = Feature(db, name, title, desc, 1, level, directory=target)
        dir = Directory(db, cab, root, rootdir, target, default)
        items.append((f, dir, version))
    db.Commit()

    seen = {}
    for feature, dir, version in items:
        # Iterative depth-first walk of the staged tree.
        todo = [dir]
        while todo:
            dir = todo.pop()
            for file in os.listdir(dir.absolute):
                afile = os.path.join(dir.absolute, file)
                if os.path.isdir(afile):
                    short = "%s|%s" % (dir.make_short(file), file)
                    default = file + version
                    newdir = Directory(db, cab, dir, file, default, short)
                    todo.append(newdir)
                else:
                    # Each directory needs a component before files can be
                    # attached to it.
                    if not dir.component:
                        dir.start_component(dir.logical, feature, 0)
                    if afile not in seen:
                        key = seen[afile] = dir.add_file(file)
                        if file==self.install_script:
                            if self.install_script_key:
                                raise DistutilsOptionError(
                                      "Multiple files with name %s" % file)
                            # File-table reference used later by add_scripts().
                            self.install_script_key = '[#%s]' % key
                    else:
                        # Already stored for an earlier feature: install a
                        # copy via the DuplicateFile table.
                        key = seen[afile]
                        add_data(self.db, "DuplicateFile",
                                [(key + version, dir.component, key, None, dir.logical)])
    db.Commit()
    cab.commit(db)
def add_find_python(self):
    """Adds code to the installer to compute the location of Python.

    Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the
    registry for each version of Python.

    Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined,
    else from PYTHON.MACHINE.X.Y.

    Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe"""
    # Sequence numbers for the generated actions; each version consumes
    # three consecutive slots (plus one gap) starting at 402.
    start = 402
    for ver in self.versions:
        install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % ver
        machine_reg = "python.machine." + ver
        user_reg = "python.user." + ver
        machine_prop = "PYTHON.MACHINE." + ver
        user_prop = "PYTHON.USER." + ver
        machine_action = "PythonFromMachine" + ver
        user_action = "PythonFromUser" + ver
        exe_action = "PythonExe" + ver
        target_dir_prop = "TARGETDIR" + ver
        exe_prop = "PYTHON" + ver
        if msilib.Win64:
            # type: msidbLocatorTypeRawValue + msidbLocatorType64bit
            Type = 2+16
        else:
            Type = 2
        # Roots 2 and 1 presumably select the per-machine and per-user
        # registry hives (per the machine/user naming) -- confirm against
        # the MSI RegLocator table documentation.
        add_data(self.db, "RegLocator",
                [(machine_reg, 2, install_path, None, Type),
                 (user_reg, 1, install_path, None, Type)])
        add_data(self.db, "AppSearch",
                [(machine_prop, machine_reg),
                 (user_prop, user_reg)])
        # 51+256: set-property custom actions -- confirm against the MSI
        # CustomAction type documentation.
        add_data(self.db, "CustomAction",
                [(machine_action, 51+256, target_dir_prop, "[" + machine_prop + "]"),
                 (user_action, 51+256, target_dir_prop, "[" + user_prop + "]"),
                 (exe_action, 51+256, exe_prop, "[" + target_dir_prop + "]\\python.exe"),
                ])
        # The user-specific location is sequenced after the machine one, so
        # when both exist the user value wins.
        add_data(self.db, "InstallExecuteSequence",
                [(machine_action, machine_prop, start),
                 (user_action, user_prop, start + 1),
                 (exe_action, None, start + 2),
                ])
        add_data(self.db, "InstallUISequence",
                [(machine_action, machine_prop, start),
                 (user_action, user_prop, start + 1),
                 (exe_action, None, start + 2),
                ])
        # Disable the per-version feature when that Python is not found.
        add_data(self.db, "Condition",
                [("Python" + ver, 0, "NOT TARGETDIR" + ver)])
        start += 4
        # Keep the generated sequence numbers in their reserved range.
        assert start < 500
def add_scripts(self):
    """Register the install-script (and the unimplemented pre-install
    script) custom actions in the MSI database."""
    if self.install_script:
        # One custom action per target version, run via that version's
        # interpreter property (PYTHONX.Y) when the matching feature is
        # being installed ("&PythonX.Y=3").
        start = 6800
        for ver in self.versions + [self.other_version]:
            install_action = "install_script." + ver
            exe_prop = "PYTHON" + ver
            # Action type 50 presumably runs the executable named by a
            # property -- confirm against the MSI CustomAction docs.
            add_data(self.db, "CustomAction",
                    [(install_action, 50, exe_prop, self.install_script_key)])
            add_data(self.db, "InstallExecuteSequence",
                    [(install_action, "&Python%s=3" % ver, start)])
            start += 1
    # XXX pre-install scripts are currently refused in finalize_options()
    # but if this feature is completed, it will also need to add
    # entries for each version as the above code does
    if self.pre_install_script:
        scriptfn = os.path.join(self.bdist_dir, "preinstall.bat")
        f = open(scriptfn, "w")
        # The batch file will be executed with [PYTHON], so that %1
        # is the path to the Python interpreter; %0 will be the path
        # of the batch file.
        # rem ="""
        # %1 %0
        # exit
        # """
        # <actual script>
        f.write('rem ="""\n%1 %0\nexit\n"""\n')
        f.write(open(self.pre_install_script).read())
        f.close()
        add_data(self.db, "Binary",
            [("PreInstall", msilib.Binary(scriptfn))
            ])
        add_data(self.db, "CustomAction",
            [("PreInstall", 2, "PreInstall", None)
            ])
        add_data(self.db, "InstallExecuteSequence",
                [("PreInstall", "NOT Installed", 450)])
def add_ui(self):
    """Populate the MSI database with the installer's dialog-based UI.

    Builds the standard wizard dialogs (fatal error, user exit, exit,
    files-in-use, error, cancel, wait-for-costing, prepare, feature
    selection, disk cost, which-users, progress, maintenance) plus the
    properties, text styles and UI sequence rows that tie them together.
    Quoted section names below refer to the Windows Installer docs.
    """
    db = self.db
    x = y = 50
    w = 370
    h = 300
    title = "[ProductName] Setup"

    # see "Dialog Style Bits"
    modal = 3       # visible | modal
    modeless = 1    # visible
    track_disk_space = 32

    # UI customization properties
    add_data(db, "Property",
             # See "DefaultUIFont Property"
             [("DefaultUIFont", "DlgFont8"),
              # See "ErrorDialog Style Bit"
              ("ErrorDialog", "ErrorDlg"),
              ("Progress1", "Install"),   # modified in maintenance type dlg
              ("Progress2", "installs"),
              ("MaintenanceForm_Action", "Repair"),
              # possible values: ALL, JUSTME
              ("WhichUsers", "ALL")
              ])

    # Fonts, see "TextStyle Table"
    add_data(db, "TextStyle",
             [("DlgFont8", "Tahoma", 9, None, 0),
              ("DlgFontBold8", "Tahoma", 8, None, 1),   # bold
              ("VerdanaBold10", "Verdana", 10, None, 1),
              ("VerdanaRed9", "Verdana", 9, 255, 0),
              ])

    # UI Sequences, see "InstallUISequence Table", "Using a Sequence Table"
    # Numbers indicate sequence; see sequence.py for how these action integrate
    add_data(db, "InstallUISequence",
             [("PrepareDlg", "Not Privileged or Windows9x or Installed", 140),
              ("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141),
              # In the user interface, assume all-users installation if privileged.
              ("SelectFeaturesDlg", "Not Installed", 1230),
              # XXX no support for resume installations yet
              #("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240),
              ("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250),
              ("ProgressDlg", None, 1280)])

    add_data(db, 'ActionText', text.ActionText)
    add_data(db, 'UIText', text.UIText)

    #####################################################################
    # Standard dialogs: FatalError, UserExit, ExitDialog
    fatal = PyDialog(db, "FatalError", x, y, w, h, modal, title,
                     "Finish", "Finish", "Finish")
    fatal.title("[ProductName] Installer ended prematurely")
    fatal.back("< Back", "Finish", active=0)
    fatal.cancel("Cancel", "Back", active=0)
    fatal.text("Description1", 15, 70, 320, 80, 0x30003,
               "[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.")
    fatal.text("Description2", 15, 155, 320, 20, 0x30003,
               "Click the Finish button to exit the Installer.")
    c = fatal.next("Finish", "Cancel", name="Finish")
    c.event("EndDialog", "Exit")

    user_exit = PyDialog(db, "UserExit", x, y, w, h, modal, title,
                         "Finish", "Finish", "Finish")
    user_exit.title("[ProductName] Installer was interrupted")
    user_exit.back("< Back", "Finish", active=0)
    user_exit.cancel("Cancel", "Back", active=0)
    user_exit.text("Description1", 15, 70, 320, 80, 0x30003,
                   "[ProductName] setup was interrupted. Your system has not been modified. "
                   "To install this program at a later time, please run the installation again.")
    user_exit.text("Description2", 15, 155, 320, 20, 0x30003,
                   "Click the Finish button to exit the Installer.")
    c = user_exit.next("Finish", "Cancel", name="Finish")
    c.event("EndDialog", "Exit")

    exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title,
                           "Finish", "Finish", "Finish")
    exit_dialog.title("Completing the [ProductName] Installer")
    exit_dialog.back("< Back", "Finish", active=0)
    exit_dialog.cancel("Cancel", "Back", active=0)
    exit_dialog.text("Description", 15, 235, 320, 20, 0x30003,
                     "Click the Finish button to exit the Installer.")
    c = exit_dialog.next("Finish", "Cancel", name="Finish")
    # "Return" (vs "Exit") lets the install complete normally.
    c.event("EndDialog", "Return")

    #####################################################################
    # Required dialog: FilesInUse, ErrorDlg
    inuse = PyDialog(db, "FilesInUse",
                     x, y, w, h,
                     19,   # KeepModeless|Modal|Visible
                     title,
                     "Retry", "Retry", "Retry", bitmap=False)
    inuse.text("Title", 15, 6, 200, 15, 0x30003,
               r"{\DlgFontBold8}Files in Use")
    inuse.text("Description", 20, 23, 280, 20, 0x30003,
               "Some files that need to be updated are currently in use.")
    inuse.text("Text", 20, 55, 330, 50, 3,
               "The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.")
    inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess",
                  None, None, None)
    c = inuse.back("Exit", "Ignore", name="Exit")
    c.event("EndDialog", "Exit")
    c = inuse.next("Ignore", "Retry", name="Ignore")
    c.event("EndDialog", "Ignore")
    c = inuse.cancel("Retry", "Exit", name="Retry")
    c.event("EndDialog", "Retry")

    # See "Error Dialog". See "ICE20" for the required names of the controls.
    error = Dialog(db, "ErrorDlg",
                   50, 10, 330, 101,
                   65543,   # Error|Minimize|Modal|Visible
                   title,
                   "ErrorText", None, None)
    error.text("ErrorText", 50, 9, 280, 48, 3, "")
    #error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None)
    # One pushbutton per ICE20-mandated error response.
    error.pushbutton("N", 120, 72, 81, 21, 3, "No", None).event("EndDialog", "ErrorNo")
    error.pushbutton("Y", 240, 72, 81, 21, 3, "Yes", None).event("EndDialog", "ErrorYes")
    error.pushbutton("A", 0, 72, 81, 21, 3, "Abort", None).event("EndDialog", "ErrorAbort")
    error.pushbutton("C", 42, 72, 81, 21, 3, "Cancel", None).event("EndDialog", "ErrorCancel")
    error.pushbutton("I", 81, 72, 81, 21, 3, "Ignore", None).event("EndDialog", "ErrorIgnore")
    error.pushbutton("O", 159, 72, 81, 21, 3, "Ok", None).event("EndDialog", "ErrorOk")
    error.pushbutton("R", 198, 72, 81, 21, 3, "Retry", None).event("EndDialog", "ErrorRetry")

    #####################################################################
    # Global "Query Cancel" dialog
    cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title,
                    "No", "No", "No")
    cancel.text("Text", 48, 15, 194, 30, 3,
                "Are you sure you want to cancel [ProductName] installation?")
    #cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
    #               "py.ico", None, None)
    c = cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No")
    c.event("EndDialog", "Exit")
    c = cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes")
    c.event("EndDialog", "Return")

    #####################################################################
    # Global "Wait for costing" dialog
    costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title,
                     "Return", "Return", "Return")
    costing.text("Text", 48, 15, 194, 30, 3,
                 "Please wait while the installer finishes determining your disk space requirements.")
    c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None)
    c.event("EndDialog", "Exit")

    #####################################################################
    # Preparation dialog: no user input except cancellation
    prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title,
                    "Cancel", "Cancel", "Cancel")
    prep.text("Description", 15, 70, 320, 40, 0x30003,
              "Please wait while the Installer prepares to guide you through the installation.")
    prep.title("Welcome to the [ProductName] Installer")
    # These two text controls mirror the running action via control events.
    c = prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...")
    c.mapping("ActionText", "Text")
    c = prep.text("ActionData", 15, 135, 320, 30, 0x30003, None)
    c.mapping("ActionData", "Text")
    prep.back("Back", None, active=0)
    prep.next("Next", None, active=0)
    c = prep.cancel("Cancel", None)
    c.event("SpawnDialog", "CancelDlg")

    #####################################################################
    # Feature (Python directory) selection
    seldlg = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal, title,
                      "Next", "Next", "Cancel")
    seldlg.title("Select Python Installations")
    seldlg.text("Hint", 15, 30, 300, 20, 3,
                "Select the Python locations where %s should be installed."
                % self.distribution.get_fullname())
    seldlg.back("< Back", None, active=0)
    c = seldlg.next("Next >", "Cancel")
    order = 1
    # On "Next": point TARGETDIR at the dir for the selected version, then
    # re-cost and close. Orderings keep the events in this exact sequence.
    c.event("[TARGETDIR]", "[SourceDir]", ordering=order)
    for version in self.versions + [self.other_version]:
        order += 1
        c.event("[TARGETDIR]", "[TARGETDIR%s]" % version,
                "FEATURE_SELECTED AND &Python%s=3" % version,
                ordering=order)
    c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=order + 1)
    c.event("EndDialog", "Return", ordering=order + 2)
    c = seldlg.cancel("Cancel", "Features")
    c.event("SpawnDialog", "CancelDlg")

    c = seldlg.control("Features", "SelectionTree", 15, 60, 300, 120, 3,
                       "FEATURE", None, "PathEdit", None)
    c.event("[FEATURE_SELECTED]", "1")
    ver = self.other_version
    install_other_cond = "FEATURE_SELECTED AND &Python%s=3" % ver
    dont_install_other_cond = "FEATURE_SELECTED AND &Python%s<>3" % ver

    # "Other" label and path editor only appear when the user selects the
    # "other" (manually-located) Python feature.
    c = seldlg.text("Other", 15, 200, 300, 15, 3,
                    "Provide an alternate Python location")
    c.condition("Enable", install_other_cond)
    c.condition("Show", install_other_cond)
    c.condition("Disable", dont_install_other_cond)
    c.condition("Hide", dont_install_other_cond)

    c = seldlg.control("PathEdit", "PathEdit", 15, 215, 300, 16, 1,
                       "TARGETDIR" + ver, None, "Next", None)
    c.condition("Enable", install_other_cond)
    c.condition("Show", install_other_cond)
    c.condition("Disable", dont_install_other_cond)
    c.condition("Hide", dont_install_other_cond)

    #####################################################################
    # Disk cost
    cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
                    "OK", "OK", "OK", bitmap=False)
    cost.text("Title", 15, 6, 200, 15, 0x30003,
              "{\DlgFontBold8}Disk Space Requirements")
    cost.text("Description", 20, 20, 280, 20, 0x30003,
              "The disk space required for the installation of the selected features.")
    cost.text("Text", 20, 53, 330, 60, 3,
              "The highlighted volumes (if any) do not have enough disk space "
              "available for the currently selected features. You can either "
              "remove some files from the highlighted volumes, or choose to "
              "install less features onto local drive(s), or select different "
              "destination drive(s).")
    cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223,
                 None, "{120}{70}{70}{70}{70}", None, None)
    cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return")

    #####################################################################
    # WhichUsers Dialog. Only available on NT, and for privileged users.
    # This must be run before FindRelatedProducts, because that will
    # take into account whether the previous installation was per-user
    # or per-machine. We currently don't support going back to this
    # dialog after "Next" was selected; to support this, we would need to
    # find how to reset the ALLUSERS property, and how to re-run
    # FindRelatedProducts.
    # On Windows9x, the ALLUSERS property is ignored on the command line
    # and in the Property table, but installer fails according to the documentation
    # if a dialog attempts to set ALLUSERS.
    whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title,
                          "AdminInstall", "Next", "Cancel")
    whichusers.title("Select whether to install [ProductName] for all users of this computer.")
    # A radio group with two options: allusers, justme
    g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3,
                              "WhichUsers", "", "Next")
    g.add("ALL", 0, 5, 150, 20, "Install for all users")
    g.add("JUSTME", 0, 25, 150, 20, "Install just for me")
    whichusers.back("Back", None, active=0)
    c = whichusers.next("Next >", "Cancel")
    # Set ALLUSERS (ordering 1) before closing the dialog (ordering 2).
    c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1)
    c.event("EndDialog", "Return", ordering=2)
    c = whichusers.cancel("Cancel", "AdminInstall")
    c.event("SpawnDialog", "CancelDlg")

    #####################################################################
    # Installation Progress dialog (modeless)
    progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
                        "Cancel", "Cancel", "Cancel", bitmap=False)
    progress.text("Title", 20, 15, 200, 15, 0x30003,
                  "{\DlgFontBold8}[Progress1] [ProductName]")
    progress.text("Text", 35, 65, 300, 30, 3,
                  "Please wait while the Installer [Progress2] [ProductName]. "
                  "This may take several minutes.")
    progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:")
    c = progress.text("ActionText", 70, 100, w - 70, 20, 3, "Pondering...")
    c.mapping("ActionText", "Text")
    #c=progress.text("ActionData", 35, 140, 300, 20, 3, None)
    #c.mapping("ActionData", "Text")
    c = progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537,
                         None, "Progress done", None, None)
    c.mapping("SetProgress", "Progress")
    progress.back("< Back", "Next", active=False)
    progress.next("Next >", "Cancel", active=False)
    progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg")

    ###################################################################
    # Maintenance type: repair/uninstall
    maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title,
                     "Next", "Next", "Cancel")
    maint.title("Welcome to the [ProductName] Setup Wizard")
    maint.text("BodyText", 15, 63, 330, 42, 3,
               "Select whether you want to repair or remove [ProductName].")
    g = maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3,
                         "MaintenanceForm_Action", "", "Next")
    #g.add("Change", 0, 0, 200, 17, "&Change [ProductName]")
    g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]")
    g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]")
    maint.back("< Back", None, active=False)
    c = maint.next("Finish", "Cancel")
    # Change installation: Change progress dialog to "Change", then ask
    # for feature selection
    #c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1)
    #c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2)

    # Reinstall: Change progress dialog to "Repair", then invoke reinstall
    # Also set list of reinstalled features to "ALL"
    c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5)
    c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6)
    c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7)
    c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8)

    # Uninstall: Change progress to "Remove", then invoke uninstall
    # Also set list of removed features to "ALL"
    c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11)
    c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12)
    c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13)
    c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14)

    # Close dialog when maintenance action scheduled
    c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20)
    #c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21)
    maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg")
def get_installer_filename(self, fullname):
    """Build the full path of the .msi file to generate.

    Factored out to allow overriding in subclasses.

    :param fullname: distribution name+version string (e.g. "pkg-1.0").
    """
    if self.target_version:
        # Version-specific installer: embed the targeted Python version.
        base_name = "%s.%s-py%s.msi" % (fullname, self.plat_name,
                                        self.target_version)
    else:
        base_name = "%s.%s.msi" % (fullname, self.plat_name)
    installer_name = os.path.join(self.dist_dir, base_name)
return installer_name | unknown | codeparrot/codeparrot-clean | ||
__author__ = 'SmileyBarry'
from .core import APIConnection, SteamObject, store
from .decorators import cached_property, INFINITE
class SteamApp(SteamObject):
    """A Steam application (game), optionally associated with a user."""

    def __init__(self, appid, name=None, owner=None):
        # :param appid: numeric Steam application id.
        # :param name: optional known name; if given it is pre-seeded into
        #              the cached-property cache to skip an API round-trip.
        # :param owner: optional user id the app is associated with.
        self._id = appid
        if name is not None:
            import time
            self._cache = dict()
            # Cache entries are (value, timestamp) pairs.
            self._cache['name'] = (name, time.time())
        # Normally, the associated userid is also the owner.
        # That would not be the case if the game is borrowed, though. In that case, the object creator
        # usually defines attributes accordingly. However, at this time we can't ask the API "is this
        # game borrowed?", unless it's the actively-played game, so this distinction isn't done in the
        # object's context, but in the object creator's context.
        self._owner = owner
        self._userid = self._owner

    @cached_property(ttl=INFINITE)
    def _schema(self):
        # Full stats/achievements schema for this app; fetched once and
        # cached forever.
        return APIConnection().call("ISteamUserStats", "GetSchemaForGame", "v2", appid=self._id)

    @property
    def appid(self):
        return self._id

    @cached_property(ttl=INFINITE)
    def achievements(self):
        """Return the list of SteamAchievement objects for this app.

        Global unlock percentages are attached to every achievement; when
        a user id is associated, each achievement's "is_achieved" flag is
        pre-populated from that user's stats as well.
        """
        global_percentages = APIConnection().call("ISteamUserStats", "GetGlobalAchievementPercentagesForApp", "v0002",
                                                  gameid=self._id)
        if self._userid is not None:
            # Ah-ha, this game is associated to a user!
            userid = self._userid
            unlocks = APIConnection().call("ISteamUserStats",
                                           "GetUserStatsForGame",
                                           "v2",
                                           appid=self._id,
                                           steamid=userid)
            if 'achievements' in unlocks.playerstats:
                # Reduce the response to just the API names of the
                # achievements the user has actually unlocked.
                unlocks = [associated_achievement.name
                           for associated_achievement in unlocks.playerstats.achievements
                           if associated_achievement.achieved != 0]
        else:
            userid = None
            unlocks = None
        achievements_list = []
        for achievement in self._schema.game.availableGameStats.achievements:
            achievement_obj = SteamAchievement(self._id, achievement.name, achievement.displayName, userid)
            achievement_obj._cache = {}
            # Pre-populate "is_hidden" from the schema so the property
            # doesn't need another API call.
            if achievement.hidden == 0:
                store(achievement_obj, "is_hidden", False)
            else:
                store(achievement_obj, "is_hidden", True)
            for global_achievement in global_percentages.achievementpercentages.achievements:
                if global_achievement.name == achievement.name:
                    achievement_obj.unlock_percentage = global_achievement.percent
            achievements_list += [achievement_obj]
        if unlocks is not None:
            # Pre-populate per-user unlock state as well.
            for achievement in achievements_list:
                if achievement.apiname in unlocks:
                    store(achievement, "is_achieved", True)
                else:
                    store(achievement, "is_achieved", False)
        return achievements_list

    @cached_property(ttl=INFINITE)
    def name(self):
        # May be served from the cache seeded in __init__.
        return self._schema.game.gameName

    @cached_property(ttl=INFINITE)
    def owner(self):
        # NOTE(review): __init__ sets _userid = _owner, so both branches
        # currently return the same value; kept as-is pending real
        # borrowed-game support.
        if self._owner is None:
            return self._userid
        else:
            return self._owner

    def __str__(self):
        return self.name
class SteamAchievement(SteamObject):
    """A single achievement definition, optionally tied to a user."""

    def __init__(self, linked_appid, apiname, displayname, linked_userid=None):
        # :param linked_appid: app id this achievement belongs to.
        # :param apiname: internal API name (stable identifier).
        # :param displayname: human-readable name.
        # :param linked_userid: optional user id for unlock-state queries.
        self._appid = linked_appid
        self._id = apiname
        self._displayname = displayname
        self._userid = linked_userid
        # Global unlock percentage; filled in by SteamApp.achievements.
        self.unlock_percentage = 0.0

    @property
    def appid(self):
        return self._appid

    @property
    def name(self):
        # Human-readable display name (not the API name).
        return self._displayname

    @property
    def apiname(self):
        return self._id

    @property
    def id(self):
        return self._id
@cached_property(ttl=INFINITE)
def is_hidden(self):
    """Whether this achievement is marked hidden in the game's schema.

    Returns True/False for a schema match; implicitly returns None when
    the schema does not list this achievement at all.
    """
    schema = APIConnection().call("ISteamUserStats",
                                  "GetSchemaForGame",
                                  "v2",
                                  appid=self._appid)
    for entry in schema.game.availableGameStats.achievements:
        if entry.name != self._id:
            continue
        # Schema flags hidden achievements with a non-zero value.
        return entry.hidden != 0
@cached_property(ttl=INFINITE)
def is_unlocked(self):
    """Whether the linked user has unlocked this achievement.

    :raises ValueError: if no user id is associated with the achievement.
    """
    if self._userid is None:
        raise ValueError("No Steam ID linked to this achievement!")
    response = APIConnection().call("ISteamUserStats",
                                    "GetPlayerAchievements",
                                    "v1",
                                    steamid=self._userid,
                                    appid=self._appid,
                                    l="English")
    for achievement in response.playerstats.achievements:
        if achievement.apiname == self._id:
            if achievement.achieved == 1:
                return True
            else:
                return False
    # Cannot be found: fall through to the trailing "return False".
return False | unknown | codeparrot/codeparrot-clean | ||
import pytest
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf.models import NodeLog
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
)
from rest_framework import exceptions
from tests.utils import assert_latest_log, assert_latest_log_not
from osf.utils import permissions
from api_tests.utils import disconnected_from_listeners
from website.project.signals import contributor_removed
@pytest.fixture()
def user():
    # A freshly created user with auth credentials, shared by the test
    # classes below (typically as the project creator/admin).
    return AuthUserFactory()
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
@pytest.mark.enable_implicit_clean
class TestContributorDetail:
    """GET behaviour of /nodes/{node_id}/contributors/{user_id}/."""

    @pytest.fixture()
    def title(self):
        return 'Cool Project'

    @pytest.fixture()
    def description(self):
        return 'A Properly Cool Project'

    @pytest.fixture()
    def category(self):
        return 'data'

    @pytest.fixture()
    def project_public(self, user, title, description, category):
        return ProjectFactory(
            title=title,
            description=description,
            category=category,
            is_public=True,
            creator=user
        )

    @pytest.fixture()
    def project_private(self, user, title, description, category):
        return ProjectFactory(
            title=title,
            description=description,
            category=category,
            is_public=False,
            creator=user
        )

    @pytest.fixture()
    def url_public(self, user, project_public):
        return '/{}nodes/{}/contributors/{}/'.format(
            API_BASE, project_public._id, user._id)

    @pytest.fixture()
    def url_private_base(self, project_private):
        # Leaves a '{}' placeholder so tests can substitute any user id.
        return '/{}nodes/{}/contributors/{}/'.format(
            API_BASE, project_private._id, '{}')

    @pytest.fixture()
    def url_private(self, user, url_private_base):
        return url_private_base.format(user._id)

    def test_get_contributor_detail_valid_response(
            self, app, user, project_public,
            project_private, url_public, url_private):
        """Successful detail fetches for public and private nodes."""

        # test_get_public_contributor_detail
        res = app.get(url_public)
        assert res.status_code == 200
        # Contributor ids are serialized as "<node_id>-<user_id>".
        assert res.json['data']['id'] == '{}-{}'.format(
            project_public._id, user._id)

        # regression test
        # test_get_public_contributor_detail_is_viewable_through_browsable_api
        res = app.get(url_public + '?format=api')
        assert res.status_code == 200

        # test_get_private_node_contributor_detail_contributor_auth
        res = app.get(url_private, auth=user.auth)
        assert res.status_code == 200
        assert res.json['data']['id'] == '{}-{}'.format(
            project_private._id, user._id)

    def test_get_contributor_detail_errors(
            self, app, user, url_private_base, url_private):
        """Auth/permission failure modes for private-node detail."""
        non_contrib = AuthUserFactory()

        # test_get_private_node_contributor_detail_non_contributor
        res = app.get(url_private, auth=non_contrib.auth, expect_errors=True)
        assert res.status_code == 403

        # test_get_private_node_contributor_detail_not_logged_in
        res = app.get(url_private, expect_errors=True)
        assert res.status_code == 401

        # test_get_private_node_non_contributor_detail_contributor_auth
        res = app.get(
            url_private_base.format(
                non_contrib._id),
            auth=user.auth,
            expect_errors=True)
        assert res.status_code == 404

        # test_get_private_node_invalid_user_detail_contributor_auth
        res = app.get(
            url_private_base.format('invalid'),
            auth=user.auth,
            expect_errors=True)
        assert res.status_code == 404

    def test_unregistered_contributor_detail_show_up_as_name_associated_with_project(
            self,
            app,
            user):
        """The same unregistered user can carry a different display name
        per project: full_name is global, unregistered_contributor is
        per-project."""
        project = ProjectFactory(creator=user, is_public=True)
        project.add_unregistered_contributor(
            'Rheisen Dennis',
            'reason@gmail.com',
            auth=Auth(user),
            save=True)
        unregistered_contributor = project.contributors[1]
        url = '/{}nodes/{}/contributors/{}/'.format(
            API_BASE, project._id, unregistered_contributor._id)
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 200
        assert res.json['data']['embeds']['users']['data']['attributes']['full_name'] == 'Rheisen Dennis'
        assert res.json['data']['attributes'].get(
            'unregistered_contributor') == 'Rheisen Dennis'

        # Add the same (email-matched) unregistered user to a second
        # project under a different name.
        project_two = ProjectFactory(creator=user, is_public=True)
        project_two.add_unregistered_contributor(
            'Nesiehr Sinned', 'reason@gmail.com', auth=Auth(user), save=True)
        url = '/{}nodes/{}/contributors/{}/'.format(
            API_BASE, project_two._id, unregistered_contributor._id)
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 200
        assert res.json['data']['embeds']['users']['data']['attributes']['full_name'] == 'Rheisen Dennis'
        assert res.json['data']['attributes'].get(
            'unregistered_contributor') == 'Nesiehr Sinned'

    def test_detail_includes_index(
            self,
            app,
            user,
            project_public,
            url_public):
        """"index" reflects the contributor's position in the ordered list."""
        res = app.get(url_public)
        data = res.json['data']
        assert 'index' in data['attributes'].keys()
        assert data['attributes']['index'] == 0

        other_contributor = AuthUserFactory()
        project_public.add_contributor(
            other_contributor, auth=Auth(user), save=True)

        other_contributor_detail = '/{}nodes/{}/contributors/{}/'.format(
            API_BASE, project_public._id, other_contributor._id)
        res = app.get(other_contributor_detail)
        assert res.json['data']['attributes']['index'] == 1
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestNodeContributorOrdering:
    """PATCHing a contributor's "index" attribute reorders the list."""

    @pytest.fixture()
    def contribs(self, user):
        # Ten contributors; the project creator is first.
        return [user] + [AuthUserFactory() for _ in range(9)]

    @pytest.fixture()
    def project(self, user, contribs):
        project = ProjectFactory(creator=user)
        for contrib in contribs:
            if contrib._id != user._id:
                # Everyone but the creator gets read+write (no admin).
                project.add_contributor(
                    contrib,
                    permissions=[permissions.READ, permissions.WRITE],
                    visible=True,
                    save=True
                )
        return project

    @pytest.fixture()
    def url_contrib_base(self, project):
        return '/{}nodes/{}/contributors/'.format(API_BASE, project._id)

    @pytest.fixture()
    def url_creator(self, user, project):
        return '/{}nodes/{}/contributors/{}/'.format(
            API_BASE, project._id, user._id)

    @pytest.fixture()
    def urls_contrib(self, contribs, project):
        return [
            '/{}nodes/{}/contributors/{}/'.format(
                API_BASE,
                project._id,
                contrib._id) for contrib in contribs]

    @pytest.fixture()
    def last_position(self, contribs):
        return len(contribs) - 1

    @staticmethod
    @pytest.fixture()
    def contrib_user_id():
        # Helper fixture: extracts the user id from a serialized
        # contributor resource.
        def get_contrib_user_id(contributor):
            return contributor['embeds']['users']['data']['id']
        return get_contrib_user_id

    def test_initial_order(
            self, app, user, contribs, project, contrib_user_id):
        """Contributors are initially listed in creation order with
        matching index attributes."""
        res = app.get('/{}nodes/{}/contributors/'.format(
            API_BASE, project._id), auth=user.auth)
        assert res.status_code == 200
        contributor_list = res.json['data']
        found_contributors = False
        for i in range(len(contribs)):
            assert contribs[i]._id == contrib_user_id(contributor_list[i])
            assert i == contributor_list[i]['attributes']['index']
            found_contributors = True
        # Guard against a vacuously-passing empty loop.
        assert found_contributors, 'Did not compare any contributors.'

    def test_move_top_contributor_down_one_and_also_log(
            self, app, user, contribs, project, contrib_user_id, url_contrib_base):
        """Reordering also records a CONTRIB_REORDERED node log entry."""
        with assert_latest_log(NodeLog.CONTRIB_REORDERED, project):
            contributor_to_move = contribs[0]._id
            contributor_id = '{}-{}'.format(project._id, contributor_to_move)
            former_second_contributor = contribs[1]
            url = '{}{}/'.format(url_contrib_base, contributor_to_move)
            data = {
                'data': {
                    'id': contributor_id,
                    'type': 'contributors',
                    'attributes': {
                        'index': 1
                    }
                }
            }
            res_patch = app.patch_json_api(url, data, auth=user.auth)
            assert res_patch.status_code == 200
            project.reload()
            res = app.get(
                '/{}nodes/{}/contributors/'.format(API_BASE, project._id), auth=user.auth)
            assert res.status_code == 200
            contributor_list = res.json['data']
            assert contrib_user_id(contributor_list[1]) == contributor_to_move
            assert contrib_user_id(
                contributor_list[0]) == former_second_contributor._id

    def test_move_second_contributor_up_one_to_top(
            self, app, user, contribs, project,
            contrib_user_id, url_contrib_base):
        contributor_to_move = contribs[1]._id
        contributor_id = '{}-{}'.format(project._id, contributor_to_move)
        former_first_contributor = contribs[0]
        url = '{}{}/'.format(url_contrib_base, contributor_to_move)
        data = {
            'data': {
                'id': contributor_id,
                'type': 'contributors',
                'attributes': {
                    'index': 0
                }
            }
        }
        res_patch = app.patch_json_api(url, data, auth=user.auth)
        assert res_patch.status_code == 200
        project.reload()
        res = app.get('/{}nodes/{}/contributors/'.format(
            API_BASE, project._id), auth=user.auth)
        assert res.status_code == 200
        contributor_list = res.json['data']
        assert contrib_user_id(contributor_list[0]) == contributor_to_move
        assert contrib_user_id(
            contributor_list[1]) == former_first_contributor._id

    def test_move_top_contributor_down_to_bottom(
            self, app, user, contribs, project,
            contrib_user_id, last_position,
            url_contrib_base):
        contributor_to_move = contribs[0]._id
        contributor_id = '{}-{}'.format(project._id, contributor_to_move)
        former_second_contributor = contribs[1]
        url = '{}{}/'.format(url_contrib_base, contributor_to_move)
        data = {
            'data': {
                'id': contributor_id,
                'type': 'contributors',
                'attributes': {
                    'index': last_position
                }
            }
        }
        res_patch = app.patch_json_api(url, data, auth=user.auth)
        assert res_patch.status_code == 200
        project.reload()
        res = app.get('/{}nodes/{}/contributors/'.format(API_BASE,
                                                         project._id), auth=user.auth)
        assert res.status_code == 200
        contributor_list = res.json['data']
        assert contrib_user_id(
            contributor_list[last_position]) == contributor_to_move
        # Everyone below the moved contributor shifts up by one.
        assert contrib_user_id(
            contributor_list[0]) == former_second_contributor._id

    def test_move_bottom_contributor_up_to_top(
            self, app, user, contribs, project,
            contrib_user_id, last_position,
            url_contrib_base):
        contributor_to_move = contribs[last_position]._id
        contributor_id = '{}-{}'.format(project._id, contributor_to_move)
        former_second_to_last_contributor = contribs[last_position - 1]
        url = '{}{}/'.format(url_contrib_base, contributor_to_move)
        data = {
            'data': {
                'id': contributor_id,
                'type': 'contributors',
                'attributes': {
                    'index': 0
                }
            }
        }
        res_patch = app.patch_json_api(url, data, auth=user.auth)
        assert res_patch.status_code == 200
        project.reload()
        res = app.get('/{}nodes/{}/contributors/'.format(API_BASE,
                                                         project._id), auth=user.auth)
        assert res.status_code == 200
        contributor_list = res.json['data']
        assert contrib_user_id(contributor_list[0]) == contributor_to_move
        assert (
            contrib_user_id(contributor_list[last_position]) ==
            former_second_to_last_contributor._id)

    def test_move_second_to_last_contributor_down_past_bottom(
            self, app, user, contribs, project,
            contrib_user_id, last_position,
            url_contrib_base):
        """Indexes beyond the end of the list are clamped to the last
        position rather than rejected."""
        contributor_to_move = contribs[last_position - 1]._id
        contributor_id = '{}-{}'.format(project._id, contributor_to_move)
        former_last_contributor = contribs[last_position]
        url = '{}{}/'.format(url_contrib_base, contributor_to_move)
        data = {
            'data': {
                'id': contributor_id,
                'type': 'contributors',
                'attributes': {
                    'index': last_position + 10
                }
            }
        }
        res_patch = app.patch_json_api(url, data, auth=user.auth)
        assert res_patch.status_code == 200
        project.reload()
        res = app.get('/{}nodes/{}/contributors/'.format(API_BASE,
                                                         project._id), auth=user.auth)
        assert res.status_code == 200
        contributor_list = res.json['data']
        assert contrib_user_id(
            contributor_list[last_position]) == contributor_to_move
        assert (
            contrib_user_id(contributor_list[last_position - 1]) ==
            former_last_contributor._id)

    def test_move_top_contributor_down_to_second_to_last_position_with_negative_numbers(
            self, app, user, contribs, project, contrib_user_id, last_position, url_contrib_base):
        """index -1 lands the contributor second-to-last (negative
        indexes behave list-style)."""
        contributor_to_move = contribs[0]._id
        contributor_id = '{}-{}'.format(project._id, contributor_to_move)
        former_second_contributor = contribs[1]
        url = '{}{}/'.format(url_contrib_base, contributor_to_move)
        data = {
            'data': {
                'id': contributor_id,
                'type': 'contributors',
                'attributes': {
                    'index': -1
                }
            }
        }
        res_patch = app.patch_json_api(url, data, auth=user.auth)
        assert res_patch.status_code == 200
        project.reload()
        res = app.get('/{}nodes/{}/contributors/'.format(API_BASE,
                                                         project._id), auth=user.auth)
        assert res.status_code == 200
        contributor_list = res.json['data']
        assert contrib_user_id(
            contributor_list[last_position - 1]) == contributor_to_move
        assert contrib_user_id(
            contributor_list[0]) == former_second_contributor._id

    def test_write_contributor_fails_to_move_top_contributor_down_one(
            self, app, user, contribs, project, contrib_user_id, url_contrib_base):
        """Write-level (non-admin) contributors may not reorder; the
        order must remain unchanged after the 403."""
        contributor_to_move = contribs[0]._id
        contributor_id = '{}-{}'.format(project._id, contributor_to_move)
        former_second_contributor = contribs[1]
        url = '{}{}/'.format(url_contrib_base, contributor_to_move)
        data = {
            'data': {
                'id': contributor_id,
                'type': 'contributors',
                'attributes': {
                    'index': 1
                }
            }
        }
        res_patch = app.patch_json_api(
            url, data,
            auth=former_second_contributor.auth,
            expect_errors=True)
        assert res_patch.status_code == 403
        project.reload()
        res = app.get('/{}nodes/{}/contributors/'.format(API_BASE,
                                                         project._id), auth=user.auth)
        assert res.status_code == 200
        contributor_list = res.json['data']
        assert contrib_user_id(contributor_list[0]) == contributor_to_move
        assert contrib_user_id(
            contributor_list[1]) == former_second_contributor._id

    def test_non_authenticated_fails_to_move_top_contributor_down_one(
            self, app, user, contribs, project, contrib_user_id, url_contrib_base):
        """Anonymous reorder requests get 401 and change nothing."""
        contributor_to_move = contribs[0]._id
        contributor_id = '{}-{}'.format(project._id, contributor_to_move)
        former_second_contributor = contribs[1]
        url = '{}{}/'.format(url_contrib_base, contributor_to_move)
        data = {
            'data': {
                'id': contributor_id,
                'type': 'contributors',
                'attributes': {
                    'index': 1
                }
            }
        }
        res_patch = app.patch_json_api(url, data, expect_errors=True)
        assert res_patch.status_code == 401
        project.reload()
        res = app.get('/{}nodes/{}/contributors/'.format(
            API_BASE, project._id), auth=user.auth)
        assert res.status_code == 200
        contributor_list = res.json['data']
        assert contrib_user_id(contributor_list[0]) == contributor_to_move
        assert contrib_user_id(
            contributor_list[1]) == former_second_contributor._id
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestNodeContributorUpdate:
@pytest.fixture()
def contrib(self):
    # A non-admin contributor distinct from the project creator.
    return AuthUserFactory()

@pytest.fixture()
def project(self, user, contrib):
    # Project with `user` as creator/admin and `contrib` as a visible
    # read+write contributor.
    project = ProjectFactory(creator=user)
    project.add_contributor(
        contrib,
        permissions=[
            permissions.READ,
            permissions.WRITE],
        visible=True,
        save=True)
    return project

@pytest.fixture()
def url_creator(self, user, project):
    # Detail URL for the creator's contributor record.
    return '/{}nodes/{}/contributors/{}/'.format(
        API_BASE, project._id, user._id)

@pytest.fixture()
def url_contrib(self, project, contrib):
    # Detail URL for the non-admin contributor's record.
    return '/{}nodes/{}/contributors/{}/'.format(
        API_BASE, project._id, contrib._id)
def test_change_contrib_errors(
self, app, user, contrib, project, url_contrib):
# test_change_contributor_no_id
data = {
'data': {
'type': 'contributors',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_contrib,
data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
# test_change_contributor_incorrect_id
data = {
'data': {
'id': '12345',
'type': 'contributors',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_contrib,
data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 409
# test_change_contributor_no_type
contrib_id = '{}-{}'.format(project._id, contrib._id)
data = {
'data': {
'id': contrib_id,
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_contrib, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
# test_change_contributor_incorrect_type
data = {
'data': {
'id': contrib._id,
'type': 'Wrong type.',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_contrib, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 409
# test_invalid_change_inputs_contributor
contrib_id = '{}-{}'.format(project._id, contrib._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': 'invalid',
'bibliographic': 'invalid'
}
}
}
res = app.put_json_api(
url_contrib, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE]
assert project.get_visible(contrib)
# test_change_contributor_not_logged_in
data = {
'data': {
'id': contrib._id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
'bibliographic': False
}
}
}
res = app.put_json_api(url_contrib, data, expect_errors=True)
assert res.status_code == 401
project.reload()
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE]
assert project.get_visible(contrib)
# test_change_contributor_non_admin_auth
data = {
'data': {
'id': contrib._id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
'bibliographic': False
}
}
}
res = app.put_json_api(
url_contrib, data,
auth=contrib.auth,
expect_errors=True)
assert res.status_code == 403
project.reload()
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE]
assert project.get_visible(contrib)
def test_change_admin_self_without_other_admin(
self, app, user, project, url_creator):
contrib_id = '{}-{}'.format(project._id, user._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.WRITE,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_creator, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
project.reload()
assert project.get_permissions(user) == [
permissions.READ, permissions.WRITE, permissions.ADMIN]
def test_node_update_invalid_data(self, app, user, url_creator):
res = app.put_json_api(
url_creator,
'Incorrect data',
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
res = app.put_json_api(
url_creator,
['Incorrect data'],
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
def test_change_contributor_correct_id(
self, app, user, contrib, project, url_contrib):
contrib_id = '{}-{}'.format(project._id, contrib._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_contrib, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 200
def test_remove_all_bibliographic_statuses_contributors(
self, app, user, contrib, project, url_creator):
project.set_visible(contrib, False, save=True)
contrib_id = '{}-{}'.format(project._id, user._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'bibliographic': False
}
}
}
res = app.put_json_api(
url_creator, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
project.reload()
assert project.get_visible(user)
def test_change_contributor_permissions(
self, app, user, contrib, project, url_contrib):
contrib_id = '{}-{}'.format(project._id, contrib._id)
with assert_latest_log(NodeLog.PERMISSIONS_UPDATED, project):
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.ADMIN
project.reload()
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE, permissions.ADMIN]
with assert_latest_log(NodeLog.PERMISSIONS_UPDATED, project):
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.WRITE,
'bibliographic': True
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.WRITE
project.reload()
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE]
with assert_latest_log(NodeLog.PERMISSIONS_UPDATED, project):
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
'bibliographic': True
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.READ
project.reload()
assert project.get_permissions(contrib) == [permissions.READ]
def test_change_contributor_bibliographic(
self, app, user, contrib, project, url_contrib):
contrib_id = '{}-{}'.format(project._id, contrib._id)
with assert_latest_log(NodeLog.MADE_CONTRIBUTOR_INVISIBLE, project):
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'bibliographic': False
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert not attributes['bibliographic']
project.reload()
assert not project.get_visible(contrib)
with assert_latest_log(NodeLog.MADE_CONTRIBUTOR_VISIBLE, project):
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'bibliographic': True
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['bibliographic']
project.reload()
assert project.get_visible(contrib)
def test_change_contributor_permission_and_bibliographic(
self, app, user, contrib, project, url_contrib):
with assert_latest_log(NodeLog.PERMISSIONS_UPDATED, project, 1), assert_latest_log(NodeLog.MADE_CONTRIBUTOR_INVISIBLE, project):
contrib_id = '{}-{}'.format(project._id, contrib._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
'bibliographic': False
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.READ
assert not attributes['bibliographic']
project.reload()
assert project.get_permissions(contrib) == [permissions.READ]
assert not project.get_visible(contrib)
# @assert_not_logs(NodeLog.PERMISSIONS_UPDATED, 'project')
def test_not_change_contributor(
self, app, user, contrib, project, url_contrib):
with assert_latest_log_not(NodeLog.PERMISSIONS_UPDATED, project):
contrib_id = '{}-{}'.format(project._id, contrib._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': None,
'bibliographic': True
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.WRITE
assert attributes['bibliographic']
project.reload()
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE]
assert project.get_visible(contrib)
def test_change_admin_self_with_other_admin(
self, app, user, contrib, project, url_creator):
with assert_latest_log(NodeLog.PERMISSIONS_UPDATED, project):
project.add_permission(contrib, permissions.ADMIN, save=True)
contrib_id = '{}-{}'.format(project._id, user._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.WRITE,
'bibliographic': True
}
}
}
res = app.put_json_api(url_creator, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.WRITE
project.reload()
assert project.get_permissions(user) == [
permissions.READ, permissions.WRITE]
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestNodeContributorPartialUpdate:
    """PATCH tests: a partial update must change only the supplied attributes."""

    @pytest.fixture()
    def contrib(self):
        # A second user added to the project as a visible read/write contributor.
        return AuthUserFactory()

    @pytest.fixture()
    def project(self, user, contrib):
        project = ProjectFactory(creator=user)
        project.add_contributor(
            contrib,
            permissions=[
                permissions.READ,
                permissions.WRITE],
            visible=True,
            save=True)
        return project

    @pytest.fixture()
    def url_creator(self, user, project):
        # Contributor-detail URL for the project creator (admin).
        return '/{}nodes/{}/contributors/{}/'.format(
            API_BASE, project._id, user._id)

    @pytest.fixture()
    def url_contrib(self, contrib, project):
        # BUG FIX: the fixture previously referenced ``self.project`` and
        # ``self.user_two``, attributes that do not exist on this pytest-style
        # class, so any test requesting this fixture raised AttributeError.
        # Use the injected ``project`` and ``contrib`` fixtures instead.
        return '/{}nodes/{}/contributors/{}/'.format(
            API_BASE, project._id, contrib._id)

    def test_patch_bibliographic_only(self, app, user, project, url_creator):
        """Patching only `bibliographic` must not touch permissions."""
        creator_id = '{}-{}'.format(project._id, user._id)
        data = {
            'data': {
                'id': creator_id,
                'type': 'contributors',
                'attributes': {
                    'bibliographic': False,
                }
            }
        }
        res = app.patch_json_api(url_creator, data, auth=user.auth)
        assert res.status_code == 200
        project.reload()
        # Permissions unchanged; only visibility flipped.
        assert project.get_permissions(user) == [
            permissions.READ, permissions.WRITE, permissions.ADMIN]
        assert not project.get_visible(user)

    def test_patch_permission_only(self, app, user, project):
        """Patching only `permission` must not touch bibliographic visibility."""
        user_read_contrib = AuthUserFactory()
        project.add_contributor(
            user_read_contrib,
            permissions=[
                permissions.READ,
                permissions.WRITE],
            visible=False,
            save=True)
        url_read_contrib = '/{}nodes/{}/contributors/{}/'.format(
            API_BASE, project._id, user_read_contrib._id)
        contributor_id = '{}-{}'.format(project._id, user_read_contrib._id)
        data = {
            'data': {
                'id': contributor_id,
                'type': 'contributors',
                'attributes': {
                    'permission': permissions.READ,
                }
            }
        }
        res = app.patch_json_api(url_read_contrib, data, auth=user.auth)
        assert res.status_code == 200
        project.reload()
        # Permission downgraded; visibility (False) preserved.
        assert project.get_permissions(user_read_contrib) == [permissions.READ]
        assert not project.get_visible(user_read_contrib)
@pytest.mark.django_db
class TestNodeContributorDelete:
    """DELETE tests for the contributor detail endpoint.

    Covers permission checks, the last-admin / last-bibliographic-contributor
    constraints, and self-removal.
    """

    @pytest.fixture()
    def user_write_contrib(self):
        # Read/write (non-admin) contributor on the project.
        return AuthUserFactory()

    @pytest.fixture()
    def user_non_contrib(self):
        # A user with no relationship to the project.
        return AuthUserFactory()

    @pytest.fixture()
    def project(self, user, user_write_contrib):
        project = ProjectFactory(creator=user)
        project.add_contributor(
            user_write_contrib,
            permissions=[permissions.READ, permissions.WRITE],
            visible=True, save=True)
        return project

    @pytest.fixture()
    def url_user(self, project, user):
        # Contributor-detail URL for the creator/admin.
        return '/{}nodes/{}/contributors/{}/'.format(
            API_BASE, project._id, user._id)

    @pytest.fixture()
    def url_user_write_contrib(self, project, user_write_contrib):
        return '/{}nodes/{}/contributors/{}/'.format(
            API_BASE, project._id, user_write_contrib._id)

    @pytest.fixture()
    def url_user_non_contrib(self, project, user_non_contrib):
        return '/{}nodes/{}/contributors/{}/'.format(
            API_BASE, project._id, user_non_contrib._id)

    def test_remove_errors(
            self, app, user, user_write_contrib,
            user_non_contrib, project, url_user,
            url_user_write_contrib, url_user_non_contrib):
        """All unauthorized or invalid removal attempts fail without side effects."""
        # test_remove_contributor_non_contributor
        res = app.delete(
            url_user_write_contrib,
            auth=user_non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        project.reload()
        assert user_write_contrib in project.contributors
        # test_remove_contributor_not_logged_in
        res = app.delete(url_user_write_contrib, expect_errors=True)
        assert res.status_code == 401
        project.reload()
        assert user_write_contrib in project.contributors
        # test_remove_non_contributor_admin
        assert user_non_contrib not in project.contributors
        res = app.delete(
            url_user_non_contrib,
            auth=user.auth,
            expect_errors=True)
        assert res.status_code == 404
        project.reload()
        assert user_non_contrib not in project.contributors
        # test_remove_non_existing_user_admin
        url_user_fake = '/{}nodes/{}/contributors/{}/'.format(
            API_BASE, project._id, 'fake')
        # Disconnect contributor_removed so that we don't check in files
        # We can remove this when StoredFileNode is implemented in osf-models
        with disconnected_from_listeners(contributor_removed):
            res = app.delete(url_user_fake, auth=user.auth, expect_errors=True)
        assert res.status_code == 404
        # test_remove_self_contributor_unique_admin
        # Disconnect contributor_removed so that we don't check in files
        # We can remove this when StoredFileNode is implemented in osf-models
        with disconnected_from_listeners(contributor_removed):
            res = app.delete(url_user, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        project.reload()
        assert user in project.contributors

    def test_can_not_remove_only_bibliographic_contributor(
            self, app, user, project, user_write_contrib, url_user):
        """Removing the last visible (bibliographic) contributor is rejected."""
        # Give the other contributor admin rights but make them invisible, so
        # `user` is the sole bibliographic contributor.
        project.add_permission(
            user_write_contrib,
            permissions.ADMIN,
            save=True)
        project.set_visible(user_write_contrib, False, save=True)
        res = app.delete(url_user, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        project.reload()
        assert user in project.contributors

    def test_remove_contributor_non_admin_is_forbidden(
            self, app, user_write_contrib,
            user_non_contrib, project,
            url_user_non_contrib):
        """A write-only contributor cannot remove other contributors."""
        project.add_contributor(
            user_non_contrib,
            permissions=[
                permissions.READ,
                permissions.WRITE],
            visible=True,
            save=True)
        res = app.delete(
            url_user_non_contrib,
            auth=user_write_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        project.reload()
        assert user_non_contrib in project.contributors

    # @assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
    def test_remove_contributor_admin(
            self, app, user, user_write_contrib,
            project, url_user_write_contrib):
        """An admin can remove another contributor; a CONTRIB_REMOVED log is written."""
        with assert_latest_log(NodeLog.CONTRIB_REMOVED, project):
            # Disconnect contributor_removed so that we don't check in files
            # We can remove this when StoredFileNode is implemented in
            # osf-models
            with disconnected_from_listeners(contributor_removed):
                res = app.delete(url_user_write_contrib, auth=user.auth)
            assert res.status_code == 204
            project.reload()
            assert user_write_contrib not in project.contributors

    # @assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
    def test_remove_self_non_admin(
            self, app, user_non_contrib,
            project, url_user_non_contrib):
        """A non-admin contributor may remove themselves."""
        with assert_latest_log(NodeLog.CONTRIB_REMOVED, project):
            project.add_contributor(
                user_non_contrib,
                permissions=[
                    permissions.READ,
                    permissions.WRITE],
                visible=True,
                save=True)
            # Disconnect contributor_removed so that we don't check in files
            # We can remove this when StoredFileNode is implemented in
            # osf-models
            with disconnected_from_listeners(contributor_removed):
                res = app.delete(
                    url_user_non_contrib,
                    auth=user_non_contrib.auth)
            assert res.status_code == 204
            project.reload()
            assert user_non_contrib not in project.contributors

    # @assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
    def test_remove_self_contributor_not_unique_admin(
            self, app, user, user_write_contrib, project, url_user):
        """An admin may remove themselves when another admin exists."""
        with assert_latest_log(NodeLog.CONTRIB_REMOVED, project):
            project.add_permission(
                user_write_contrib,
                permissions.ADMIN,
                save=True)
            # Disconnect contributor_removed so that we don't check in files
            # We can remove this when StoredFileNode is implemented in
            # osf-models
            with disconnected_from_listeners(contributor_removed):
                res = app.delete(url_user, auth=user.auth)
            assert res.status_code == 204
            project.reload()
            assert user not in project.contributors

    # @assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
    def test_can_remove_self_as_contributor_not_unique_admin(
            self, app, user_write_contrib, project, url_user_write_contrib):
        """A newly promoted admin may remove themselves while the creator remains admin."""
        with assert_latest_log(NodeLog.CONTRIB_REMOVED, project):
            project.add_permission(
                user_write_contrib,
                permissions.ADMIN,
                save=True)
            # Disconnect contributor_removed so that we don't check in files
            # We can remove this when StoredFileNode is implemented in
            # osf-models
            with disconnected_from_listeners(contributor_removed):
                res = app.delete(
                    url_user_write_contrib,
                    auth=user_write_contrib.auth)
            assert res.status_code == 204
            project.reload()
            assert user_write_contrib not in project.contributors
"""subprocess management for dak
@copyright: 2013, Ansgar Burchardt <ansgar@debian.org>
@license: GPL-2+
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import signal
import subprocess
#
def fix_signal_handlers():
    """Restore default handling for signals the interpreter ignores.

    Python installs SIG_IGN for a handful of signals, and that disposition
    is inherited by child processes, causing surprising behaviour there.
    Resetting to SIG_DFL before exec avoids that.

    Reference: http://bugs.python.org/issue1652
    """
    for name in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'):
        # Some of these names may be missing on a given platform.
        signum = getattr(signal, name, None)
        if signum is not None:
            signal.signal(signum, signal.SIG_DFL)
def _generate_preexec_fn(other_preexec_fn=None):
    """Return a preexec_fn that resets signal handlers, then chains to the caller's hook.

    @param other_preexec_fn: optional callable supplied by the caller; invoked
        after the signal handlers have been reset.
    """
    def chained_preexec_fn():
        fix_signal_handlers()
        if other_preexec_fn is not None:
            other_preexec_fn()
    return chained_preexec_fn
def call(*args, **kwargs):
    """wrapper around subprocess.call that fixes signal handling"""
    # Wrap (or install) the preexec_fn so children start with default handlers.
    kwargs['preexec_fn'] = _generate_preexec_fn(kwargs.get('preexec_fn'))
    return subprocess.call(*args, **kwargs)
def check_call(*args, **kwargs):
    """wrapper around subprocess.check_call that fixes signal handling"""
    # Wrap (or install) the preexec_fn so children start with default handlers.
    kwargs['preexec_fn'] = _generate_preexec_fn(kwargs.get('preexec_fn'))
    return subprocess.check_call(*args, **kwargs)
def check_output(*args, **kwargs):
    """wrapper around subprocess.check_output that fixes signal handling"""
    # Wrap (or install) the preexec_fn so children start with default handlers.
    kwargs['preexec_fn'] = _generate_preexec_fn(kwargs.get('preexec_fn'))
    return subprocess.check_output(*args, **kwargs)
def Popen(*args, **kwargs):
    """wrapper around subprocess.Popen that fixes signal handling"""
    # Wrap (or install) the preexec_fn so children start with default handlers.
    kwargs['preexec_fn'] = _generate_preexec_fn(kwargs.get('preexec_fn'))
    return subprocess.Popen(*args, **kwargs)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_pgjson.fields
class Migration(migrations.Migration):
    """Create the per-object custom-attribute value tables for issues, tasks
    and user stories, each holding a JSON blob of attribute values plus an
    optimistic-locking version counter.
    """

    dependencies = [
        ('tasks', '0005_auto_20150114_0954'),
        ('issues', '0004_auto_20150114_0954'),
        ('userstories', '0009_remove_userstory_is_archived'),
        ('custom_attributes', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='IssueCustomAttributesValues',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
                ('version', models.IntegerField(default=1, verbose_name='version')),
                ('attributes_values', django_pgjson.fields.JsonField(default={}, verbose_name='attributes_values')),
                ('issue', models.OneToOneField(verbose_name='issue', to='issues.Issue', related_name='custom_attributes_values')),
            ],
            options={
                'verbose_name_plural': 'issue custom attributes values',
                'ordering': ['id'],
                # Fixed typo: was 'issue ustom attributes values'.
                'verbose_name': 'issue custom attributes values',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='TaskCustomAttributesValues',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
                ('version', models.IntegerField(default=1, verbose_name='version')),
                ('attributes_values', django_pgjson.fields.JsonField(default={}, verbose_name='attributes_values')),
                ('task', models.OneToOneField(verbose_name='task', to='tasks.Task', related_name='custom_attributes_values')),
            ],
            options={
                'verbose_name_plural': 'task custom attributes values',
                'ordering': ['id'],
                # Fixed typo: was 'task ustom attributes values'.
                'verbose_name': 'task custom attributes values',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='UserStoryCustomAttributesValues',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
                ('version', models.IntegerField(default=1, verbose_name='version')),
                ('attributes_values', django_pgjson.fields.JsonField(default={}, verbose_name='attributes_values')),
                ('user_story', models.OneToOneField(verbose_name='user story', to='userstories.UserStory', related_name='custom_attributes_values')),
            ],
            options={
                'verbose_name_plural': 'user story custom attributes values',
                'ordering': ['id'],
                # Fixed typo: was 'user story ustom attributes values'.
                'verbose_name': 'user story custom attributes values',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
#define ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
#include <cassert>
#include <cstddef>
#include <functional>
#include <queue>
#include <thread> // NOLINT(build/c++11)
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/functional/any_invocable.h"
#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
// A simple ThreadPool implementation for tests.
// A simple ThreadPool implementation for tests.
class ThreadPool {
 public:
  // Spawns `num_threads` worker threads that immediately start waiting
  // for work in WorkLoop().
  explicit ThreadPool(int num_threads) {
    threads_.reserve(num_threads);
    for (int i = 0; i < num_threads; ++i) {
      threads_.push_back(std::thread(&ThreadPool::WorkLoop, this));
    }
  }

  ThreadPool(const ThreadPool &) = delete;
  ThreadPool &operator=(const ThreadPool &) = delete;

  // Enqueues one null sentinel per worker (each worker consumes exactly
  // one and exits), then joins all threads. Work queued before
  // destruction still runs, since sentinels sit behind it in the queue.
  ~ThreadPool() {
    {
      absl::MutexLock l(&mu_);
      for (size_t i = 0; i < threads_.size(); i++) {
        queue_.push(nullptr);  // Shutdown signal.
      }
    }
    for (auto &t : threads_) {
      t.join();
    }
  }

  // Schedule a function to be run on a ThreadPool thread immediately.
  // `func` must be non-null; nullptr is reserved as the shutdown sentinel.
  void Schedule(absl::AnyInvocable<void()> func) {
    assert(func != nullptr);
    absl::MutexLock l(&mu_);
    queue_.push(std::move(func));
  }

 private:
  // Condition predicate: true when the queue holds work (or a sentinel).
  bool WorkAvailable() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
    return !queue_.empty();
  }

  // Worker body: repeatedly blocks until the queue is non-empty, pops one
  // item, and runs it. A popped nullptr means "shut down this worker".
  void WorkLoop() {
    while (true) {
      absl::AnyInvocable<void()> func;
      {
        absl::MutexLock l(&mu_);
        // Await releases mu_ while blocked and reacquires it on wakeup.
        mu_.Await(absl::Condition(this, &ThreadPool::WorkAvailable));
        func = std::move(queue_.front());
        queue_.pop();
      }
      if (func == nullptr) {  // Shutdown signal.
        break;
      }
      // Run outside the lock so long tasks don't block the queue.
      func();
    }
  }

  absl::Mutex mu_;
  std::queue<absl::AnyInvocable<void()>> queue_ ABSL_GUARDED_BY(mu_);
  std::vector<std::thread> threads_;
};
} // namespace synchronization_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_ | c | github | https://github.com/mysql/mysql-server | extra/abseil/abseil-cpp-20230802.1/absl/synchronization/internal/thread_pool.h |
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import sys
import tempfile
import zipfile
# Target platforms for which prebuilt binaries are published.
_PLATFORMS = ["linux-x64", "android-arm"]
# Mojo applications to download for each platform.
_APPS = ["network_service", "network_service_apptests"]
_CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
# Make the local pylib helpers (gs download wrapper) importable.
sys.path.insert(0, os.path.join(_CURRENT_PATH, "pylib"))
import gs
script_dir = os.path.dirname(os.path.realpath(__file__))
def download_app(app, version, tools_directory):
    """Download prebuilt binaries for `app` at `version`, for every platform.

    A VERSION stamp file in the app's prebuilt directory records the version
    last downloaded; if it already matches, nothing is fetched. The stamp is
    rewritten only after all platforms downloaded successfully.
    """
    prebuilt_dir = os.path.join(script_dir, "prebuilt/%s" % app)
    stamp_file = os.path.join(prebuilt_dir, "VERSION")

    try:
        with open(stamp_file) as stamp:
            if stamp.read().strip() == version:
                # Already have the right version.
                return
    except IOError:
        # Missing stamp: treat as "needs download".
        pass

    for target_platform in _PLATFORMS:
        download_app_for_platform(app, version, target_platform, tools_directory)

    with open(stamp_file, 'w') as stamp:
        stamp.write(version)
def download_app_for_platform(app, version, platform, tools_directory):
    """Download and unpack one app binary for a single platform.

    Fetches <app>.mojo.zip from the public google storage bucket into a
    temporary file, then extracts the binary into
    prebuilt/<app>/<platform>/ and restores its executable mode bits.
    """
    # NOTE(review): this mutates sys.path on every call so that
    # find_depot_tools can be imported; entries accumulate across calls.
    find_depot_tools_path = os.path.join(_CURRENT_PATH, tools_directory)
    sys.path.insert(0, find_depot_tools_path)
    # pylint: disable=F0401
    import find_depot_tools
    depot_tools_path = find_depot_tools.add_depot_tools_to_path()
    binary_name = app + ".mojo"
    gs_path = "gs://mojo/%s/%s/%s/%s.zip" % (app, version, platform, binary_name)
    output_directory = os.path.join(script_dir,
                                    "prebuilt/%s/%s" % (app, platform))
    with tempfile.NamedTemporaryFile() as temp_zip_file:
        gs.download_from_public_bucket(gs_path, temp_zip_file.name,
                                       depot_tools_path)
        with zipfile.ZipFile(temp_zip_file.name) as z:
            zi = z.getinfo(binary_name)
            # Upper 16 bits of external_attr hold the Unix permission bits,
            # which ZipFile.extract does not restore by itself.
            mode = zi.external_attr >> 16
            z.extract(zi, output_directory)
            os.chmod(os.path.join(output_directory, binary_name), mode)
def main():
    """Parse arguments, read NETWORK_SERVICE_VERSION, and download each app.

    Returns 0 on success (download failures raise out of download_app).
    """
    parser = argparse.ArgumentParser(
        description="Download prebuilt network service binaries from google " +
                    "storage")
    parser.add_argument("--tools-directory",
                        dest="tools_directory",
                        metavar="<tools-directory>",
                        type=str,
                        required=True,
                        help="Path to the directory containing "
                             "find_depot_tools.py, specified as a relative path "
                             "from the location of this file.")
    args = parser.parse_args()
    # The pinned version lives next to this script.
    version_path = os.path.join(script_dir, "NETWORK_SERVICE_VERSION")
    with open(version_path) as version_file:
        version = version_file.read().strip()
    for app in _APPS:
        download_app(app, version, args.tools_directory)
    return 0


if __name__ == "__main__":
    sys.exit(main())
import { assert_ok, test } from '../../test';
// Hydration test: the `foo` prop clashes with the each-block argument name.
// The snapshot captures the <ul> and its three <li> nodes so the harness can
// verify they are reused (not recreated) during hydration.
export default test({
	props: {
		things: {
			foo: ['animal', 'vegetable', 'mineral']
		}
	},

	snapshot(target) {
		const list = target.querySelector('ul');
		assert_ok(list);

		const [first, second, third] = list.querySelectorAll('li');

		return {
			ul: list,
			lis0: first,
			lis1: second,
			lis2: third
		};
	}
});
# coding:utf-8
import json
from django_webtest import WebTest
from django.core.urlresolvers import reverse
from django_factory_boy import auth
from journalmanager.tests import modelfactories
class DownloadMarkupFilesTests(WebTest):
    """Access-control tests for the markup-files export page."""

    def setUp(self):
        # Active user attached to a collection (non-manager) with one journal.
        self.user = auth.UserF(is_active=True)
        self.collection = modelfactories.CollectionFactory.create()
        self.collection.add_user(self.user, is_manager=False)
        self.journal = modelfactories.JournalFactory(creator=self.user)
        self.journal.join(self.collection, self.user)

    def test_non_authenticated_users_are_redirected_to_login_page(self):
        """Anonymous access redirects (302) to the login page."""
        response = self.app.get(
            reverse('export.markupfiles'),
            status=302
        ).follow()
        self.assertTemplateUsed(response, 'registration/login.html')

    def test_authenticated_users_can_access(self):
        """Logged-in users get the markup-files export template."""
        response = self.app.get(
            reverse('export.markupfiles'),
            user=self.user
        )
        self.assertTemplateUsed(response, 'export/markup_files.html')
class ListIssuesForMarkupFilesTests(WebTest):
"""
Tests ajax interactions
"""
def setUp(self):
self.user = auth.UserF(is_active=True)
self.collection = modelfactories.CollectionFactory.create()
self.collection.add_user(self.user, is_manager=False)
self.journal = modelfactories.JournalFactory()
self.journal.join(self.collection, self.user)
def test_get_issues_pending_for_markup(self):
"""
This interaction is performed by ajax requests, while
querying for the list of issues waiting to be marked-up.
"""
issue = modelfactories.IssueFactory(
journal=self.journal, is_marked_up=False)
issue2 = modelfactories.IssueFactory(
journal=self.journal, is_marked_up=True)
params = 'j={0}&all=0'.format(self.journal.pk)
response = self.app.get(
reverse('ajx.list_issues_for_markup_files') + '?' + params,
headers={'x-requested-with': 'XMLHttpRequest'},
user=self.user,
expect_errors=True
)
response_data = json.loads(response.content)
self.assertEqual(len(response_data), 1)
def test_get_all_issues(self):
"""
This interaction is performed by ajax requests, while
querying for the list of issues of a given journal.
"""
issue = modelfactories.IssueFactory(
journal=self.journal, is_marked_up=False)
issue2 = modelfactories.IssueFactory(
journal=self.journal, is_marked_up=True)
params = 'j={0}&all=1'.format(self.journal.pk)
response = self.app.get(
reverse('ajx.list_issues_for_markup_files') + '?' + params,
headers={'x-requested-with': 'XMLHttpRequest'},
user=self.user,
expect_errors=True
)
response_data = json.loads(response.content)
self.assertEqual(len(response_data), 2)
def test_get_all_issues_passing_true_as_the_boolean_value(self):
"""
The recommended values to be used as boolean params are 0 and 1.
But we are already prepared to handle: True, true, yes, Yes, on,
On, y, Y. Anything else is handled as False.
"""
issue = modelfactories.IssueFactory(
journal=self.journal, is_marked_up=False)
issue2 = modelfactories.IssueFactory(
journal=self.journal, is_marked_up=True)
params = 'j={0}&all=true'.format(self.journal.pk)
response = self.app.get(
reverse('ajx.list_issues_for_markup_files') + '?' + params,
headers={'x-requested-with': 'XMLHttpRequest'},
user=self.user,
expect_errors=True
)
response_data = json.loads(response.content)
self.assertEqual(len(response_data), 2)
def test_get_all_issues_passing_On_as_the_boolean_value(self):
"""
The recommended values to be used as boolean params are 0 and 1.
But we are already prepared to handle: True, true, yes, Yes, on,
On, y, Y. Anything else is handled as False.
"""
issue = modelfactories.IssueFactory(
journal=self.journal, is_marked_up=False)
issue2 = modelfactories.IssueFactory(
journal=self.journal, is_marked_up=True)
params = 'j={0}&all=on'.format(self.journal.pk)
response = self.app.get(
reverse('ajx.list_issues_for_markup_files') + '?' + params,
headers={'x-requested-with': 'XMLHttpRequest'},
user=self.user,
expect_errors=True
)
response_data = json.loads(response.content)
self.assertEqual(len(response_data), 2)
def test_unknown_values_passed_to_All_are_treated_as_false(self):
"""
The recommended values to be used as boolean params are 0 and 1.
But we are already prepared to handle: True, true, yes, Yes, on,
On, y, Y. Anything else is handled as False.
"""
issue = modelfactories.IssueFactory(
journal=self.journal, is_marked_up=False)
issue2 = modelfactories.IssueFactory(
journal=self.journal, is_marked_up=True)
params = 'j={0}&all=Bzz'.format(self.journal.pk)
response = self.app.get(
reverse('ajx.list_issues_for_markup_files') + '?' + params,
headers={'x-requested-with': 'XMLHttpRequest'},
user=self.user,
expect_errors=True
)
response_data = json.loads(response.content)
self.assertEqual(len(response_data), 1)
    def test_non_ajax_requests_gets_a_400_error(self):
        # The request below omits the x-requested-with header, so the
        # view must reject it with HTTP 400.
        issue = modelfactories.IssueFactory(
            journal=self.journal, is_marked_up=False)
        params = 'j={0}&all=0'.format(self.journal.pk)
        response = self.app.get(
            reverse('ajx.list_issues_for_markup_files') + '?' + params,
            user=self.user,
            status=400
        )
        self.assertEqual(response.status_code, 400)
    def test_only_authenticated_users_can_query_issues(self):
        """
        Access to the Ajax that returns a list of issues for a
        given Journal.
        """
        issue = modelfactories.IssueFactory(
            journal=self.journal, is_marked_up=False)
        params = 'j={0}&all=0'.format(self.journal.pk)
        # No ``user=`` kwarg: the anonymous request must be redirected
        # (302) to the login page, which we then follow.
        response = self.app.get(
            reverse('ajx.list_issues_for_markup_files') + '?' + params,
            headers={'x-requested-with': 'XMLHttpRequest'},
            status=302
        ).follow()
self.assertTemplateUsed(response, 'registration/login.html') | unknown | codeparrot/codeparrot-clean | ||
"""
Unit tests for stem.descriptor.export.
"""
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import stem.prereq
import test.runner
from stem.descriptor.export import export_csv, export_csv_file
from test.mocking import (
get_relay_server_descriptor,
get_bridge_server_descriptor,
)
class TestExport(unittest.TestCase):
  """Tests for export_csv()/export_csv_file() over server descriptors."""
  def test_minimal_descriptor(self):
    """
    Exports a single minimal tor server descriptor.
    """
    # we won't have a header prior to python 2.7
    if not stem.prereq.is_python_27():
      test.runner.skip(self, '(header added in python 2.7)')
      return
    desc = get_relay_server_descriptor()
    desc_csv = export_csv(desc, included_fields = ('nickname', 'address', 'published'), header = False)
    expected = 'caerSidi,71.35.133.197,2012-03-01 17:15:27\n'
    self.assertEqual(expected, desc_csv)
    # Same export with header=True simply prepends the column names row.
    desc_csv = export_csv(desc, included_fields = ('nickname', 'address', 'published'), header = True)
    expected = 'nickname,address,published\n' + expected
    self.assertEqual(expected, desc_csv)
  def test_multiple_descriptors(self):
    """
    Exports multiple descriptors, making sure that we get them back in the same
    order.
    """
    nicknames = ('relay1', 'relay3', 'relay2', 'caerSidi', 'zeus')
    descriptors = []
    for nickname in nicknames:
      router_line = '%s 71.35.133.197 9001 0 0' % nickname
      descriptors.append(get_relay_server_descriptor({'router': router_line}))
    # Output order must match input order, one nickname per row.
    expected = '\n'.join(nicknames) + '\n'
    self.assertEqual(expected, export_csv(descriptors, included_fields = ('nickname',), header = False))
  def test_file_output(self):
    """
    Basic test for the export_csv_file() function, checking that it provides
    the same output as export_csv().
    """
    desc = get_relay_server_descriptor()
    desc_csv = export_csv(desc)
    csv_buffer = StringIO()
    export_csv_file(csv_buffer, desc)
    self.assertEqual(desc_csv, csv_buffer.getvalue())
  def test_excludes_private_attr(self):
    """
    Checks that the default attributes for our csv output doesn't include private fields.
    """
    # we won't have a header prior to python 2.7
    if not stem.prereq.is_python_27():
      test.runner.skip(self, '(header added in python 2.7)')
      return
    desc = get_relay_server_descriptor()
    desc_csv = export_csv(desc)
    # Public attributes are present; underscore-prefixed ones are not.
    self.assertTrue(',signature' in desc_csv)
    self.assertFalse(',_digest' in desc_csv)
    self.assertFalse(',_annotation_lines' in desc_csv)
  def test_empty_input(self):
    """
    Exercises when we don't provide any descriptors.
    """
    self.assertEqual('', export_csv([]))
  def test_invalid_attributes(self):
    """
    Attempts to make a csv with attributes that don't exist.
    """
    desc = get_relay_server_descriptor()
    self.assertRaises(ValueError, export_csv, desc, ('nickname', 'blarg!'))
  def test_multiple_descriptor_types(self):
    """
    Attempts to make a csv with multiple descriptor types.
    """
    server_desc = get_relay_server_descriptor()
    bridge_desc = get_bridge_server_descriptor()
self.assertRaises(ValueError, export_csv, (server_desc, bridge_desc)) | unknown | codeparrot/codeparrot-clean | ||
from lxml import etree
from xmodule.editing_module import XMLEditingDescriptor
from xmodule.xml_module import XmlDescriptor
import logging
import sys
from xblock.fields import String, Scope
from exceptions import SerializationError
log = logging.getLogger(__name__)
class RawDescriptor(XmlDescriptor, XMLEditingDescriptor):
    """
    Module that provides a raw editing view of its data and children. It
    requires that the definition xml is valid.
    """
    # Raw XML source for the module, edited verbatim by the author.
    data = String(help="XML data for the module", default="", scope=Scope.content)
    @classmethod
    def definition_from_xml(cls, xml_object, system):
        """Serialize the parsed node back to pretty-printed text and store it."""
        return {'data': etree.tostring(xml_object, pretty_print=True, encoding='unicode')}, []
    def definition_to_xml(self, resource_fs):
        """Parse ``self.data`` into an XML tree.
        Raises SerializationError with ~80 characters of context around
        the failure when the stored data is not well-formed XML.
        """
        try:
            return etree.fromstring(self.data)
        except etree.XMLSyntaxError as err:
            # Can't recover here, so just add some info and
            # re-raise
            lines = self.data.split('\n')
            line, offset = err.position
            # Clamp the slice start at 0: a negative start index would
            # wrap around and show text from the *end* of the line
            # instead of the text surrounding the error.
            start = max(offset - 40, 0)
            msg = (u"Unable to create xml for module {loc}. "
                   "Context: '{context}'".format(
                       context=lines[line - 1][start:offset + 40],
                       loc=self.location))
            raise SerializationError(self.location, msg)
class EmptyDataRawDescriptor(XmlDescriptor, XMLEditingDescriptor):
    """
    Version of RawDescriptor for modules which may have no XML data,
    but use XMLEditingDescriptor for import/export handling.
    """
    data = String(default='', scope=Scope.content)
    @classmethod
    def definition_from_xml(cls, xml_object, system):
        # A childless element with no attributes round-trips as the empty
        # string rather than as serialized XML.
        if len(xml_object) == 0 and len(xml_object.items()) == 0:
            return {'data': ''}, []
        return {'data': etree.tostring(xml_object, pretty_print=True, encoding='unicode')}, []
    def definition_to_xml(self, resource_fs):
        # Parse stored XML text when present; otherwise (see the final
        # return) a bare element named after the category is emitted.
        if self.data:
            return etree.fromstring(self.data)
return etree.Element(self.category) | unknown | codeparrot/codeparrot-clean | ||
"""Support for switches using the PiFace Digital I/O module on a RPi."""
import logging
import voluptuous as vol
from homeassistant.components import rpi_pfio
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import ATTR_NAME, DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
_LOGGER = logging.getLogger(__name__)
ATTR_INVERT_LOGIC = 'invert_logic'
CONF_PORTS = 'ports'
DEFAULT_INVERT_LOGIC = False
PORT_SCHEMA = vol.Schema({
vol.Optional(ATTR_NAME): cv.string,
vol.Optional(ATTR_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_PORTS, default={}): vol.Schema({
cv.positive_int: PORT_SCHEMA,
})
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the PiFace Digital Output devices."""
    # Build one switch entity per configured output port.
    entities = [
        RPiPFIOSwitch(port, settings.get(ATTR_NAME), settings[ATTR_INVERT_LOGIC])
        for port, settings in config.get(CONF_PORTS).items()
    ]
    add_entities(entities)
class RPiPFIOSwitch(ToggleEntity):
    """Representation of a PiFace Digital Output."""
    def __init__(self, port, name, invert_logic):
        """Initialize the pin."""
        self._port = port
        self._name = name or DEVICE_DEFAULT_NAME
        self._invert_logic = invert_logic
        self._state = False
        # Drive the output to its logical "off" level immediately so the
        # hardware matches the initial state reported to Home Assistant.
        rpi_pfio.write_output(self._port, 1 if self._invert_logic else 0)
    @property
    def name(self):
        """Return the name of the switch."""
        return self._name
    @property
    def should_poll(self):
        """Return the polling state."""
        # State is pushed from turn_on/turn_off; there is nothing to poll.
        return False
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state
    def turn_on(self, **kwargs):
        """Turn the device on."""
        # With inverted wiring, "on" means driving the output low (0).
        rpi_pfio.write_output(self._port, 0 if self._invert_logic else 1)
        self._state = True
        self.schedule_update_ha_state()
    def turn_off(self, **kwargs):
        """Turn the device off."""
        rpi_pfio.write_output(self._port, 1 if self._invert_logic else 0)
        self._state = False
self.schedule_update_ha_state() | unknown | codeparrot/codeparrot-clean | ||
import re
def parse(markdown):
    """Render a markdown document as a single HTML string."""
    pieces = []
    in_list = False
    in_list_append = False
    # Process the document line by line, threading the list-state flags
    # from each parse_line call into the next one.
    for raw_line in markdown.split('\n'):
        outcome = parse_line(raw_line, in_list, in_list_append)
        pieces.append(outcome['line'])
        in_list = outcome['in_list']
        in_list_append = outcome['in_list_append']
    if in_list:
        # The document ended while a bullet list was still open.
        pieces.append('</ul>')
    return ''.join(pieces)
def wrap(line, tag):
    """Surround ``line`` with an opening/closing HTML ``tag`` pair."""
    return '<%s>%s</%s>' % (tag, line, tag)
def check_headers(line):
    """Convert a markdown header line to ``<h1>``..``<h6>`` markup.

    A header is 1-6 '#' characters followed by a single space; anything
    else (including 7+ hashes or a missing space) is returned unchanged.
    Replaces the original loop that rebuilt and re-matched up to six
    pattern strings per call with one counted-repetition regex.
    """
    match = re.match(r'(#{1,6}) (.*)', line)
    if match is None:
        return line
    level = len(match.group(1))
    return '<h{0}>{1}</h{0}>'.format(level, match.group(2))
def check_bold(line):
    """Replace one ``__bold__`` span in ``line`` with <strong> markup.

    Returns the rewritten line, or None when no bold span is present,
    so callers can loop until everything is converted.
    """
    match = re.match('(.*)__(.*)__(.*)', line)
    if match is None:
        return None
    before, body, after = match.groups()
    return before + wrap(body, 'strong') + after
def check_italic(line):
    """Replace one ``_italic_`` span in ``line`` with <em> markup.

    Returns the rewritten line, or None when no italic span is present,
    so callers can loop until everything is converted.
    """
    match = re.match('(.*)_(.*)_(.*)', line)
    if match is None:
        return None
    before, body, after = match.groups()
    return before + wrap(body, 'em') + after
def parse_line(line, in_list, in_list_append):
    # Convert one markdown line to HTML, tracking list state across
    # calls.  Returns a dict holding the rendered 'line' plus the
    # updated 'in_list'/'in_list_append' flags the caller threads into
    # the next invocation.
    result = check_headers(line)
    list_match = re.match(r'\* (.*)', result)
    if (list_match):
        if not in_list:
            # First bullet of a new list: open <ul> here; the caller
            # closes it when the list (or the document) ends.
            result = '<ul>' + wrap(list_match.group(1), 'li')
            in_list = True
        else:
            result = wrap(list_match.group(1), 'li')
    else:
        if in_list:
            # List just ended: remember to emit '</ul>' in front of this
            # line's output (done at the bottom of this function).
            in_list_append = True
            in_list = False
    # Anything that isn't already a header or list item becomes a <p>.
    if not re.match('<h|<ul|<li', result):
        result = wrap(result, 'p')
    if list_match is None:
        # NOTE(review): with list_match None the rendered line should
        # never contain <li>, so this substitution looks like a no-op
        # here — confirm before removing.
        result = re.sub('(.*)(<li>)(.*)(</li>)(.*)',
                        r'\1\2<p>\3</p>\4\5', result)
    # Repeatedly rewrite emphasis spans until none remain.  (Each pass
    # calls the checker twice: once for the test, once for the value.)
    while check_bold(result):
        result = check_bold(result)
    while check_italic(result):
        result = check_italic(result)
    if in_list_append:
        result = '</ul>' + result
        in_list_append = False
    return {
        'line': result,
        'in_list': in_list,
        'in_list_append': in_list_append
} | unknown | codeparrot/codeparrot-clean | ||
// CommonJS module shaped like a transpiled ES module: the __esModule
// flag lets interop consumers treat `abc` as a named export and
// `default` as the default export.
module.exports = {
	__esModule: true,
	abc: "abc",
	default: "default"
}; | javascript | github | https://github.com/webpack/webpack | test/cases/cjs-tree-shaking/namespace/namespace-via-literal.js |
"""Copyright 2011 The University of Michigan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors - Jie Yu (jieyu@umich.edu)
"""
import os
from maple.core import logging
from maple.core import proto
def static_info_pb2():
    """Resolve the generated 'core.static_info_pb2' protobuf module via
    the project's proto loader (deferred so import cost is paid lazily)."""
    return proto.module('core.static_info_pb2')
class Image(object):
    """A binary image record from the static-info database."""
    def __init__(self, proto, db):
        """Wrap an image protobuf; ``db`` is the owning StaticInfo."""
        self.proto = proto
        self.db = db
        # Instructions belonging to this image, keyed by offset.
        self.offset_map = {}
    def id(self):
        """Numeric id of the image."""
        return self.proto.id
    def name(self):
        """Full path of the image."""
        return self.proto.name
    def basename(self):
        """File name of the image without directory components."""
        return os.path.basename(self.name())
    def shortname(self):
        """Basename truncated at the first '.' or '-', whichever yields
        the shorter prefix."""
        base = self.basename()
        candidates = (base.split('.')[0], base.split('-')[0])
        return min(candidates, key=len)
    def add_inst(self, inst):
        """Register an instruction under its image-relative offset."""
        self.offset_map[inst.offset()] = inst
    def __str__(self):
        # Left-pad the id to two columns, then the full path.
        return ' '.join(['%-2d' % self.id(), '%s' % self.name()])
class Inst(object):
    """An instruction record from the static-info database."""
    def __init__(self, proto, db):
        # proto: instruction protobuf; db: the owning StaticInfo index.
        self.proto = proto
        self.db = db
    def id(self):
        """Numeric id of the instruction."""
        return self.proto.id
    def image(self):
        """The Image this instruction belongs to (looked up in the
        owning StaticInfo's image table)."""
        return self.db.image_map[self.proto.image_id]
    def offset(self):
        """Offset of the instruction within its image."""
        return self.proto.offset
    def debug_info(self):
        """'file +line' source location, or '' when the protobuf carries
        no debug information for this instruction."""
        if not self.proto.HasField('debug_info'):
            return ''
        else:
            file = os.path.basename(self.proto.debug_info.file_name)
            line = self.proto.debug_info.line
            content = []
            content.append('%s' % file)
            content.append('+%d' % line)
            return ' '.join(content)
    def __str__(self):
        # One-line summary: id, image short name, hex offset, debug info.
        content = []
        content.append('%-5d' % self.id())
        content.append('%-10s' % self.image().shortname())
        content.append('0x%-6x' % self.offset())
        content.append('%s' % self.debug_info())
        return ' '.join(content)
class StaticInfo(object):
    """In-memory index over a serialized StaticInfoProto database.

    After load(), image_map maps image id -> Image and inst_map maps
    instruction id -> Inst.
    """
    def __init__(self):
        self.proto = static_info_pb2().StaticInfoProto()
        self.image_map = {}
        self.inst_map = {}
    def load(self, db_name):
        """Parse the protobuf file at ``db_name`` and build the lookup
        tables.  Silently does nothing when the file does not exist."""
        if not os.path.exists(db_name):
            return
        f = open(db_name, 'rb')
        self.proto.ParseFromString(f.read())
        f.close()
        for image_proto in self.proto.image:
            image = Image(image_proto, self)
            self.image_map[image.id()] = image
        for inst_proto in self.proto.inst:
            inst = Inst(inst_proto, self)
            self.inst_map[inst.id()] = inst
            # Also index the instruction under its owning image.
            self.image_map[inst.image().id()].add_inst(inst)
    def find_image(self, image_id):
        """Return the Image with the given id (KeyError if unknown)."""
        return self.image_map[image_id]
    def find_inst(self, inst_id):
        """Return the Inst with the given id (KeyError if unknown)."""
        return self.inst_map[inst_id]
    def display_image_table(self, f):
        # NOTE: itervalues() is Python 2 only.
        for image in self.image_map.itervalues():
            f.write(str(image))
            f.write('\n')
    def display_inst_table(self, f):
        for inst in self.inst_map.itervalues():
            f.write(str(inst))
f.write('\n') | unknown | codeparrot/codeparrot-clean | ||
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class TemplateTagTests(SimpleTestCase):
    """Tests for {% templatetag %}: each template-syntax marker renders
    as its literal characters, and a missing or unknown argument raises
    TemplateSyntaxError."""
    @setup({"templatetag01": "{% templatetag openblock %}"})
    def test_templatetag01(self):
        output = self.engine.render_to_string("templatetag01")
        self.assertEqual(output, "{%")
    @setup({"templatetag02": "{% templatetag closeblock %}"})
    def test_templatetag02(self):
        output = self.engine.render_to_string("templatetag02")
        self.assertEqual(output, "%}")
    @setup({"templatetag03": "{% templatetag openvariable %}"})
    def test_templatetag03(self):
        output = self.engine.render_to_string("templatetag03")
        self.assertEqual(output, "{{")
    @setup({"templatetag04": "{% templatetag closevariable %}"})
    def test_templatetag04(self):
        output = self.engine.render_to_string("templatetag04")
        self.assertEqual(output, "}}")
    @setup({"templatetag05": "{% templatetag %}"})
    def test_templatetag05(self):
        # No argument at all is a syntax error.
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template("templatetag05")
    @setup({"templatetag06": "{% templatetag foo %}"})
    def test_templatetag06(self):
        # An unrecognized argument is a syntax error.
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template("templatetag06")
    @setup({"templatetag07": "{% templatetag openbrace %}"})
    def test_templatetag07(self):
        output = self.engine.render_to_string("templatetag07")
        self.assertEqual(output, "{")
    @setup({"templatetag08": "{% templatetag closebrace %}"})
    def test_templatetag08(self):
        output = self.engine.render_to_string("templatetag08")
        self.assertEqual(output, "}")
    @setup({"templatetag09": "{% templatetag openbrace %}{% templatetag openbrace %}"})
    def test_templatetag09(self):
        # Adjacent markers simply concatenate.
        output = self.engine.render_to_string("templatetag09")
        self.assertEqual(output, "{{")
    @setup(
        {"templatetag10": "{% templatetag closebrace %}{% templatetag closebrace %}"}
    )
    def test_templatetag10(self):
        output = self.engine.render_to_string("templatetag10")
        self.assertEqual(output, "}}")
    @setup({"templatetag11": "{% templatetag opencomment %}"})
    def test_templatetag11(self):
        output = self.engine.render_to_string("templatetag11")
        self.assertEqual(output, "{#")
    @setup({"templatetag12": "{% templatetag closecomment %}"})
    def test_templatetag12(self):
        output = self.engine.render_to_string("templatetag12")
        self.assertEqual(output, "#}")
self.assertEqual(output, "#}") | python | github | https://github.com/django/django | tests/template_tests/syntax_tests/test_template_tag.py |
#!/usr/bin/python24
import cgi
import time
import MySQLdb
from traceback import format_exception
from sys import exc_info
from string import split
from string import strip
from sys import exit
from urllib import urlencode
import urllib2
DATADIR = "/home/user/data/"
PP_URL = "https://www.sandbox.paypal.com/cgi-bin/webscr"
#PP_URL = "https://www.paypal.com/cgi-bin/webscr"
# non testing is www.paypal.com and /cgi-bin/webscr
# note we used the fields custom and option_selection1 and
# option_selection2 to pass item characteristics
def confirm_paypal(f,f1):
    # f is the form handle to the cgi form passed by paypal
    # f1 is a file handle to a log text file
    # Echo every received IPN field back to PayPal with
    # cmd=_notify-validate; PayPal answers VERIFIED only when the
    # notification really originated from them (guards against spoofing).
    newparams={}
    for key in f.keys():
        newparams[key]=f[key].value
    newparams["cmd"]="_notify-validate"
    params=urlencode(newparams)
    f1.write(params + "\n")
    f1.write(PP_URL + "\n")
    # NOTE(review): req is built but urlopen below is called with the
    # bare URL, so the Content-type header added here is never sent —
    # confirm whether req was meant to be passed to urlopen.
    req = urllib2.Request(PP_URL)
    req.add_header("Content-type", "application/x-www-form-urlencoded")
    fo = urllib2.urlopen(PP_URL, params)
    ret = fo.read()
    if ret == "VERIFIED":
        f1.write(" verified send back ok\n")
        # Acknowledge to PayPal so it stops retrying this notification.
        print "Status: 200 Ok\n"
    else:
        f1.write(" ERROR did not verify\n")
        # Unverified notification: log and abort the whole script.
        exit(0)
    return ret
def write_db(f, f1):
    """Insert the buyer/order details from a verified PayPal IPN into MySQL.

    f  -- cgi.FieldStorage holding the (already verified) IPN parameters.
    f1 -- open log file handle; progress and errors are appended there.

    Any exception is caught and logged rather than propagated, so a bad
    record never causes the CGI script to error back at PayPal.
    """
    f1.write("... updating database\n")
    try:
        invoice = f['invoice'].value
        try:
            street = f['address_street'].value
            city = f['address_city'].value
            zipc = f['address_zip'].value
            country = f["address_country_code"].value
            firstn = f['first_name'].value
            lastn = f['last_name'].value
        except KeyError:
            # PayPal omits the address block for some payment types.
            street = ""
            city = ""
            zipc = ""
            country = ""
            firstn = ""
            lastn = ""
        try:
            # some countries don't have states
            state = f['address_state'].value
        except KeyError:
            state = ""
        # Default to "" so a missing "custom" field no longer raises
        # NameError below (which previously aborted the insert).
        payer_url = ""
        if "custom" in f:
            payer_url = f["custom"].value
        values = (invoice, firstn, lastn, street, city, state, zipc,
                  country, f['payer_email'].value, payer_url,
                  f['option_selection1'].value, f['option_selection2'].value)
        # Parameterized query: these fields come straight off the
        # network, so they must never be spliced into the SQL text
        # (the previous string-concatenated query was SQL-injectable).
        query = ("INSERT INTO names VALUES "
                 "(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
        f1.write(query + " " + repr(values) + "\n")
        db = MySQLdb.connect(host="localhost", user="username",
                             passwd="passwd", db="db")
        cursor = db.cursor()
        cursor.execute(query, values)
        # MySQLdb runs with autocommit disabled; without this commit the
        # inserted row would be rolled back when the connection closes.
        db.commit()
        db.close()
    except:
        f1.write(''.join(format_exception(*exc_info())))
if __name__=="__main__":
    # CGI entry point: verify the IPN with PayPal, then persist
    # completed payments.  cgitb renders tracebacks in the browser.
    import cgitb; cgitb.enable()
    #can disable cgitb if not req.
    f1 = open(DATADIR + "log1.txt",'a')
    f1.write("############ " +str(time.ctime(time.time())) + " starting request\n ")
    try:
        f = cgi.FieldStorage()
        f1.write(repr(f) + "\n\n")
        a = confirm_paypal(f, f1)
        if not f['payment_status'].value == "Completed":
            # We want to respond to anything that isn't a payment - but we won't insert into our database
            f1.write("### Not Completed so going to exit....\n")
            exit(0)
        else:
            f1.write("### Completed so going to write data...\n")
            write_db(f, f1)
    except:
f1.write(''.join(format_exception(*exc_info()))) | unknown | codeparrot/codeparrot-clean | ||
{
"applyable": true,
"complete": true,
"configuration": {
"provider_config": {
"tfcoremock": {
"full_name": "registry.terraform.io/hashicorp/tfcoremock",
"name": "tfcoremock",
"version_constraint": "0.1.1"
}
},
"root_module": {
"resources": [
{
"address": "tfcoremock_map.map",
"expressions": {
"id": {
"constant_value": "50E1A46E-E64A-4C1F-881C-BA85A5440964"
}
},
"mode": "managed",
"name": "map",
"provider_config_key": "tfcoremock",
"schema_version": 0,
"type": "tfcoremock_map"
}
]
}
},
"errored": false,
"format_version": "1.2",
"planned_values": {
"root_module": {
"resources": [
{
"address": "tfcoremock_map.map",
"mode": "managed",
"name": "map",
"provider_name": "registry.terraform.io/hashicorp/tfcoremock",
"schema_version": 0,
"sensitive_values": {},
"type": "tfcoremock_map",
"values": {
"id": "50E1A46E-E64A-4C1F-881C-BA85A5440964",
"map": null
}
}
]
}
},
"prior_state": {
"format_version": "1.0",
"values": {
"root_module": {
"resources": [
{
"address": "tfcoremock_map.map",
"mode": "managed",
"name": "map",
"provider_name": "registry.terraform.io/hashicorp/tfcoremock",
"schema_version": 0,
"sensitive_values": {
"map": {}
},
"type": "tfcoremock_map",
"values": {
"id": "50E1A46E-E64A-4C1F-881C-BA85A5440964",
"map": {
"one": "682672C7-0918-4448-8342-887BAE01062A",
"two": "212FFBF6-40FE-4862-B708-E6AA508E84E0",
"zero": "6B044AF7-172B-495B-BE11-B9546C12C3BD"
}
}
}
]
}
}
},
"resource_changes": [
{
"address": "tfcoremock_map.map",
"change": {
"actions": [
"update"
],
"after": {
"id": "50E1A46E-E64A-4C1F-881C-BA85A5440964",
"map": null
},
"after_sensitive": {},
"after_unknown": {},
"before": {
"id": "50E1A46E-E64A-4C1F-881C-BA85A5440964",
"map": {
"one": "682672C7-0918-4448-8342-887BAE01062A",
"two": "212FFBF6-40FE-4862-B708-E6AA508E84E0",
"zero": "6B044AF7-172B-495B-BE11-B9546C12C3BD"
}
},
"before_sensitive": {
"map": {}
}
},
"mode": "managed",
"name": "map",
"provider_name": "registry.terraform.io/hashicorp/tfcoremock",
"type": "tfcoremock_map"
}
]
} | json | github | https://github.com/hashicorp/terraform | testing/equivalence-tests/outputs/basic_map_null/plan.json |
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// addDefaultingFuncs registers the generated SetDefaults_* functions from
// this package with the scheme so they run during decoding/conversion.
func addDefaultingFuncs(scheme *runtime.Scheme) error {
	return RegisterDefaults(scheme)
}
// SetDefaults_ClusterRoleBinding defaults an empty RoleRef.APIGroup to the
// RBAC API group.
func SetDefaults_ClusterRoleBinding(obj *rbacv1.ClusterRoleBinding) {
	if len(obj.RoleRef.APIGroup) == 0 {
		obj.RoleRef.APIGroup = GroupName
	}
}
// SetDefaults_RoleBinding defaults an empty RoleRef.APIGroup to the RBAC
// API group.
func SetDefaults_RoleBinding(obj *rbacv1.RoleBinding) {
	if obj.RoleRef.APIGroup != "" {
		return
	}
	obj.RoleRef.APIGroup = GroupName
}
// SetDefaults_Subject fills in a subject's APIGroup based on its Kind when
// the caller left the group empty: service accounts use the core ("")
// group, while users and groups belong to the RBAC API group.
func SetDefaults_Subject(obj *rbacv1.Subject) {
	if len(obj.APIGroup) == 0 {
		switch obj.Kind {
		case rbacv1.ServiceAccountKind:
			obj.APIGroup = ""
		case rbacv1.UserKind:
			obj.APIGroup = GroupName
		case rbacv1.GroupKind:
			obj.APIGroup = GroupName
		}
	}
} | go | github | https://github.com/kubernetes/kubernetes | pkg/apis/rbac/v1/defaults.go |
/* |= matches a data-foo value equal to "bar" or starting with "bar-";
   .svelte-xyz is the component-scoping class. */
[data-foo|='bar'].svelte-xyz {
	color: red;
} | css | github | https://github.com/sveltejs/svelte | packages/svelte/tests/css/samples/omit-scoping-attribute-attribute-selector-pipe-equals/expected.css |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import url # noqa
from gbpui.panels.network_services import views
# URL routes for the network-services panel: service profiles, service
# chain nodes, specs and instances.
urlpatterns = [
    url(r'^$', views.IndexView.as_view(), name='index'),
    # Service profiles
    url(r'^create_service_profile$',
        views.CreateServiceProfileView.as_view(),
        name='create_service_profile'),
    url(r'^serviceprofile/(?P<sp_id>[^/]+)/$',
        views.ServiceProfileDetailsView.as_view(),
        name='service_profile_details'),
    # Service chain nodes
    url(r'^create_sc_node$',
        views.CreateServiceChainNodeView.as_view(),
        name='create_sc_node'),
    url(r'^update_sc_node/(?P<scnode_id>[^/]+)/$',
        views.UpdateServiceChainNodeView.as_view(),
        name='update_sc_node'),
    url(r'^sc_node/(?P<scnode_id>[^/]+)/$',
        views.ServiceChainNodeDetailsView.as_view(),
        name='sc_node_details'),
    # Service chain specs
    url(r'^create_sc_spec$',
        views.CreateServiceChainSpecView.as_view(),
        name='create_sc_spec'),
    url(r'^update_sc_spec/(?P<scspec_id>[^/]+)/$',
        views.UpdateServiceChainSpecView.as_view(),
        name='update_sc_spec'),
    url(r'^sc_spec/(?P<scspec_id>[^/]+)/$',
        views.ServiceChainSpecDetailsView.as_view(),
        name='sc_spec_details'),
    # Service chain instances (read-only details)
    url(r'^sc_instance/(?P<scinstance_id>[^/]+)/$',
        views.ServiceChainInstanceDetailsView.as_view(),
        name='sc_instance_details'),
] | unknown | codeparrot/codeparrot-clean | ||
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Test Creator role with Program scoped roles
"""
from ggrc import db
from ggrc.models import all_models
from integration.ggrc import TestCase
from integration.ggrc.api_helper import Api
from integration.ggrc.generator import Generator
from integration.ggrc.generator import ObjectGenerator
from integration.ggrc.models import factories
class TestCreatorProgram(TestCase):
  """Set up necessary objects and test Creator role with Program roles"""
  def setUp(self):
    super(TestCreatorProgram, self).setUp()
    self.generator = Generator()
    self.api = Api()
    self.object_generator = ObjectGenerator()
    self.init_users()
    self.init_roles()
    self.init_test_cases()
    self.objects = {}
  def init_test_cases(self):
    """ Create a dict of all possible test cases """
    # Keyed by user name; each case lists the expected HTTP status for
    # every (object, action) pair a Creator with that program role gets.
    self.test_cases = {
        "notmapped": {
            "objects": {
                "program": {
                    "get": 403,
                    "put": 403,
                    "delete": 403
                },
                "mapped_object": {
                    "get": 403,
                    "put": 403,
                    "delete": 403
                },
                "unrelated": {
                    "get": 403,
                    "put": 403,
                    "delete": 403,
                    "map": 403,
                }
            },
        },
        "mapped": {
            "objects": {
                "program": {
                    "get": 403,
                    "put": 403,
                    "delete": 403
                },
                "mapped_object": {
                    "get": 403,
                    "put": 403,
                    "delete": 403
                },
                "unrelated": {
                    "get": 403,
                    "put": 403,
                    "delete": 403,
                    "map": 403,
                }
            }
        },
        "ProgramReader": {
            "program_role": "ProgramReader",
            "objects": {
                "program": {
                    "get": 200,
                    "put": 403,
                    "delete": 403
                },
                "mapped_object": {
                    "get": 200,
                    "put": 403,
                    "delete": 403
                },
                "unrelated": {
                    "get": 403,
                    "put": 403,
                    "delete": 403,
                    "map": 403,
                }
            }
        },
        "ProgramOwner": {
            "program_role": "ProgramOwner",
            "objects": {
                "program": {
                    "get": 200,
                    "put": 200,
                    "delete": 200
                },
                "mapped_object": {
                    "get": 200,
                    "put": 200,
                    "delete": 200,
                },
                "unrelated": {
                    "get": 403,
                    "put": 403,
                    "delete": 403,
                    "map": 403,
                }
            }
        },
        "ProgramEditor": {
            "program_role": "ProgramEditor",
            "objects": {
                "program": {
                    "get": 200,
                    "put": 200,
                    "delete": 200
                },
                "mapped_object": {
                    "get": 200,
                    "put": 200,
                    "delete": 200
                },
                "unrelated": {
                    "get": 403,
                    "put": 403,
                    "delete": 403,
                    "map": 403,
                }
            }
        },
    }
  def init_roles(self):
    """ Build a name -> role dict from the API's role collection """
    response = self.api.get_query(all_models.Role, "")
    self.roles = {}
    for role in response.json.get("roles_collection").get("roles"):
      self.roles[role.get("name")] = role
  def init_users(self):
    """ Create users used by test cases """
    users = [
        ("creator", "Creator"),
        ("notmapped", "Creator"),
        ("mapped", "Creator"),
        ("ProgramReader", "Creator"),
        ("ProgramEditor", "Creator"),
        ("ProgramOwner", "Creator")]
    self.people = {}
    for (name, role) in users:
      _, user = self.object_generator.generate_person(
          data={"name": name}, user_role=role)
      self.people[name] = user
  def delete(self, obj):
    """ Create a delete request for the given object """
    return self.api.delete(obj).status_code
  def get(self, obj):
    """ Create a get request for the given object """
    return self.api.get(obj.__class__, obj.id).status_code
  def put(self, obj):
    """ Create a put request for the given object """
    response = self.api.get(obj.__class__, obj.id)
    if response.status_code == 200:
      return self.api.put(obj, response.json).status_code
    else:
      # Could not fetch the object; report the GET status instead.
      return response.status_code
  def map(self, dest):
    """ Map src to dest """
    response = self.api.post(all_models.Relationship, {
        "relationship": {"source": {
            "id": self.objects["program"].id,
            "type": self.objects["program"].type,
        }, "destination": {
            "id": dest.id,
            "type": dest.type
        }, "context": None},
    })
    return response.status_code
  def init_objects(self, test_case_name):
    """ Create a Program and a Mapped object for a given test case """
    # Create a program
    test_case = self.test_cases[test_case_name]
    creator = self.people.get('creator')
    self.api.set_user(creator)
    random_title = factories.random_str()
    response = self.api.post(all_models.Program, {
        "program": {"title": random_title, "context": None},
    })
    self.assertEqual(response.status_code, 201)
    context_id = response.json.get("program").get("context").get("id")
    program_id = response.json.get("program").get("id")
    self.objects["program"] = all_models.Program.query.get(program_id)
    # Create an object:
    for obj in ("mapped_object", "unrelated"):
      random_title = factories.random_str()
      response = self.api.post(all_models.System, {
          "system": {"title": random_title, "context": None},
      })
      self.assertEqual(response.status_code, 201)
      system_id = response.json.get("system").get("id")
      self.objects[obj] = all_models.System.query.get(system_id)
      # Become the owner
      response = self.api.post(all_models.ObjectOwner, {"object_owner": {
          "person": {
              "id": creator.id,
              "type": "Person",
          }, "ownable": {
              "id": system_id,
              "type": "System"
          }, "context": None}})
    # Map Object to Program
    response = self.api.post(all_models.Relationship, {
        "relationship": {"source": {
            "id": program_id,
            "type": "Program"
        }, "destination": {
            "id": self.objects["mapped_object"].id,
            "type": "System"
        }, "context": None},
    })
    self.assertEqual(response.status_code, 201)
    # Map people to Program:
    if test_case_name != "notmapped":
      person = self.people.get(test_case_name)
      response = self.api.post(all_models.ObjectPerson, {"object_person": {
          "person": {
              "id": person.id,
              "type": "Person",
              "href": "/api/people/{}".format(person.id),
          }, "personable": {
              "type": "Program",
              "href": "/api/programs/{}".format(program_id),
              "id": program_id,
          }, "context": {
              "type": "Context",
              "id": context_id,
              "href": "/api/contexts/{}".format(context_id)
          }}})
    # Add roles to mapped users:
    if "program_role" in test_case:
      person = self.people.get(test_case_name)
      role = self.roles[test_case["program_role"]]
      response = self.api.post(all_models.UserRole, {"user_role": {
          "person": {
              "id": person.id,
              "type": "Person",
              "href": "/api/people/{}".format(person.id),
          }, "role": {
              "type": "Role",
              "href": "/api/roles/{}".format(role["id"]),
              "id": role["id"],
          }, "context": {
              "type": "Context",
              "id": context_id,
              "href": "/api/contexts/{}".format(context_id)
          }}})
      self.assertEqual(response.status_code, 201)
  def test_creator_program_roles(self):
    """ Test creator role with all program scoped roles """
    # Check permissions based on test_cases:
    errors = []
    for test_case in self.test_cases:
      self.init_objects(test_case)
      person = self.people.get(test_case)
      objects = self.test_cases.get(test_case).get('objects')
      self.api.set_user(person)
      for obj in ("unrelated", "mapped_object", "program"):
        actions = objects[obj]
        for action in ("map", "get", "put", "delete"):
          # reset session:
          db.session.commit()
          if action not in actions:
            continue
          func = getattr(self, action)
          res = func(self.objects[obj])
          if res != actions[action]:
            errors.append(
                "{}: Tried {} on {}, but received {} instead of {}".format(
                    test_case, action, obj, res, actions[action]))
# Try mapping
self.assertEqual(errors, []) | unknown | codeparrot/codeparrot-clean | ||
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Component\DependencyInjection\Loader\Configurator;
use Symfony\Component\HttpKernel\DependencyInjection\LazyLoadingFragmentHandler;
use Symfony\Component\HttpKernel\Fragment\EsiFragmentRenderer;
use Symfony\Component\HttpKernel\Fragment\FragmentUriGenerator;
use Symfony\Component\HttpKernel\Fragment\FragmentUriGeneratorInterface;
use Symfony\Component\HttpKernel\Fragment\HIncludeFragmentRenderer;
use Symfony\Component\HttpKernel\Fragment\InlineFragmentRenderer;
use Symfony\Component\HttpKernel\Fragment\SsiFragmentRenderer;
return static function (ContainerConfigurator $container) {
    // Defaults: no global hinclude fallback template, and the standard
    // "/_fragment" path used for fragment sub-requests.
    $container->parameters()
        ->set('fragment.renderer.hinclude.global_template', null)
        ->set('fragment.path', '/_fragment')
    ;
    $container->services()
        // Entry point: lazily instantiates the tagged fragment renderers
        // through the (abstract) renderer locator argument.
        ->set('fragment.handler', LazyLoadingFragmentHandler::class)
            ->args([
                abstract_arg('fragment renderer locator'),
                service('request_stack'),
                param('kernel.debug'),
            ])
        // Builds signed fragment URIs under the configured fragment.path.
        ->set('fragment.uri_generator', FragmentUriGenerator::class)
            ->args([param('fragment.path'), service('uri_signer'), service('request_stack')])
        ->alias(FragmentUriGeneratorInterface::class, 'fragment.uri_generator')
        // Renders fragments with a direct in-process sub-request.
        ->set('fragment.renderer.inline', InlineFragmentRenderer::class)
            ->args([service('http_kernel'), service('event_dispatcher')])
            ->call('setFragmentPath', [param('fragment.path')])
            ->tag('kernel.fragment_renderer', ['alias' => 'inline'])
        // Client-side rendering via hinclude.js; twig is optional here.
        // NOTE(review): no kernel.fragment_renderer tag on this service in
        // this file - presumably registered elsewhere; verify before relying
        // on it.
        ->set('fragment.renderer.hinclude', HIncludeFragmentRenderer::class)
            ->args([
                service('twig')->nullOnInvalid(),
                service('uri_signer'),
                param('fragment.renderer.hinclude.global_template'),
            ])
            ->call('setFragmentPath', [param('fragment.path')])
        // Edge Side Includes; falls back to the inline renderer when the
        // optional "esi" service is unavailable.
        ->set('fragment.renderer.esi', EsiFragmentRenderer::class)
            ->args([
                service('esi')->nullOnInvalid(),
                service('fragment.renderer.inline'),
                service('uri_signer'),
            ])
            ->call('setFragmentPath', [param('fragment.path')])
            ->tag('kernel.fragment_renderer', ['alias' => 'esi'])
        // Server Side Includes; same fallback pattern as ESI.
        ->set('fragment.renderer.ssi', SsiFragmentRenderer::class)
            ->args([
                service('ssi')->nullOnInvalid(),
                service('fragment.renderer.inline'),
                service('uri_signer'),
            ])
            ->call('setFragmentPath', [param('fragment.path')])
            ->tag('kernel.fragment_renderer', ['alias' => 'ssi'])
    ;
}; | php | github | https://github.com/symfony/symfony | src/Symfony/Bundle/FrameworkBundle/Resources/config/fragment_renderer.php
#!/usr/bin/python
#
# Copyright (C) 2006, 2007, 2010 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for unittesting the locking module"""
import os
import unittest
import time
import Queue
import threading
import random
import gc
import itertools
from ganeti import constants
from ganeti import locking
from ganeti import errors
from ganeti import utils
from ganeti import compat
from ganeti import objects
from ganeti import query
import testutils
# This is used to test the ssynchronize decorator.
# Since it's passed as input to a decorator it must be declared as a global.
_decoratorlock = locking.SharedLock("decorator lock")
#: List for looping tests
ITERATIONS = range(8)
def _Repeat(fn):
"""Decorator for executing a function many times"""
def wrapper(*args, **kwargs):
for i in ITERATIONS:
fn(*args, **kwargs)
return wrapper
def SafeSleep(duration):
  """Sleep for at least C{duration} seconds of wall-clock time.

  Loops until the full duration has elapsed, so an early wakeup from
  L{time.sleep} simply triggers another sleep for the remainder.

  """
  deadline = time.time() + duration
  remaining = deadline - time.time()
  while remaining > 0.0:
    time.sleep(remaining)
    remaining = deadline - time.time()
class _ThreadedTestCase(unittest.TestCase):
  """Test class that supports adding/waiting on threads"""
  def setUp(self):
    unittest.TestCase.setUp(self)
    # Unbounded queue used by worker threads to report markers/results back
    # to the main test thread.
    self.done = Queue.Queue(0)
    # Threads started via _addThread and not yet reaped by _waitThreads.
    self.threads = []
  def _addThread(self, *args, **kwargs):
    """Create and remember a new thread"""
    # Arguments are forwarded verbatim to threading.Thread; the thread is
    # started immediately.
    t = threading.Thread(*args, **kwargs)
    self.threads.append(t)
    t.start()
    return t
  def _waitThreads(self):
    """Wait for all our threads to finish"""
    for t in self.threads:
      # Bounded join so a deadlocked worker fails the test instead of
      # hanging the whole test run.
      t.join(60)
      self.failIf(t.isAlive())
    self.threads = []
class _ConditionTestCase(_ThreadedTestCase):
  """Common test case for conditions"""
  def setUp(self, cls):
    # `cls` is the condition class under test; it wraps a plain lock.
    _ThreadedTestCase.setUp(self)
    self.lock = threading.Lock()
    self.cond = cls(self.lock)
  def _testAcquireRelease(self):
    # Ownership tracking: wait/notifyAll must raise while unowned and must
    # not implicitly release the condition while owned.
    self.assertFalse(self.cond._is_owned())
    self.assertRaises(RuntimeError, self.cond.wait, None)
    self.assertRaises(RuntimeError, self.cond.notifyAll)
    self.cond.acquire()
    self.assert_(self.cond._is_owned())
    self.cond.notifyAll()
    # Notifying keeps ownership with this thread.
    self.assert_(self.cond._is_owned())
    self.cond.release()
    self.assertFalse(self.cond._is_owned())
    self.assertRaises(RuntimeError, self.cond.wait, None)
    self.assertRaises(RuntimeError, self.cond.notifyAll)
  def _testNotification(self):
    # Markers on the done queue: NE=entered, NA=acquired, NN=notified.
    def _NotifyAll():
      self.done.put("NE")
      self.cond.acquire()
      self.done.put("NA")
      self.cond.notifyAll()
      self.done.put("NN")
      self.cond.release()
    self.cond.acquire()
    self._addThread(target=_NotifyAll)
    self.assertEqual(self.done.get(True, 1), "NE")
    # The notifier cannot acquire until we call wait (which releases).
    self.assertRaises(Queue.Empty, self.done.get_nowait)
    self.cond.wait(None)
    self.assertEqual(self.done.get(True, 1), "NA")
    self.assertEqual(self.done.get(True, 1), "NN")
    # After wait returns, this thread owns the condition again.
    self.assert_(self.cond._is_owned())
    self.cond.release()
    self.assertFalse(self.cond._is_owned())
class TestSingleNotifyPipeCondition(_ConditionTestCase):
  """SingleNotifyPipeCondition tests"""
  def setUp(self):
    _ConditionTestCase.setUp(self, locking.SingleNotifyPipeCondition)
  def testAcquireRelease(self):
    self._testAcquireRelease()
  def testNotification(self):
    self._testNotification()
  def testWaitReuse(self):
    # Waiting repeatedly (zero/short timeouts) is allowed as long as no
    # notification has been sent yet.
    self.cond.acquire()
    self.cond.wait(0)
    self.cond.wait(0.1)
    self.cond.release()
  def testNoNotifyReuse(self):
    # After its single notification this condition may not be reused:
    # both wait and notifyAll must raise.
    self.cond.acquire()
    self.cond.notifyAll()
    self.assertRaises(RuntimeError, self.cond.wait, None)
    self.assertRaises(RuntimeError, self.cond.notifyAll)
    self.cond.release()
class TestPipeCondition(_ConditionTestCase):
  """PipeCondition tests"""
  def setUp(self):
    _ConditionTestCase.setUp(self, locking.PipeCondition)
  def testAcquireRelease(self):
    self._testAcquireRelease()
  def testNotification(self):
    self._testNotification()
  def _TestWait(self, fn):
    # `fn` acquires the condition, puts "A", waits, then puts "W" (see the
    # callers below).  Start three such waiters.
    threads = [
      self._addThread(target=fn),
      self._addThread(target=fn),
      self._addThread(target=fn),
      ]
    # Wait for threads to be waiting
    for _ in threads:
      self.assertEqual(self.done.get(True, 1), "A")
    self.assertRaises(Queue.Empty, self.done.get_nowait)
    self.cond.acquire()
    # All three threads must be registered as waiters and show up in repr().
    self.assertEqual(len(self.cond._waiters), 3)
    self.assertEqual(self.cond._waiters, set(threads))
    self.assertTrue(repr(self.cond).startswith("<"))
    self.assertTrue("waiters=" in repr(self.cond))
    # This new thread can't acquire the lock, and thus call wait, before we
    # release it
    self._addThread(target=fn)
    self.cond.notifyAll()
    self.assertRaises(Queue.Empty, self.done.get_nowait)
    self.cond.release()
    # We should now get 3 W and 1 A (for the new thread) in whatever order
    w = 0
    a = 0
    for i in range(4):
      got = self.done.get(True, 1)
      if got == "W":
        w += 1
      elif got == "A":
        a += 1
      else:
        self.fail("Got %s on the done queue" % got)
    self.assertEqual(w, 3)
    self.assertEqual(a, 1)
    # Wake the remaining (fourth) waiter and collect its "W".
    self.cond.acquire()
    self.cond.notifyAll()
    self.cond.release()
    self._waitThreads()
    self.assertEqual(self.done.get_nowait(), "W")
    self.assertRaises(Queue.Empty, self.done.get_nowait)
  def testBlockingWait(self):
    # Waiters block indefinitely (timeout=None) until notified.
    def _BlockingWait():
      self.cond.acquire()
      self.done.put("A")
      self.cond.wait(None)
      self.cond.release()
      self.done.put("W")
    self._TestWait(_BlockingWait)
  def testLongTimeoutWait(self):
    # Same as above, but with a timeout long enough to never expire within
    # the test.
    def _Helper():
      self.cond.acquire()
      self.done.put("A")
      self.cond.wait(15.0)
      self.cond.release()
      self.done.put("W")
    self._TestWait(_Helper)
  def _TimeoutWait(self, timeout, check):
    # Wait with the given timeout, then report `check` on the done queue.
    self.cond.acquire()
    self.cond.wait(timeout)
    self.cond.release()
    self.done.put(check)
  def testShortTimeoutWait(self):
    # Short timeouts expire on their own without any notification.
    self._addThread(target=self._TimeoutWait, args=(0.1, "T1"))
    self._addThread(target=self._TimeoutWait, args=(0.1, "T1"))
    self._waitThreads()
    self.assertEqual(self.done.get_nowait(), "T1")
    self.assertEqual(self.done.get_nowait(), "T1")
    self.assertRaises(Queue.Empty, self.done.get_nowait)
  def testZeroTimeoutWait(self):
    # A zero timeout must return immediately.
    self._addThread(target=self._TimeoutWait, args=(0, "T0"))
    self._addThread(target=self._TimeoutWait, args=(0, "T0"))
    self._addThread(target=self._TimeoutWait, args=(0, "T0"))
    self._waitThreads()
    self.assertEqual(self.done.get_nowait(), "T0")
    self.assertEqual(self.done.get_nowait(), "T0")
    self.assertEqual(self.done.get_nowait(), "T0")
    self.assertRaises(Queue.Empty, self.done.get_nowait)
class TestSharedLock(_ThreadedTestCase):
"""SharedLock tests"""
  def setUp(self):
    _ThreadedTestCase.setUp(self)
    self.sl = locking.SharedLock("TestSharedLock")
    # Sanity-check the lock's repr up front; it must embed the lock name.
    self.assertTrue(repr(self.sl).startswith("<"))
    self.assertTrue("name=TestSharedLock" in repr(self.sl))
def testSequenceAndOwnership(self):
self.assertFalse(self.sl.is_owned())
self.sl.acquire(shared=1)
self.assert_(self.sl.is_owned())
self.assert_(self.sl.is_owned(shared=1))
self.assertFalse(self.sl.is_owned(shared=0))
self.sl.release()
self.assertFalse(self.sl.is_owned())
self.sl.acquire()
self.assert_(self.sl.is_owned())
self.assertFalse(self.sl.is_owned(shared=1))
self.assert_(self.sl.is_owned(shared=0))
self.sl.release()
self.assertFalse(self.sl.is_owned())
self.sl.acquire(shared=1)
self.assert_(self.sl.is_owned())
self.assert_(self.sl.is_owned(shared=1))
self.assertFalse(self.sl.is_owned(shared=0))
self.sl.release()
self.assertFalse(self.sl.is_owned())
def testBooleanValue(self):
# semaphores are supposed to return a true value on a successful acquire
self.assert_(self.sl.acquire(shared=1))
self.sl.release()
self.assert_(self.sl.acquire())
self.sl.release()
def testDoubleLockingStoE(self):
self.sl.acquire(shared=1)
self.assertRaises(AssertionError, self.sl.acquire)
def testDoubleLockingEtoS(self):
self.sl.acquire()
self.assertRaises(AssertionError, self.sl.acquire, shared=1)
def testDoubleLockingStoS(self):
self.sl.acquire(shared=1)
self.assertRaises(AssertionError, self.sl.acquire, shared=1)
def testDoubleLockingEtoE(self):
self.sl.acquire()
self.assertRaises(AssertionError, self.sl.acquire)
# helper functions: called in a separate thread they acquire the lock, send
# their identifier on the done queue, then release it.
def _doItSharer(self):
try:
self.sl.acquire(shared=1)
self.done.put("SHR")
self.sl.release()
except errors.LockError:
self.done.put("ERR")
def _doItExclusive(self):
try:
self.sl.acquire()
self.done.put("EXC")
self.sl.release()
except errors.LockError:
self.done.put("ERR")
def _doItDelete(self):
try:
self.sl.delete()
self.done.put("DEL")
except errors.LockError:
self.done.put("ERR")
def testSharersCanCoexist(self):
self.sl.acquire(shared=1)
threading.Thread(target=self._doItSharer).start()
self.assert_(self.done.get(True, 1))
self.sl.release()
@_Repeat
def testExclusiveBlocksExclusive(self):
self.sl.acquire()
self._addThread(target=self._doItExclusive)
self.assertRaises(Queue.Empty, self.done.get_nowait)
self.sl.release()
self._waitThreads()
self.failUnlessEqual(self.done.get_nowait(), "EXC")
@_Repeat
def testExclusiveBlocksDelete(self):
self.sl.acquire()
self._addThread(target=self._doItDelete)
self.assertRaises(Queue.Empty, self.done.get_nowait)
self.sl.release()
self._waitThreads()
self.failUnlessEqual(self.done.get_nowait(), "DEL")
self.sl = locking.SharedLock(self.sl.name)
@_Repeat
def testExclusiveBlocksSharer(self):
self.sl.acquire()
self._addThread(target=self._doItSharer)
self.assertRaises(Queue.Empty, self.done.get_nowait)
self.sl.release()
self._waitThreads()
self.failUnlessEqual(self.done.get_nowait(), "SHR")
@_Repeat
def testSharerBlocksExclusive(self):
self.sl.acquire(shared=1)
self._addThread(target=self._doItExclusive)
self.assertRaises(Queue.Empty, self.done.get_nowait)
self.sl.release()
self._waitThreads()
self.failUnlessEqual(self.done.get_nowait(), "EXC")
@_Repeat
def testSharerBlocksDelete(self):
self.sl.acquire(shared=1)
self._addThread(target=self._doItDelete)
self.assertRaises(Queue.Empty, self.done.get_nowait)
self.sl.release()
self._waitThreads()
self.failUnlessEqual(self.done.get_nowait(), "DEL")
self.sl = locking.SharedLock(self.sl.name)
@_Repeat
def testWaitingExclusiveBlocksSharer(self):
"""SKIPPED testWaitingExclusiveBlockSharer"""
return
self.sl.acquire(shared=1)
# the lock is acquired in shared mode...
self._addThread(target=self._doItExclusive)
# ...but now an exclusive is waiting...
self._addThread(target=self._doItSharer)
# ...so the sharer should be blocked as well
self.assertRaises(Queue.Empty, self.done.get_nowait)
self.sl.release()
self._waitThreads()
# The exclusive passed before
self.failUnlessEqual(self.done.get_nowait(), "EXC")
self.failUnlessEqual(self.done.get_nowait(), "SHR")
@_Repeat
def testWaitingSharerBlocksExclusive(self):
"""SKIPPED testWaitingSharerBlocksExclusive"""
return
self.sl.acquire()
# the lock is acquired in exclusive mode...
self._addThread(target=self._doItSharer)
# ...but now a sharer is waiting...
self._addThread(target=self._doItExclusive)
# ...the exclusive is waiting too...
self.assertRaises(Queue.Empty, self.done.get_nowait)
self.sl.release()
self._waitThreads()
# The sharer passed before
self.assertEqual(self.done.get_nowait(), "SHR")
self.assertEqual(self.done.get_nowait(), "EXC")
def testDelete(self):
self.sl.delete()
self.assertRaises(errors.LockError, self.sl.acquire)
self.assertRaises(errors.LockError, self.sl.acquire, shared=1)
self.assertRaises(errors.LockError, self.sl.delete)
def testDeleteTimeout(self):
self.assertTrue(self.sl.delete(timeout=60))
def testDeleteTimeoutFail(self):
ready = threading.Event()
finish = threading.Event()
def fn():
self.sl.acquire(shared=0)
ready.set()
finish.wait()
self.sl.release()
self._addThread(target=fn)
ready.wait()
# Test if deleting a lock owned in exclusive mode by another thread fails
# to delete when a timeout is used
self.assertFalse(self.sl.delete(timeout=0.02))
finish.set()
self._waitThreads()
self.assertTrue(self.sl.delete())
self.assertRaises(errors.LockError, self.sl.acquire)
def testNoDeleteIfSharer(self):
self.sl.acquire(shared=1)
self.assertRaises(AssertionError, self.sl.delete)
@_Repeat
def testDeletePendingSharersExclusiveDelete(self):
self.sl.acquire()
self._addThread(target=self._doItSharer)
self._addThread(target=self._doItSharer)
self._addThread(target=self._doItExclusive)
self._addThread(target=self._doItDelete)
self.sl.delete()
self._waitThreads()
# The threads who were pending return ERR
for _ in range(4):
self.assertEqual(self.done.get_nowait(), "ERR")
self.sl = locking.SharedLock(self.sl.name)
  @_Repeat
  def testDeletePendingDeleteExclusiveSharers(self):
    # Hold the lock exclusively while queueing a delete, an exclusive
    # acquire and two shared acquires behind it, then delete the lock.
    self.sl.acquire()
    self._addThread(target=self._doItDelete)
    self._addThread(target=self._doItExclusive)
    self._addThread(target=self._doItSharer)
    self._addThread(target=self._doItSharer)
    self.sl.delete()
    self._waitThreads()
    # All four pending threads (delete, exclusive and both sharers) fail
    # with a LockError and report ERR.
    self.assertEqual(self.done.get_nowait(), "ERR")
    self.assertEqual(self.done.get_nowait(), "ERR")
    self.assertEqual(self.done.get_nowait(), "ERR")
    self.assertEqual(self.done.get_nowait(), "ERR")
    # The lock was deleted; recreate it for the next _Repeat iteration.
    self.sl = locking.SharedLock(self.sl.name)
@_Repeat
def testExclusiveAcquireTimeout(self):
for shared in [0, 1]:
on_queue = threading.Event()
release_exclusive = threading.Event()
def _LockExclusive():
self.sl.acquire(shared=0, test_notify=on_queue.set)
self.done.put("A: start wait")
release_exclusive.wait()
self.done.put("A: end wait")
self.sl.release()
# Start thread to hold lock in exclusive mode
self._addThread(target=_LockExclusive)
# Wait for wait to begin
self.assertEqual(self.done.get(timeout=60), "A: start wait")
# Wait up to 60s to get lock, but release exclusive lock as soon as we're
# on the queue
self.failUnless(self.sl.acquire(shared=shared, timeout=60,
test_notify=release_exclusive.set))
self.done.put("got 2nd")
self.sl.release()
self._waitThreads()
self.assertEqual(self.done.get_nowait(), "A: end wait")
self.assertEqual(self.done.get_nowait(), "got 2nd")
self.assertRaises(Queue.Empty, self.done.get_nowait)
@_Repeat
def testAcquireExpiringTimeout(self):
def _AcquireWithTimeout(shared, timeout):
if not self.sl.acquire(shared=shared, timeout=timeout):
self.done.put("timeout")
for shared in [0, 1]:
# Lock exclusively
self.sl.acquire()
# Start shared acquires with timeout between 0 and 20 ms
for i in range(11):
self._addThread(target=_AcquireWithTimeout,
args=(shared, i * 2.0 / 1000.0))
# Wait for threads to finish (makes sure the acquire timeout expires
# before releasing the lock)
self._waitThreads()
# Release lock
self.sl.release()
for _ in range(11):
self.assertEqual(self.done.get_nowait(), "timeout")
self.assertRaises(Queue.Empty, self.done.get_nowait)
@_Repeat
def testSharedSkipExclusiveAcquires(self):
# Tests whether shared acquires jump in front of exclusive acquires in the
# queue.
def _Acquire(shared, name, notify_ev, wait_ev):
if notify_ev:
notify_fn = notify_ev.set
else:
notify_fn = None
if wait_ev:
wait_ev.wait()
if not self.sl.acquire(shared=shared, test_notify=notify_fn):
return
self.done.put(name)
self.sl.release()
# Get exclusive lock while we fill the queue
self.sl.acquire()
shrcnt1 = 5
shrcnt2 = 7
shrcnt3 = 9
shrcnt4 = 2
# Add acquires using threading.Event for synchronization. They'll be
# acquired exactly in the order defined in this list.
acquires = (shrcnt1 * [(1, "shared 1")] +
3 * [(0, "exclusive 1")] +
shrcnt2 * [(1, "shared 2")] +
shrcnt3 * [(1, "shared 3")] +
shrcnt4 * [(1, "shared 4")] +
3 * [(0, "exclusive 2")])
ev_cur = None
ev_prev = None
for args in acquires:
ev_cur = threading.Event()
self._addThread(target=_Acquire, args=args + (ev_cur, ev_prev))
ev_prev = ev_cur
# Wait for last acquire to start
ev_prev.wait()
# Expect 6 pending exclusive acquires and 1 for all shared acquires
# together
self.assertEqual(self.sl._count_pending(), 7)
# Release exclusive lock and wait
self.sl.release()
self._waitThreads()
# Check sequence
for _ in range(shrcnt1 + shrcnt2 + shrcnt3 + shrcnt4):
# Shared locks aren't guaranteed to be notified in order, but they'll be
# first
tmp = self.done.get_nowait()
if tmp == "shared 1":
shrcnt1 -= 1
elif tmp == "shared 2":
shrcnt2 -= 1
elif tmp == "shared 3":
shrcnt3 -= 1
elif tmp == "shared 4":
shrcnt4 -= 1
self.assertEqual(shrcnt1, 0)
self.assertEqual(shrcnt2, 0)
self.assertEqual(shrcnt3, 0)
self.assertEqual(shrcnt3, 0)
for _ in range(3):
self.assertEqual(self.done.get_nowait(), "exclusive 1")
for _ in range(3):
self.assertEqual(self.done.get_nowait(), "exclusive 2")
self.assertRaises(Queue.Empty, self.done.get_nowait)
def testIllegalDowngrade(self):
# Not yet acquired
self.assertRaises(AssertionError, self.sl.downgrade)
# Acquire in shared mode, downgrade should be no-op
self.assertTrue(self.sl.acquire(shared=1))
self.assertTrue(self.sl.is_owned(shared=1))
self.assertTrue(self.sl.downgrade())
self.assertTrue(self.sl.is_owned(shared=1))
self.sl.release()
def testDowngrade(self):
self.assertTrue(self.sl.acquire())
self.assertTrue(self.sl.is_owned(shared=0))
self.assertTrue(self.sl.downgrade())
self.assertTrue(self.sl.is_owned(shared=1))
self.sl.release()
@_Repeat
def testDowngradeJumpsAheadOfExclusive(self):
def _KeepExclusive(ev_got, ev_downgrade, ev_release):
self.assertTrue(self.sl.acquire())
self.assertTrue(self.sl.is_owned(shared=0))
ev_got.set()
ev_downgrade.wait()
self.assertTrue(self.sl.is_owned(shared=0))
self.assertTrue(self.sl.downgrade())
self.assertTrue(self.sl.is_owned(shared=1))
ev_release.wait()
self.assertTrue(self.sl.is_owned(shared=1))
self.sl.release()
def _KeepExclusive2(ev_started, ev_release):
self.assertTrue(self.sl.acquire(test_notify=ev_started.set))
self.assertTrue(self.sl.is_owned(shared=0))
ev_release.wait()
self.assertTrue(self.sl.is_owned(shared=0))
self.sl.release()
def _KeepShared(ev_started, ev_got, ev_release):
self.assertTrue(self.sl.acquire(shared=1, test_notify=ev_started.set))
self.assertTrue(self.sl.is_owned(shared=1))
ev_got.set()
ev_release.wait()
self.assertTrue(self.sl.is_owned(shared=1))
self.sl.release()
# Acquire lock in exclusive mode
ev_got_excl1 = threading.Event()
ev_downgrade_excl1 = threading.Event()
ev_release_excl1 = threading.Event()
th_excl1 = self._addThread(target=_KeepExclusive,
args=(ev_got_excl1, ev_downgrade_excl1,
ev_release_excl1))
ev_got_excl1.wait()
# Start a second exclusive acquire
ev_started_excl2 = threading.Event()
ev_release_excl2 = threading.Event()
th_excl2 = self._addThread(target=_KeepExclusive2,
args=(ev_started_excl2, ev_release_excl2))
ev_started_excl2.wait()
# Start shared acquires, will jump ahead of second exclusive acquire when
# first exclusive acquire downgrades
ev_shared = [(threading.Event(), threading.Event()) for _ in range(5)]
ev_release_shared = threading.Event()
th_shared = [self._addThread(target=_KeepShared,
args=(ev_started, ev_got, ev_release_shared))
for (ev_started, ev_got) in ev_shared]
# Wait for all shared acquires to start
for (ev, _) in ev_shared:
ev.wait()
# Check lock information
self.assertEqual(self.sl.GetLockInfo(set([query.LQ_MODE, query.LQ_OWNER])),
[(self.sl.name, "exclusive", [th_excl1.getName()], None)])
[(_, _, _, pending), ] = self.sl.GetLockInfo(set([query.LQ_PENDING]))
self.assertEqual([(pendmode, sorted(waiting))
for (pendmode, waiting) in pending],
[("exclusive", [th_excl2.getName()]),
("shared", sorted(th.getName() for th in th_shared))])
# Shared acquires won't start until the exclusive lock is downgraded
ev_downgrade_excl1.set()
# Wait for all shared acquires to be successful
for (_, ev) in ev_shared:
ev.wait()
# Check lock information again
self.assertEqual(self.sl.GetLockInfo(set([query.LQ_MODE,
query.LQ_PENDING])),
[(self.sl.name, "shared", None,
[("exclusive", [th_excl2.getName()])])])
[(_, _, owner, _), ] = self.sl.GetLockInfo(set([query.LQ_OWNER]))
self.assertEqual(set(owner), set([th_excl1.getName()] +
[th.getName() for th in th_shared]))
ev_release_excl1.set()
ev_release_excl2.set()
ev_release_shared.set()
self._waitThreads()
self.assertEqual(self.sl.GetLockInfo(set([query.LQ_MODE, query.LQ_OWNER,
query.LQ_PENDING])),
[(self.sl.name, None, None, [])])
@_Repeat
def testMixedAcquireTimeout(self):
sync = threading.Event()
def _AcquireShared(ev):
if not self.sl.acquire(shared=1, timeout=None):
return
self.done.put("shared")
# Notify main thread
ev.set()
# Wait for notification from main thread
sync.wait()
# Release lock
self.sl.release()
acquires = []
for _ in range(3):
ev = threading.Event()
self._addThread(target=_AcquireShared, args=(ev, ))
acquires.append(ev)
# Wait for all acquires to finish
for i in acquires:
i.wait()
self.assertEqual(self.sl._count_pending(), 0)
# Try to get exclusive lock
self.failIf(self.sl.acquire(shared=0, timeout=0.02))
# Acquire exclusive without timeout
exclsync = threading.Event()
exclev = threading.Event()
def _AcquireExclusive():
if not self.sl.acquire(shared=0):
return
self.done.put("exclusive")
# Notify main thread
exclev.set()
# Wait for notification from main thread
exclsync.wait()
self.sl.release()
self._addThread(target=_AcquireExclusive)
# Try to get exclusive lock
self.failIf(self.sl.acquire(shared=0, timeout=0.02))
# Make all shared holders release their locks
sync.set()
# Wait for exclusive acquire to succeed
exclev.wait()
self.assertEqual(self.sl._count_pending(), 0)
# Try to get exclusive lock
self.failIf(self.sl.acquire(shared=0, timeout=0.02))
def _AcquireSharedSimple():
if self.sl.acquire(shared=1, timeout=None):
self.done.put("shared2")
self.sl.release()
for _ in range(10):
self._addThread(target=_AcquireSharedSimple)
# Tell exclusive lock to release
exclsync.set()
# Wait for everything to finish
self._waitThreads()
self.assertEqual(self.sl._count_pending(), 0)
# Check sequence
for _ in range(3):
self.assertEqual(self.done.get_nowait(), "shared")
self.assertEqual(self.done.get_nowait(), "exclusive")
for _ in range(10):
self.assertEqual(self.done.get_nowait(), "shared2")
self.assertRaises(Queue.Empty, self.done.get_nowait)
def testPriority(self):
# Acquire in exclusive mode
self.assert_(self.sl.acquire(shared=0))
# Queue acquires
def _Acquire(prev, next, shared, priority, result):
prev.wait()
self.sl.acquire(shared=shared, priority=priority, test_notify=next.set)
try:
self.done.put(result)
finally:
self.sl.release()
counter = itertools.count(0)
priorities = range(-20, 30)
first = threading.Event()
prev = first
# Data structure:
# {
# priority:
# [(shared/exclusive, set(acquire names), set(pending threads)),
# (shared/exclusive, ...),
# ...,
# ],
# }
perprio = {}
# References shared acquire per priority in L{perprio}. Data structure:
# {
# priority: (shared=1, set(acquire names), set(pending threads)),
# }
prioshared = {}
for seed in [4979, 9523, 14902, 32440]:
# Use a deterministic random generator
rnd = random.Random(seed)
for priority in [rnd.choice(priorities) for _ in range(30)]:
modes = [0, 1]
rnd.shuffle(modes)
for shared in modes:
# Unique name
acqname = "%s/shr=%s/prio=%s" % (counter.next(), shared, priority)
ev = threading.Event()
thread = self._addThread(target=_Acquire,
args=(prev, ev, shared, priority, acqname))
prev = ev
# Record expected aqcuire, see above for structure
data = (shared, set([acqname]), set([thread]))
priolist = perprio.setdefault(priority, [])
if shared:
priosh = prioshared.get(priority, None)
if priosh:
# Shared acquires are merged
for i, j in zip(priosh[1:], data[1:]):
i.update(j)
assert data[0] == priosh[0]
else:
prioshared[priority] = data
priolist.append(data)
else:
priolist.append(data)
# Start all acquires and wait for them
first.set()
prev.wait()
# Check lock information
self.assertEqual(self.sl.GetLockInfo(set()),
[(self.sl.name, None, None, None)])
self.assertEqual(self.sl.GetLockInfo(set([query.LQ_MODE, query.LQ_OWNER])),
[(self.sl.name, "exclusive",
[threading.currentThread().getName()], None)])
self._VerifyPrioPending(self.sl.GetLockInfo(set([query.LQ_PENDING])),
perprio)
# Let threads acquire the lock
self.sl.release()
# Wait for everything to finish
self._waitThreads()
self.assert_(self.sl._check_empty())
# Check acquires by priority
for acquires in [perprio[i] for i in sorted(perprio.keys())]:
for (_, names, _) in acquires:
# For shared acquires, the set will contain 1..n entries. For exclusive
# acquires only one.
while names:
names.remove(self.done.get_nowait())
self.assertFalse(compat.any(names for (_, names, _) in acquires))
self.assertRaises(Queue.Empty, self.done.get_nowait)
def _VerifyPrioPending(self, ((name, mode, owner, pending), ), perprio):
self.assertEqual(name, self.sl.name)
self.assert_(mode is None)
self.assert_(owner is None)
self.assertEqual([(pendmode, sorted(waiting))
for (pendmode, waiting) in pending],
[(["exclusive", "shared"][int(bool(shared))],
sorted(t.getName() for t in threads))
for acquires in [perprio[i]
for i in sorted(perprio.keys())]
for (shared, _, threads) in acquires])
class _FakeTimeForSpuriousNotifications:
def __init__(self, now, check_end):
self.now = now
self.check_end = check_end
# Deterministic random number generator
self.rnd = random.Random(15086)
def time(self):
# Advance time if the random number generator thinks so (this is to test
# multiple notifications without advancing the time)
if self.rnd.random() < 0.3:
self.now += self.rnd.random()
self.check_end(self.now)
return self.now
@_Repeat
def testAcquireTimeoutWithSpuriousNotifications(self):
ready = threading.Event()
locked = threading.Event()
req = Queue.Queue(0)
epoch = 4000.0
timeout = 60.0
def check_end(now):
self.assertFalse(locked.isSet())
# If we waited long enough (in virtual time), tell main thread to release
# lock, otherwise tell it to notify once more
req.put(now < (epoch + (timeout * 0.8)))
time_fn = self._FakeTimeForSpuriousNotifications(epoch, check_end).time
sl = locking.SharedLock("test", _time_fn=time_fn)
# Acquire in exclusive mode
sl.acquire(shared=0)
def fn():
self.assertTrue(sl.acquire(shared=0, timeout=timeout,
test_notify=ready.set))
locked.set()
sl.release()
self.done.put("success")
# Start acquire with timeout and wait for it to be ready
self._addThread(target=fn)
ready.wait()
# The separate thread is now waiting to acquire the lock, so start sending
# spurious notifications.
# Wait for separate thread to ask for another notification
count = 0
while req.get():
# After sending the notification, the lock will take a short amount of
# time to notice and to retrieve the current time
sl._notify_topmost()
count += 1
self.assertTrue(count > 100, "Not enough notifications were sent")
self.assertFalse(locked.isSet())
# Some notifications have been sent, now actually release the lock
sl.release()
# Wait for lock to be acquired
locked.wait()
self._waitThreads()
self.assertEqual(self.done.get_nowait(), "success")
self.assertRaises(Queue.Empty, self.done.get_nowait)
class TestSharedLockInCondition(_ThreadedTestCase):
  """SharedLock as a condition lock tests"""
  def setUp(self):
    _ThreadedTestCase.setUp(self)
    self.sl = locking.SharedLock("TestSharedLockInCondition")
    self.setCondition()
  def setCondition(self):
    # Overridden by subclasses to plug a different condition class around
    # the same SharedLock.
    self.cond = threading.Condition(self.sl)
  def testKeepMode(self):
    # wait() must reacquire the underlying SharedLock in the same mode
    # (shared vs. exclusive) it was held in before waiting.
    self.cond.acquire(shared=1)
    self.assert_(self.sl.is_owned(shared=1))
    self.cond.wait(0)
    self.assert_(self.sl.is_owned(shared=1))
    self.cond.release()
    self.cond.acquire(shared=0)
    self.assert_(self.sl.is_owned(shared=0))
    self.cond.wait(0)
    self.assert_(self.sl.is_owned(shared=0))
    self.cond.release()
class TestSharedLockInPipeCondition(TestSharedLockInCondition):
  """SharedLock as a pipe condition lock tests"""
  def setCondition(self):
    # Re-runs the inherited tests with PipeCondition wrapping the SharedLock.
    self.cond = locking.PipeCondition(self.sl)
class TestSSynchronizedDecorator(_ThreadedTestCase):
  """Shared Lock Synchronized decorator test"""
  def setUp(self):
    _ThreadedTestCase.setUp(self)
  # The decorated helpers operate on the module-level _decoratorlock, since
  # ssynchronized takes the lock as a decorator argument.
  @locking.ssynchronized(_decoratorlock)
  def _doItExclusive(self):
    # Inside the decorated body the lock must be held (exclusively here).
    self.assert_(_decoratorlock.is_owned())
    self.done.put("EXC")
  @locking.ssynchronized(_decoratorlock, shared=1)
  def _doItSharer(self):
    self.assert_(_decoratorlock.is_owned(shared=1))
    self.done.put("SHR")
  def testDecoratedFunctions(self):
    # The lock is released again as soon as the decorated call returns.
    self._doItExclusive()
    self.assertFalse(_decoratorlock.is_owned())
    self._doItSharer()
    self.assertFalse(_decoratorlock.is_owned())
  def testSharersCanCoexist(self):
    # A shared holder does not block a decorated shared call.
    _decoratorlock.acquire(shared=1)
    threading.Thread(target=self._doItSharer).start()
    self.assert_(self.done.get(True, 1))
    _decoratorlock.release()
  @_Repeat
  def testExclusiveBlocksExclusive(self):
    _decoratorlock.acquire()
    self._addThread(target=self._doItExclusive)
    # give it a bit of time to check that it's not actually doing anything
    self.assertRaises(Queue.Empty, self.done.get_nowait)
    _decoratorlock.release()
    self._waitThreads()
    self.failUnlessEqual(self.done.get_nowait(), "EXC")
  @_Repeat
  def testExclusiveBlocksSharer(self):
    _decoratorlock.acquire()
    self._addThread(target=self._doItSharer)
    self.assertRaises(Queue.Empty, self.done.get_nowait)
    _decoratorlock.release()
    self._waitThreads()
    self.failUnlessEqual(self.done.get_nowait(), "SHR")
  @_Repeat
  def testSharerBlocksExclusive(self):
    _decoratorlock.acquire(shared=1)
    self._addThread(target=self._doItExclusive)
    self.assertRaises(Queue.Empty, self.done.get_nowait)
    _decoratorlock.release()
    self._waitThreads()
    self.failUnlessEqual(self.done.get_nowait(), "EXC")
if __name__ == "__main__":
    # Run the Ganeti-flavored unittest driver over the tests in this module.
    testutils.GanetiTestProgram()
#!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket client utility for testing.
This module contains helper methods for performing handshake, frame
sending/receiving as a WebSocket client.
This is code for testing mod_pywebsocket. Keep this code independent from
mod_pywebsocket. Don't import e.g. Stream class for generating frame for
testing. Using util.hexify, etc. that are not related to protocol processing
is allowed.
Note:
This code is far from robust, e.g., we cut corners in handshake.
"""
import base64
import errno
import logging
import os
import random
import re
import socket
import struct
import time
from mod_pywebsocket import common
from mod_pywebsocket import util
# Scheme-default ports for ws:// and wss://.
DEFAULT_PORT = 80
DEFAULT_SECURE_PORT = 443

# Opcodes introduced in IETF HyBi 01 for the new framing format
OPCODE_CONTINUATION = 0x0
OPCODE_CLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xa
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2

# Strings used for handshake
_UPGRADE_HEADER = 'Upgrade: websocket\r\n'
_UPGRADE_HEADER_HIXIE75 = 'Upgrade: WebSocket\r\n'
_CONNECTION_HEADER = 'Connection: Upgrade\r\n'

# GUID defined by RFC 6455 for computing Sec-WebSocket-Accept.
WEBSOCKET_ACCEPT_UUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'

# Status codes
STATUS_NORMAL_CLOSURE = 1000
STATUS_GOING_AWAY = 1001
STATUS_PROTOCOL_ERROR = 1002
STATUS_UNSUPPORTED_DATA = 1003
STATUS_NO_STATUS_RECEIVED = 1005
STATUS_ABNORMAL_CLOSURE = 1006
STATUS_INVALID_FRAME_PAYLOAD_DATA = 1007
STATUS_POLICY_VIOLATION = 1008
STATUS_MESSAGE_TOO_BIG = 1009
STATUS_MANDATORY_EXT = 1010
STATUS_INTERNAL_ENDPOINT_ERROR = 1011
STATUS_TLS_HANDSHAKE = 1015

# Extension tokens
_DEFLATE_FRAME_EXTENSION = 'deflate-frame'
# TODO(bashi): Update after mux implementation finished.
_MUX_EXTENSION = 'mux_DO_NOT_USE'
_PERMESSAGE_DEFLATE_EXTENSION = 'permessage-deflate'
def _method_line(resource):
return 'GET %s HTTP/1.1\r\n' % resource
def _sec_origin_header(origin):
return 'Sec-WebSocket-Origin: %s\r\n' % origin.lower()
def _origin_header(origin):
# 4.1 13. concatenation of the string "Origin:", a U+0020 SPACE character,
# and the /origin/ value, converted to ASCII lowercase, to /fields/.
return 'Origin: %s\r\n' % origin.lower()
def _format_host_header(host, port, secure):
    """Build the Host header line (spec section 4.1, steps 9-12).

    The port is appended only when it differs from the default for the
    scheme: 80 for ws:// and 443 for wss://.
    """
    default_port = DEFAULT_SECURE_PORT if secure else DEFAULT_PORT
    hostport = host.lower()
    if port != default_port:
        hostport = hostport + ':' + str(port)
    return 'Host: %s\r\n' % hostport
# TODO(tyoshino): Define a base class and move these shared methods to that.
def receive_bytes(socket, length):
    """Read exactly `length` bytes from `socket`.

    Loops over recv() until the requested amount has been accumulated.
    Raises Exception if the peer closes the connection before `length`
    bytes have arrived.
    """
    chunks = []
    bytes_left = length
    while bytes_left > 0:
        chunk = socket.recv(bytes_left)
        if not chunk:
            raise Exception(
                'Connection closed before receiving requested length '
                '(requested %d bytes but received only %d bytes)' %
                (length, length - bytes_left))
        chunks.append(chunk)
        bytes_left -= len(chunk)
    return ''.join(chunks)
# TODO(tyoshino): Now the WebSocketHandshake class diverts these methods. We
# should move to HTTP parser as specified in RFC 6455. For HyBi 00 and
# Hixie 75, pack these methods as some parser class.
def _read_fields(socket):
    """Read HTTP header fields until the blank line terminating them.

    Returns a dict mapping lowercased header name to a list of values
    (duplicate headers accumulate).
    """
    # 4.1 32. let /fields/ be a list of name-value pairs, initially empty.
    fields = {}
    while True:
        # 4.1 33. let /name/ and /value/ be empty byte arrays
        name = ''
        value = ''
        # 4.1 34. read /name/ (None signals the end of the header section)
        name = _read_name(socket)
        if name is None:
            break
        # 4.1 35. read spaces
        # TODO(tyoshino): Skip only one space as described in the spec.
        ch = _skip_spaces(socket)
        # 4.1 36. read /value/
        value = _read_value(socket, ch)
        # 4.1 37. read a byte from the server
        ch = receive_bytes(socket, 1)
        if ch != '\n':  # 0x0A
            # Bug fix: the format arguments were passed as (ch, name, value),
            # swapping the reported value and header name.
            raise Exception(
                'Expected LF but found %r while reading value %r for header '
                '%r' % (ch, value, name))
        # 4.1 38. append an entry to the /fields/ list that has the name
        # given by the string obtained by interpreting the /name/ byte
        # array as a UTF-8 stream and the value given by the string
        # obtained by interpreting the /value/ byte array as a UTF-8 byte
        # stream.
        fields.setdefault(name, []).append(value)
        # 4.1 39. return to the "Field" step above
    return fields
def _read_name(socket):
    """Read one header name up to the ':' separator, lowercasing ASCII caps.

    Returns None when a bare CR is read first (end of the header section).
    Raises Exception on a stray LF inside a name.
    """
    # 4.1 33. let /name/ be empty byte arrays
    name = ''
    while True:
        # 4.1 34. read a byte from the server
        ch = receive_bytes(socket, 1)
        if ch == '\r':  # 0x0D
            return None
        elif ch == '\n':  # 0x0A
            raise Exception(
                'Unexpected LF when reading header name %r' % name)
        elif ch == ':':  # 0x3A
            return name
        elif ch >= 'A' and ch <= 'Z':  # range 0x41 to 0x5A
            # Fold uppercase ASCII to lowercase (0x20 offset).
            ch = chr(ord(ch) + 0x20)
            name += ch
        else:
            name += ch
def _skip_spaces(socket):
    """Consume consecutive 0x20 SPACE bytes; return the first non-space byte."""
    # 4.1 35. read a byte from the server
    ch = receive_bytes(socket, 1)
    while ch == ' ':  # 0x20
        ch = receive_bytes(socket, 1)
    return ch
def _read_value(socket, ch):
    """Accumulate header-value bytes until a CR is seen.

    `ch` is the first already-read byte of the value. A stray LF inside
    the value raises Exception.
    """
    # 4.1 33. let /value/ be empty byte arrays
    value = ''
    # 4.1 36. read bytes until CR (0x0D) terminates the value.
    while ch != '\r':
        if ch == '\n':  # 0x0A
            raise Exception(
                'Unexpected LF when reading header value %r' % value)
        value += ch
        ch = receive_bytes(socket, 1)
    return value
def read_frame_header(socket):
    """Read and parse one RFC 6455 frame header from the server.

    Returns (fin, rsv1, rsv2, rsv3, opcode, payload_length). Frames from
    the server must not be masked; a set mask bit raises Exception, as
    does a 64-bit length with the most significant bit set.
    """
    header = receive_bytes(socket, 2)
    first_byte = ord(header[0])
    second_byte = ord(header[1])

    fin = (first_byte >> 7) & 1
    rsv1 = (first_byte >> 6) & 1
    rsv2 = (first_byte >> 5) & 1
    rsv3 = (first_byte >> 4) & 1
    opcode = first_byte & 0xf

    mask = (second_byte >> 7) & 1
    payload_length = second_byte & 0x7f
    if mask != 0:
        raise Exception(
            'Mask bit must be 0 for frames coming from server')

    if payload_length == 127:
        # 64-bit extended length follows.
        payload_length = struct.unpack(
            '!Q', receive_bytes(socket, 8))[0]
        if payload_length > 0x7FFFFFFFFFFFFFFF:
            raise Exception('Extended payload length >= 2^63')
    elif payload_length == 126:
        # 16-bit extended length follows.
        payload_length = struct.unpack(
            '!H', receive_bytes(socket, 2))[0]

    return fin, rsv1, rsv2, rsv3, opcode, payload_length
class _TLSSocket(object):
    """Wrapper for a TLS connection.

    NOTE(review): relies on the legacy Python 2 socket.ssl() API, which
    was removed in Python 3 -- confirm before porting this module.
    """

    def __init__(self, raw_socket):
        self._ssl = socket.ssl(raw_socket)

    def send(self, bytes):
        return self._ssl.write(bytes)

    def recv(self, size=-1):
        return self._ssl.read(size)

    def close(self):
        # Nothing to do.
        pass
class HttpStatusException(Exception):
    """Raised when the handshake response carries an unexpected HTTP status.

    Attributes:
        status: the numeric HTTP status code received from the server.
    """

    def __init__(self, name, status):
        Exception.__init__(self, name)
        self.status = status
class WebSocketHandshake(object):
    """Opening handshake processor for the WebSocket protocol (RFC 6455)."""

    def __init__(self, options):
        self._logger = util.get_class_logger(self)

        self._options = options

    def handshake(self, socket):
        """Handshake WebSocket.

        Raises:
            Exception: handshake failed.
        """
        self._socket = socket

        request_line = _method_line(self._options.resource)
        self._logger.debug('Opening handshake Request-Line: %r', request_line)
        self._socket.sendall(request_line)

        fields = []
        fields.append(_UPGRADE_HEADER)
        fields.append(_CONNECTION_HEADER)
        fields.append(_format_host_header(
            self._options.server_host,
            self._options.server_port,
            self._options.use_tls))
        # Bug fix: the original used 'is 8', relying on CPython small-int
        # caching for an integer comparison; '==' is the correct operator.
        if self._options.version == 8:
            fields.append(_sec_origin_header(self._options.origin))
        else:
            fields.append(_origin_header(self._options.origin))
        original_key = os.urandom(16)
        key = base64.b64encode(original_key)
        self._logger.debug(
            'Sec-WebSocket-Key: %s (%s)', key, util.hexify(original_key))
        fields.append('Sec-WebSocket-Key: %s\r\n' % key)
        fields.append('Sec-WebSocket-Version: %d\r\n' % self._options.version)

        # Setting up extensions.
        if len(self._options.extensions) > 0:
            fields.append('Sec-WebSocket-Extensions: %s\r\n' %
                          ', '.join(self._options.extensions))

        self._logger.debug('Opening handshake request headers: %r', fields)
        for field in fields:
            self._socket.sendall(field)
        self._socket.sendall('\r\n')
        self._logger.info('Sent opening handshake request')

        # Read the Status-Line up to and including its LF.
        field = ''
        while True:
            ch = receive_bytes(self._socket, 1)
            field += ch
            if ch == '\n':
                break
        self._logger.debug('Opening handshake Response-Line: %r', field)

        if len(field) < 7 or not field.endswith('\r\n'):
            raise Exception('Wrong status line: %r' % field)
        m = re.match('[^ ]* ([^ ]*) .*', field)
        if m is None:
            raise Exception(
                'No HTTP status code found in status line: %r' % field)
        code = m.group(1)
        if not re.match('[0-9][0-9][0-9]', code):
            raise Exception(
                'HTTP status code %r is not three digit in status line: %r' %
                (code, field))
        if code != '101':
            raise HttpStatusException(
                'Expected HTTP status code 101 but found %r in status line: '
                '%r' % (code, field), int(code))
        fields = _read_fields(self._socket)
        # Consume the LF of the blank line terminating the headers.
        ch = receive_bytes(self._socket, 1)
        if ch != '\n':  # 0x0A
            raise Exception('Expected LF but found: %r' % ch)
        self._logger.debug('Opening handshake response headers: %r', fields)

        # Check /fields/
        if len(fields['upgrade']) != 1:
            raise Exception(
                'Multiple Upgrade headers found: %s' % fields['upgrade'])
        if len(fields['connection']) != 1:
            raise Exception(
                'Multiple Connection headers found: %s' % fields['connection'])
        if fields['upgrade'][0] != 'websocket':
            raise Exception(
                'Unexpected Upgrade header value: %s' % fields['upgrade'][0])
        if fields['connection'][0].lower() != 'upgrade':
            raise Exception(
                'Unexpected Connection header value: %s' %
                fields['connection'][0])
        if len(fields['sec-websocket-accept']) != 1:
            raise Exception(
                'Multiple Sec-WebSocket-Accept headers found: %s' %
                fields['sec-websocket-accept'])
        accept = fields['sec-websocket-accept'][0]

        # Validate that the accept value is well-formed base64 decoding to
        # a 20-byte SHA-1 digest.
        try:
            decoded_accept = base64.b64decode(accept)
        except TypeError:
            # Bug fix: the original raised HandshakeException, a name not
            # defined in this module, which would surface as a NameError.
            raise Exception(
                'Illegal value for header Sec-WebSocket-Accept: ' + accept)
        if len(decoded_accept) != 20:
            raise Exception(
                'Decoded value of Sec-WebSocket-Accept is not 20-byte long')
        self._logger.debug('Actual Sec-WebSocket-Accept: %r (%s)',
                           accept, util.hexify(decoded_accept))
        original_expected_accept = util.sha1_hash(
            key + WEBSOCKET_ACCEPT_UUID).digest()
        expected_accept = base64.b64encode(original_expected_accept)
        self._logger.debug('Expected Sec-WebSocket-Accept: %r (%s)',
                           expected_accept,
                           util.hexify(original_expected_accept))
        if accept != expected_accept:
            raise Exception(
                'Invalid Sec-WebSocket-Accept header: %r (expected) != %r '
                '(actual)' % (accept, expected_accept))

        server_extensions_header = fields.get('sec-websocket-extensions')
        accepted_extensions = []
        if server_extensions_header is not None:
            accepted_extensions = common.parse_extensions(
                ', '.join(server_extensions_header))

        # Scan accepted extension list to check if there is any unrecognized
        # extensions or extensions we didn't request in it. Then, for
        # extensions we request, parse them and store parameters. They will be
        # used later by each extension.
        deflate_frame_accepted = False
        mux_accepted = False
        for extension in accepted_extensions:
            if extension.name() == _DEFLATE_FRAME_EXTENSION:
                if self._options.use_deflate_frame:
                    deflate_frame_accepted = True
                    continue
            if extension.name() == _MUX_EXTENSION:
                if self._options.use_mux:
                    mux_accepted = True
                    continue
            if extension.name() == _PERMESSAGE_DEFLATE_EXTENSION:
                checker = self._options.check_permessage_deflate
                if checker:
                    checker(extension)
                    continue
            # An extension we did not request (or did not recognize).
            raise Exception(
                'Received unrecognized extension: %s' % extension.name())

        # Let all extensions check the response for extension request.
        if (self._options.use_deflate_frame and
            not deflate_frame_accepted):
            raise Exception('%s extension not accepted' %
                            _DEFLATE_FRAME_EXTENSION)
        if self._options.use_mux and not mux_accepted:
            raise Exception('%s extension not accepted' % _MUX_EXTENSION)
class WebSocketHybi00Handshake(object):
    """Opening handshake processor for the WebSocket protocol version HyBi 00.
    """

    def __init__(self, options, draft_field):
        self._logger = util.get_class_logger(self)

        self._options = options
        self._draft_field = draft_field

    def handshake(self, socket):
        """Handshake WebSocket.

        Raises:
            Exception: handshake failed.
        """
        self._socket = socket

        # 4.1 5. send request line.
        request_line = _method_line(self._options.resource)
        self._logger.debug('Opening handshake Request-Line: %r', request_line)
        self._socket.sendall(request_line)
        # 4.1 6. Let /fields/ be an empty list of strings.
        fields = []
        # 4.1 7. Add the string "Upgrade: WebSocket" to /fields/.
        fields.append(_UPGRADE_HEADER_HIXIE75)
        # 4.1 8. Add the string "Connection: Upgrade" to /fields/.
        fields.append(_CONNECTION_HEADER)
        # 4.1 9-12. Add Host: field to /fields/.
        fields.append(_format_host_header(
            self._options.server_host,
            self._options.server_port,
            self._options.use_tls))
        # 4.1 13. Add Origin: field to /fields/.
        fields.append(_origin_header(self._options.origin))
        # TODO: 4.1 14 Add Sec-WebSocket-Protocol: field to /fields/.
        # TODO: 4.1 15 Add cookie headers to /fields/.
        # 4.1 16-23. Add Sec-WebSocket-Key<n> to /fields/.
        self._number1, key1 = self._generate_sec_websocket_key()
        self._logger.debug('Number1: %d', self._number1)
        fields.append('Sec-WebSocket-Key1: %s\r\n' % key1)
        self._number2, key2 = self._generate_sec_websocket_key()
        # Bug fix: this debug line logged self._number1 instead of
        # self._number2.
        self._logger.debug('Number2: %d', self._number2)
        fields.append('Sec-WebSocket-Key2: %s\r\n' % key2)
        fields.append('Sec-WebSocket-Draft: %s\r\n' % self._draft_field)
        # 4.1 24. For each string in /fields/, in a random order: send the
        # string, encoded as UTF-8, followed by a UTF-8 encoded U+000D CARRIAGE
        # RETURN U+000A LINE FEED character pair (CRLF).
        random.shuffle(fields)
        self._logger.debug('Opening handshake request headers: %r', fields)
        for field in fields:
            self._socket.sendall(field)
        # 4.1 25. send a UTF-8-encoded U+000D CARRIAGE RETURN U+000A LINE FEED
        # character pair (CRLF).
        self._socket.sendall('\r\n')
        # 4.1 26. let /key3/ be a string consisting of eight random bytes (or
        # equivalently, a random 64 bit integer encoded in a big-endian order).
        self._key3 = self._generate_key3()
        # 4.1 27. send /key3/ to the server.
        self._socket.sendall(self._key3)
        self._logger.debug(
            'Key3: %r (%s)', self._key3, util.hexify(self._key3))
        self._logger.info('Sent opening handshake request')
        # 4.1 28. Read bytes from the server until either the connection
        # closes, or a 0x0A byte is read. let /field/ be these bytes, including
        # the 0x0A bytes.
        field = ''
        while True:
            ch = receive_bytes(self._socket, 1)
            field += ch
            if ch == '\n':
                break
        self._logger.debug('Opening handshake Response-Line: %r', field)
        # if /field/ is not at least seven bytes long, or if the last
        # two bytes aren't 0x0D and 0x0A respectively, or if it does not
        # contain at least two 0x20 bytes, then fail the WebSocket connection
        # and abort these steps.
        if len(field) < 7 or not field.endswith('\r\n'):
            raise Exception('Wrong status line: %r' % field)
        m = re.match('[^ ]* ([^ ]*) .*', field)
        if m is None:
            raise Exception('No code found in status line: %r' % field)
        # 4.1 29. let /code/ be the substring of /field/ that starts from the
        # byte after the first 0x20 byte, and ends with the byte before the
        # second 0x20 byte.
        code = m.group(1)
        # 4.1 30. if /code/ is not three bytes long, or if any of the bytes in
        # /code/ are not in the range 0x30 to 0x90, then fail the WebSocket
        # connection and abort these steps.
        if not re.match('[0-9][0-9][0-9]', code):
            raise Exception(
                'HTTP status code %r is not three digit in status line: %r' %
                (code, field))
        # 4.1 31. if /code/, interpreted as UTF-8, is "101", then move to the
        # next step.
        if code != '101':
            raise HttpStatusException(
                'Expected HTTP status code 101 but found %r in status line: '
                '%r' % (code, field), int(code))
        # 4.1 32-39. read fields into /fields/
        fields = _read_fields(self._socket)
        self._logger.debug('Opening handshake response headers: %r', fields)
        # 4.1 40. _Fields processing_
        # read a byte from server
        ch = receive_bytes(self._socket, 1)
        if ch != '\n':  # 0x0A
            raise Exception('Expected LF but found %r' % ch)
        # 4.1 41. check /fields/
        if len(fields['upgrade']) != 1:
            raise Exception(
                'Multiple Upgrade headers found: %s' % fields['upgrade'])
        if len(fields['connection']) != 1:
            raise Exception(
                'Multiple Connection headers found: %s' % fields['connection'])
        if len(fields['sec-websocket-origin']) != 1:
            # Bug fix: the message indexed the misspelled key
            # 'sec-sebsocket-origin', raising KeyError instead of the
            # intended Exception.
            raise Exception(
                'Multiple Sec-WebSocket-Origin headers found: %s' %
                fields['sec-websocket-origin'])
        if len(fields['sec-websocket-location']) != 1:
            # Bug fix: same misspelling ('sec-sebsocket-location') as above.
            raise Exception(
                'Multiple Sec-WebSocket-Location headers found: %s' %
                fields['sec-websocket-location'])
        # TODO(ukai): protocol
        # if the entry's name is "upgrade"
        # if the value is not exactly equal to the string "WebSocket",
        # then fail the WebSocket connection and abort these steps.
        if fields['upgrade'][0] != 'WebSocket':
            raise Exception(
                'Unexpected Upgrade header value: %s' % fields['upgrade'][0])
        # if the entry's name is "connection"
        # if the value, converted to ASCII lowercase, is not exactly equal
        # to the string "upgrade", then fail the WebSocket connection and
        # abort these steps.
        if fields['connection'][0].lower() != 'upgrade':
            raise Exception(
                'Unexpected Connection header value: %s' %
                fields['connection'][0])
        # TODO(ukai): check origin, location, cookie, ..
        # 4.1 42. let /challenge/ be the concatenation of /number_1/,
        # expressed as a big endian 32 bit integer, /number_2/, expressed
        # as big endian 32 bit integer, and the eight bytes of /key_3/ in the
        # order they were sent on the wire.
        challenge = struct.pack('!I', self._number1)
        challenge += struct.pack('!I', self._number2)
        challenge += self._key3
        self._logger.debug(
            'Challenge: %r (%s)', challenge, util.hexify(challenge))
        # 4.1 43. let /expected/ be the MD5 fingerprint of /challenge/ as a
        # big-endian 128 bit string.
        expected = util.md5_hash(challenge).digest()
        self._logger.debug(
            'Expected challenge response: %r (%s)',
            expected, util.hexify(expected))
        # 4.1 44. read sixteen bytes from the server.
        # let /reply/ be those bytes.
        reply = receive_bytes(self._socket, 16)
        self._logger.debug(
            'Actual challenge response: %r (%s)', reply, util.hexify(reply))
        # 4.1 45. if /reply/ does not exactly equal /expected/, then fail
        # the WebSocket connection and abort these steps.
        if expected != reply:
            raise Exception(
                'Bad challenge response: %r (expected) != %r (actual)' %
                (expected, reply))
        # 4.1 46. The *WebSocket connection is established*.

    def _generate_sec_websocket_key(self):
        """Generate a (number, key) pair per HyBi 00 section 4.1 steps 16-22."""
        # 4.1 16. let /spaces_n/ be a random integer from 1 to 12 inclusive.
        spaces = random.randint(1, 12)
        # 4.1 17. let /max_n/ be the largest integer not greater than
        # 4,294,967,295 divided by /spaces_n/.
        # Floor division keeps the result an integer on both Python 2 and 3
        # (identical result to '/' under Python 2 int division).
        maxnum = 4294967295 // spaces
        # 4.1 18. let /number_n/ be a random integer from 0 to /max_n/
        # inclusive.
        number = random.randint(0, maxnum)
        # 4.1 19. let /product_n/ be the result of multiplying /number_n/ and
        # /spaces_n/ together.
        product = number * spaces
        # 4.1 20. let /key_n/ be a string consisting of /product_n/, expressed
        # in base ten using the numerals in the range U+0030 DIGIT ZERO (0) to
        # U+0039 DIGIT NINE (9).
        key = str(product)
        # 4.1 21. insert between one and twelve random characters from the
        # range U+0021 to U+002F and U+003A to U+007E into /key_n/ at random
        # positions.
        # list(range(...)) works identically on Python 2 and 3, unlike
        # concatenating bare range() objects.
        available_chars = (list(range(0x21, 0x2f + 1)) +
                           list(range(0x3a, 0x7e + 1)))
        n = random.randint(1, 12)
        for _ in range(n):
            ch = random.choice(available_chars)
            pos = random.randint(0, len(key))
            key = key[0:pos] + chr(ch) + key[pos:]
        # 4.1 22. insert /spaces_n/ U+0020 SPACE characters into /key_n/ at
        # random positions other than start or end of the string.
        for _ in range(spaces):
            pos = random.randint(1, len(key) - 1)
            key = key[0:pos] + ' ' + key[pos:]
        return number, key

    def _generate_key3(self):
        # 4.1 26. let /key3/ be a string consisting of eight random bytes (or
        # equivalently, a random 64 bit integer encoded in a big-endian order).
        return ''.join([chr(random.randint(0, 255)) for _ in range(8)])
class WebSocketHixie75Handshake(object):
    """WebSocket handshake processor for IETF Hixie 75."""

    # Exact response prefix a Hixie 75 server must send back.
    _EXPECTED_RESPONSE = (
        'HTTP/1.1 101 Web Socket Protocol Handshake\r\n' +
        _UPGRADE_HEADER_HIXIE75 +
        _CONNECTION_HEADER)

    def __init__(self, options):
        self._logger = util.get_class_logger(self)
        self._options = options

    def _skip_headers(self):
        # Consume bytes until the CRLFCRLF header terminator is seen.
        terminator = '\r\n\r\n'
        pos = 0
        while pos < len(terminator):
            received = receive_bytes(self._socket, 1)
            if received == terminator[pos]:
                pos += 1
            elif received == terminator[0]:
                # A stray CR restarts the match one character in.
                pos = 1
            else:
                pos = 0

    def handshake(self, socket):
        # Perform the Hixie 75 opening handshake; raises on any mismatch
        # with the expected response prefix.
        self._socket = socket

        request_line = _method_line(self._options.resource)
        self._logger.debug('Opening handshake Request-Line: %r', request_line)
        self._socket.sendall(request_line)

        headers = _UPGRADE_HEADER_HIXIE75 + _CONNECTION_HEADER
        headers += _format_host_header(
            self._options.server_host,
            self._options.server_port,
            self._options.use_tls)
        headers += _origin_header(self._options.origin)
        self._logger.debug('Opening handshake request headers: %r', headers)
        self._socket.sendall(headers)
        self._socket.sendall('\r\n')
        self._logger.info('Sent opening handshake request')

        # Compare the response byte-by-byte against the expected prefix.
        for expected_char in WebSocketHixie75Handshake._EXPECTED_RESPONSE:
            received = receive_bytes(self._socket, 1)
            if expected_char != received:
                raise Exception('Handshake failure')
        # We cut corners and skip other headers.
        self._skip_headers()
class WebSocketStream(object):
    """Frame processor for the WebSocket protocol (RFC 6455)."""

    def __init__(self, socket, handshake):
        self._handshake = handshake
        self._socket = socket

        # Filters applied to application data part of data frames.
        self._outgoing_frame_filter = None
        self._incoming_frame_filter = None
        if self._handshake._options.use_deflate_frame:
            self._outgoing_frame_filter = (
                util._RFC1979Deflater(None, False))
            self._incoming_frame_filter = util._RFC1979Inflater()

        # True while a fragmented message is in progress; forces
        # continuation opcodes on subsequent frames.
        self._fragmented = False

    def _mask_hybi(self, s):
        """Return a 4-byte masking nonce followed by `s` XOR-masked with it."""
        # TODO(tyoshino): os.urandom does open/read/close for every call. If
        # performance matters, change this to some library call that generates
        # cryptographically secure pseudo random number sequence.
        masking_nonce = os.urandom(4)
        result = [masking_nonce]
        count = 0
        for c in s:
            result.append(chr(ord(c) ^ ord(masking_nonce[count])))
            count = (count + 1) % len(masking_nonce)
        return ''.join(result)

    def send_frame_of_arbitrary_bytes(self, header, body):
        # The header is sent verbatim; only the body is masked.
        self._socket.sendall(header + self._mask_hybi(body))

    def send_data(self, payload, frame_type, end=True, mask=True,
                  rsv1=0, rsv2=0, rsv3=0):
        """Send one data frame, handling fragmentation, optional masking and
        7/16/64-bit payload-length encoding per RFC 6455 section 5.2."""
        if self._outgoing_frame_filter is not None:
            payload = self._outgoing_frame_filter.filter(payload)

        # Inside a fragmented message every non-first frame is a
        # continuation frame regardless of the requested frame_type.
        if self._fragmented:
            opcode = OPCODE_CONTINUATION
        else:
            opcode = frame_type

        if end:
            self._fragmented = False
            fin = 1
        else:
            self._fragmented = True
            fin = 0

        if self._handshake._options.use_deflate_frame:
            rsv1 = 1

        if mask:
            mask_bit = 1 << 7
        else:
            mask_bit = 0

        header = chr(fin << 7 | rsv1 << 6 | rsv2 << 5 | rsv3 << 4 | opcode)
        payload_length = len(payload)
        if payload_length <= 125:
            header += chr(mask_bit | payload_length)
        elif payload_length < 1 << 16:
            header += chr(mask_bit | 126) + struct.pack('!H', payload_length)
        elif payload_length < 1 << 63:
            header += chr(mask_bit | 127) + struct.pack('!Q', payload_length)
        else:
            raise Exception('Too long payload (%d byte)' % payload_length)
        if mask:
            payload = self._mask_hybi(payload)
        self._socket.sendall(header + payload)

    def send_binary(self, payload, end=True, mask=True):
        self.send_data(payload, OPCODE_BINARY, end, mask)

    def send_text(self, payload, end=True, mask=True):
        self.send_data(payload.encode('utf-8'), OPCODE_TEXT, end, mask)

    def _assert_receive_data(self, payload, opcode, fin, rsv1, rsv2, rsv3):
        """Read one frame and raise unless all header fields and the payload
        match the expectation.

        rsv values given as None default to 0, except rsv1 which defaults
        to 1 when the deflate-frame extension is active.
        """
        (actual_fin, actual_rsv1, actual_rsv2, actual_rsv3, actual_opcode,
         payload_length) = read_frame_header(self._socket)

        if actual_opcode != opcode:
            raise Exception(
                'Unexpected opcode: %d (expected) vs %d (actual)' %
                (opcode, actual_opcode))

        if actual_fin != fin:
            raise Exception(
                'Unexpected fin: %d (expected) vs %d (actual)' %
                (fin, actual_fin))

        if rsv1 is None:
            rsv1 = 0
            if self._handshake._options.use_deflate_frame:
                rsv1 = 1

        if rsv2 is None:
            rsv2 = 0

        if rsv3 is None:
            rsv3 = 0

        if actual_rsv1 != rsv1:
            raise Exception(
                'Unexpected rsv1: %r (expected) vs %r (actual)' %
                (rsv1, actual_rsv1))

        if actual_rsv2 != rsv2:
            raise Exception(
                'Unexpected rsv2: %r (expected) vs %r (actual)' %
                (rsv2, actual_rsv2))

        if actual_rsv3 != rsv3:
            raise Exception(
                'Unexpected rsv3: %r (expected) vs %r (actual)' %
                (rsv3, actual_rsv3))

        received = receive_bytes(self._socket, payload_length)

        if self._incoming_frame_filter is not None:
            received = self._incoming_frame_filter.filter(received)

        if len(received) != len(payload):
            raise Exception(
                'Unexpected payload length: %d (expected) vs %d (actual)' %
                (len(payload), len(received)))

        if payload != received:
            raise Exception(
                'Unexpected payload: %r (expected) vs %r (actual)' %
                (payload, received))

    def assert_receive_binary(self, payload, opcode=OPCODE_BINARY, fin=1,
                              rsv1=None, rsv2=None, rsv3=None):
        self._assert_receive_data(payload, opcode, fin, rsv1, rsv2, rsv3)

    def assert_receive_text(self, payload, opcode=OPCODE_TEXT, fin=1,
                            rsv1=None, rsv2=None, rsv3=None):
        self._assert_receive_data(payload.encode('utf-8'), opcode, fin, rsv1,
                                  rsv2, rsv3)

    def _build_close_frame(self, code, reason, mask):
        # Close frame: FIN + OPCODE_CLOSE, then an optional body of a
        # 2-byte status code followed by a UTF-8 reason.
        frame = chr(1 << 7 | OPCODE_CLOSE)
        if code is not None:
            body = struct.pack('!H', code) + reason.encode('utf-8')
        else:
            body = ''
        if mask:
            frame += chr(1 << 7 | len(body)) + self._mask_hybi(body)
        else:
            frame += chr(len(body)) + body
        return frame

    def send_close(self, code, reason):
        self._socket.sendall(
            self._build_close_frame(code, reason, True))

    def assert_receive_close(self, code, reason):
        # The server's close frame is unmasked, hence mask=False here.
        expected_frame = self._build_close_frame(code, reason, False)
        actual_frame = receive_bytes(self._socket, len(expected_frame))
        if actual_frame != expected_frame:
            raise Exception(
                'Unexpected close frame: %r (expected) vs %r (actual)' %
                (expected_frame, actual_frame))
class WebSocketStreamHixie75(object):
    """Frame processor for the WebSocket protocol version Hixie 75 and HyBi 00.
    """

    # Closing handshake frame shared by Hixie 75 / HyBi 00.
    _CLOSE_FRAME = '\xff\x00'

    def __init__(self, socket, unused_handshake):
        self._socket = socket

    def send_frame_of_arbitrary_bytes(self, header, body):
        # Hixie 75 has no masking; the frame is sent verbatim.
        self._socket.sendall(header + body)

    def send_data(self, payload, unused_frame_type, unused_end, unused_mask):
        # Bug fix: the parameter was misspelled 'unused_frame_typem'; all
        # visible callers pass it positionally.
        # Text framing: 0x00 <payload> 0xff.
        frame = ''.join(['\x00', payload, '\xff'])
        self._socket.sendall(frame)

    def send_binary(self, unused_payload, unused_end, unused_mask):
        # Binary frames do not exist in Hixie 75; the call is a no-op.
        pass

    def send_text(self, payload, unused_end, unused_mask):
        encoded_payload = payload.encode('utf-8')
        frame = ''.join(['\x00', encoded_payload, '\xff'])
        self._socket.sendall(frame)

    def assert_receive_binary(self, payload, opcode=OPCODE_BINARY, fin=1,
                              rsv1=0, rsv2=0, rsv3=0):
        raise Exception('Binary frame is not supported in hixie75')

    def assert_receive_text(self, payload):
        # Expect the 0x00 frame-type byte, the payload, then the 0xff
        # terminator.
        received = receive_bytes(self._socket, 1)
        if received != '\x00':
            raise Exception(
                'Unexpected frame type: %d (expected) vs %d (actual)' %
                (0, ord(received)))

        received = receive_bytes(self._socket, len(payload) + 1)
        if received[-1] != '\xff':
            raise Exception(
                'Termination expected: 0xff (expected) vs %r (actual)' %
                received)
        if received[0:-1] != payload:
            raise Exception(
                'Unexpected payload: %r (expected) vs %r (actual)' %
                (payload, received[0:-1]))

    def send_close(self, code, reason):
        # Hixie 75 close ignores code and reason.
        self._socket.sendall(self._CLOSE_FRAME)

    def assert_receive_close(self, unused_code, unused_reason):
        closing = receive_bytes(self._socket, len(self._CLOSE_FRAME))
        if closing != self._CLOSE_FRAME:
            raise Exception('Didn\'t receive closing handshake')
class ClientOptions(object):
    """Holds option values to configure the Client object."""

    def __init__(self):
        # Protocol version to advertise; 13 corresponds to RFC 6455.
        self.version = 13
        self.server_host = ''
        self.origin = ''
        self.resource = ''
        # -1 means "not configured"; callers must set a real port.
        self.server_port = -1
        self.socket_timeout = 1000
        self.use_tls = False
        # Extension tokens to offer in Sec-WebSocket-Extensions.
        self.extensions = []
        # Enable deflate-application-data.
        self.use_deflate_frame = False
        # Enable mux
        self.use_mux = False

    def enable_deflate_frame(self):
        # Request the deflate-frame extension during the handshake.
        self.use_deflate_frame = True
        self.extensions.append(_DEFLATE_FRAME_EXTENSION)

    def enable_mux(self):
        # Request the (experimental) mux extension during the handshake.
        self.use_mux = True
        self.extensions.append(_MUX_EXTENSION)
def connect_socket_with_retry(host, port, timeout, use_tls,
                              retry=10, sleep_sec=0.1):
    """Connect to (host, port), retrying while the connection is refused.

    Returns a connected socket (wrapped in _TLSSocket when use_tls is
    true), or None when all `retry` attempts were refused.
    """
    retry_count = 0
    while retry_count < retry:
        try:
            s = socket.socket()
            s.settimeout(timeout)
            s.connect((host, port))
            if use_tls:
                return _TLSSocket(s)
            return s
        except socket.error, e:
            # Only ECONNREFUSED is retried (server not up yet); any other
            # socket error propagates.
            if e.errno != errno.ECONNREFUSED:
                raise
            else:
                retry_count = retry_count + 1
                time.sleep(sleep_sec)

    return None
class Client(object):
    """WebSocket client."""

    def __init__(self, options, handshake, stream_class):
        self._logger = util.get_class_logger(self)

        self._options = options
        self._socket = None

        self._handshake = handshake
        self._stream_class = stream_class

    def connect(self):
        # Open the TCP (or TLS) connection, run the opening handshake, and
        # wrap the socket in the protocol-specific stream class.
        self._socket = connect_socket_with_retry(
            self._options.server_host,
            self._options.server_port,
            self._options.socket_timeout,
            self._options.use_tls)

        self._handshake.handshake(self._socket)

        self._stream = self._stream_class(self._socket, self._handshake)

        self._logger.info('Connection established')

    def send_frame_of_arbitrary_bytes(self, header, body):
        self._stream.send_frame_of_arbitrary_bytes(header, body)

    def send_message(self, message, end=True, binary=False, raw=False,
                     mask=True):
        # raw=True sends the payload as a text frame without UTF-8 encoding.
        if binary:
            self._stream.send_binary(message, end, mask)
        elif raw:
            self._stream.send_data(message, OPCODE_TEXT, end, mask)
        else:
            self._stream.send_text(message, end, mask)

    def assert_receive(self, payload, binary=False):
        if binary:
            self._stream.assert_receive_binary(payload)
        else:
            self._stream.assert_receive_text(payload)

    def send_close(self, code=STATUS_NORMAL_CLOSURE, reason=''):
        self._stream.send_close(code, reason)

    def assert_receive_close(self, code=STATUS_NORMAL_CLOSURE, reason=''):
        self._stream.assert_receive_close(code, reason)

    def close_socket(self):
        self._socket.close()

    def assert_connection_closed(self):
        # Reading one byte must fail: either receive_bytes raises its
        # "Connection closed" exception, or the OS reports a reset.
        try:
            read_data = receive_bytes(self._socket, 1)
        except Exception, e:
            if str(e).find(
                'Connection closed before receiving requested length ') == 0:
                return
            try:
                # Python 2 socket.error unpacks to (errno, message).
                error_number, message = e
                for error_name in ['ECONNRESET', 'WSAECONNRESET']:
                    if (error_name in dir(errno) and
                        error_number == getattr(errno, error_name)):
                        return
            except:
                raise e
            raise e

        raise Exception('Connection is not closed (Read: %r)' % read_data)
def create_client(options):
return Client(
options, WebSocketHandshake(options), WebSocketStream)
def create_client_hybi00(options):
return Client(
options,
WebSocketHybi00Handshake(options, '0'),
WebSocketStreamHixie75)
def create_client_hixie75(options):
return Client(
options, WebSocketHixie75Handshake(options), WebSocketStreamHixie75)
# vi:sts=4 sw=4 et | unknown | codeparrot/codeparrot-clean | ||
"""Spark SQL agent.""" | python | github | https://github.com/langchain-ai/langchain | libs/langchain/langchain_classic/agents/agent_toolkits/spark_sql/__init__.py |
# coding=utf-8
import re
_RE_FIND_FIRST_CAP = re.compile('(.)([A-Z][a-z]+)')
_RE_SPAN_OF_CAPS = re.compile('([a-z0-9])([A-Z])')
def camelcase_to_underscore(name):
return _RE_SPAN_OF_CAPS.sub(r'\1_\2',
_RE_FIND_FIRST_CAP.sub(r'\1_\2', name)
).lower()
class binary:
"""
Store the value in bits so we can convert between things easily
"""
value = None
def __init__(self, value=None, unit=None):
self.do(value=value, unit=unit)
@staticmethod
def convert(value=None, oldUnit=None, newUnit=None):
convertor = binary(value=value, unit=oldUnit)
return convertor.get(unit=newUnit)
def set(self, value, unit=None):
return self.do(value=value, unit=unit)
def get(self, unit=None):
return self.do(unit=unit)
def do(self, value=None, unit=None):
if not unit:
return self.bit(value=value)
if unit in ['bit', 'b']:
return self.bit(value=value)
if unit in ['kilobit', 'kbit', 'Kibit']:
return self.kilobit(value=value)
if unit in ['megabit', 'Mbit', 'Mibit', 'Mbit']:
return self.megabit(value=value)
if unit in ['gigabit', 'Gbit', 'Gibit']:
return self.gigabit(value=value)
if unit in ['terabit', 'Tbit', 'Tibit']:
return self.terabit(value=value)
if unit in ['petabit', 'Pbit', 'Pibit']:
return self.petabit(value=value)
if unit in ['exabit', 'Ebit', 'Eibit']:
return self.exabit(value=value)
if unit in ['zettabit', 'Zbit', 'Zibit']:
return self.zettabit(value=value)
if unit in ['yottabit', 'Ybit', 'Yibit']:
return self.yottabit(value=value)
if unit in ['byte', 'B']:
return self.byte(value=value)
if unit in ['kilobyte', 'kB', 'KiB']:
return self.kilobyte(value=value)
if unit in ['megabyte', 'MB', 'MiB', 'Mbyte']:
return self.megabyte(value=value)
if unit in ['gigabyte', 'GB', 'GiB']:
return self.gigabyte(value=value)
if unit in ['terabyte', 'TB', 'TiB']:
return self.terabyte(value=value)
if unit in ['petabyte', 'PB', 'PiB']:
return self.petabyte(value=value)
if unit in ['exabyte', 'EB', 'EiB']:
return self.exabyte(value=value)
if unit in ['zettabyte', 'ZB', 'ZiB']:
return self.zettabyte(value=value)
if unit in ['yottabyte', 'YB', 'YiB']:
return self.yottabyte(value=value)
raise NotImplementedError("unit %s" % unit)
def bit(self, value=None):
if value is None:
return self.value
else:
self.value = float(value)
def kilobit(self, value=None):
if value is None:
return self.bit() / 1024
else:
self.bit(value * 1024)
def megabit(self, value=None):
if value is None:
return self.kilobit() / 1024
else:
self.kilobit(value * 1024)
def gigabit(self, value=None):
if value is None:
return self.megabit() / 1024
else:
self.megabit(value * 1024)
def terabit(self, value=None):
if value is None:
return self.gigabit() / 1024
else:
self.gigabit(value * 1024)
def petabit(self, value=None):
if value is None:
return self.terabit() / 1024
else:
self.terabit(value * 1024)
def exabit(self, value=None):
if value is None:
return self.petabit() / 1024
else:
self.petabit(value * 1024)
def zettabit(self, value=None):
if value is None:
return self.exabit() / 1024
else:
self.exabit(value * 1024)
def yottabit(self, value=None):
if value is None:
return self.zettabit() / 1024
else:
self.zettabit(value * 1024)
def byte(self, value=None):
if value is None:
return self.value / 8
else:
self.value = float(value) * 8
def kilobyte(self, value=None):
if value is None:
return self.byte() / 1024
else:
self.byte(value * 1024)
def megabyte(self, value=None):
if value is None:
return self.kilobyte() / 1024
else:
self.kilobyte(value * 1024)
def gigabyte(self, value=None):
if value is None:
return self.megabyte() / 1024
else:
self.megabyte(value * 1024)
def terabyte(self, value=None):
if value is None:
return self.gigabyte() / 1024
else:
self.gigabyte(value * 1024)
def petabyte(self, value=None):
if value is None:
return self.terabyte() / 1024
else:
self.terabyte(value * 1024)
def exabyte(self, value=None):
if value is None:
return self.petabyte() / 1024
else:
self.petabyte(value * 1024)
def zettabyte(self, value=None):
if value is None:
return self.exabyte() / 1024
else:
self.exabyte(value * 1024)
def yottabyte(self, value=None):
if value is None:
return self.zettabyte() / 1024
else:
self.zettabyte(value * 1024)
class time:
"""
Store the value in miliseconds so we can convert between things easily
"""
value = None
def __init__(self, value=None, unit=None):
self.do(value=value, unit=unit)
@staticmethod
def convert(value=None, oldUnit=None, newUnit=None):
convertor = time(value=value, unit=oldUnit)
return convertor.get(unit=newUnit)
def set(self, value, unit=None):
return self.do(value=value, unit=unit)
def get(self, unit=None):
return self.do(unit=unit)
def do(self, value=None, unit=None):
if not unit:
return self.millisecond(value=value)
else:
unit = unit.lower()
if unit in ['millisecond', 'milliseconds', 'ms']:
return self.millisecond(value=value)
if unit in ['second', 'seconds', 's']:
return self.second(value=value)
raise NotImplementedError("unit %s" % unit)
def millisecond(self, value=None):
if value is None:
return self.value
else:
self.value = float(value)
def second(self, value=None):
if value is None:
return self.millisecond() / 1000
else:
self.millisecond(value * 1000) | unknown | codeparrot/codeparrot-clean | ||
import {ChangeDetectionStrategy, Component, signal} from '@angular/core';
import {email, form, FormField, required} from '@angular/forms/signals';
interface LoginData {
email: string;
password: string;
rememberMe: boolean;
}
@Component({
selector: 'app-root',
templateUrl: './app.html',
styleUrl: './app.css',
imports: [FormField],
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class App {
loginModel = signal<LoginData>({
email: '',
password: '',
rememberMe: false,
});
loginForm = form(this.loginModel, (fieldPath) => {
required(fieldPath.email, {message: 'Email is required'});
email(fieldPath.email, {message: 'Enter a valid email address'});
required(fieldPath.password, {message: 'Password is required'});
});
} | typescript | github | https://github.com/angular/angular | adev/src/content/tutorials/signal-forms/steps/3-add-validation/answer/src/app/app.ts |
Archive of books from {{ year }}. {{ object_list|length }} books found. | html | github | https://github.com/django/django | tests/generic_views/templates/generic_views/book_archive_year.html |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that .so files that are order only dependencies are specified by
their install location rather than by their alias.
"""
# Python 2.5 needs this for the with statement.
from __future__ import with_statement
import os
import TestGyp
test = TestGyp.TestGyp(formats=['make'])
test.run_gyp('shared_dependency.gyp',
chdir='src')
test.relocate('src', 'relocate/src')
test.build('shared_dependency.gyp', test.ALL, chdir='relocate/src')
if test.format=='android':
makefile_path = 'relocate/src/GypAndroid.mk'
else:
makefile_path = 'relocate/src/Makefile'
with open(makefile_path) as makefile:
make_contents = makefile.read()
# If we remove the code to generate lib1, Make should still be able
# to build lib2 since lib1.so already exists.
make_contents = make_contents.replace('include lib1.target.mk', '')
with open(makefile_path, 'w') as makefile:
makefile.write(make_contents)
test.build('shared_dependency.gyp', test.ALL, chdir='relocate/src')
test.pass_test() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
from imaplib import IMAP4
import time
import re
import json
import daemon
# change these variable values
DEBUG = False
HOSTNAME = 'mail.server.dom'
USERNAME = 'account@server.dom'
PASSWORD = 'passsword'
MAILBOX = 'Inbox'
DATAFILE='/tmp/led.db'
NEWMAIL_OFFSET = 1 # my unread messages never goes to zero, yours might
MAIL_CHECK_FREQ = 30 # check mail every 60 seconds
#do not change bellow this lines
def checkmail():
try:
server = IMAP4(HOSTNAME)
server.login(USERNAME, PASSWORD)
except:
print 'connection failed!'
return
if DEBUG:
print('Logging in as ' + USERNAME)
select_info = server.select(MAILBOX)
total_email=re.findall(r'\d+(?:[.,]\d+)?', select_info[1][0])
print('%d messages in %s' % (int(total_email[0]), MAILBOX))
folder_status = server.status(MAILBOX, '(UNSEEN)')
total_unseen = re.findall(r'\d+(?:[.,]\d+)?', folder_status[1][0])
newmails = int(total_unseen[0])
if DEBUG:
print "You have", newmails, "new email(s)!"
if newmails >= NEWMAIL_OFFSET:
update_signal('new', 11)
else:
update_signal('normal', 11)
time.sleep(MAIL_CHECK_FREQ)
def update_signal(mesg='new', port=12):
data={'port': int(port), 'mesg': mesg}
dataserial=json.dumps(data)
try:
dbfile=file(DATAFILE, 'w')
dbfile.write(dataserial)
dbfile.close()
except:
if DEBUG:
print 'error update datafile.'
def run():
with daemon.DaemonContext():
while True:
checkmail()
if __name__ == '__main__':
run() | unknown | codeparrot/codeparrot-clean | ||
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Various utilities and cookbook-like things.
"""
# STDLIB
import codecs
import contextlib
import io
import re
import gzip
from distutils import version
__all__ = [
'convert_to_writable_filelike',
'stc_reference_frames',
'coerce_range_list_param',
]
@contextlib.contextmanager
def convert_to_writable_filelike(fd, compressed=False):
"""
Returns a writable file-like object suitable for streaming output.
Parameters
----------
fd : file path string or writable file-like object
May be:
- a file path, in which case it is opened, and the file
object is returned.
- an object with a :meth:``write`` method, in which case that
object is returned.
compressed : bool, optional
If `True`, create a gzip-compressed file. (Default is `False`).
Returns
-------
fd : writable file-like object
"""
if isinstance(fd, str):
if fd.endswith('.gz') or compressed:
with gzip.GzipFile(fd, 'wb') as real_fd:
encoded_fd = io.TextIOWrapper(real_fd, encoding='utf8')
yield encoded_fd
encoded_fd.flush()
real_fd.flush()
return
else:
with open(fd, 'wt', encoding='utf8') as real_fd:
yield real_fd
return
elif hasattr(fd, 'write'):
assert callable(fd.write)
if compressed:
fd = gzip.GzipFile(fileobj=fd)
# If we can't write Unicode strings, use a codecs.StreamWriter
# object
needs_wrapper = False
try:
fd.write('')
except TypeError:
needs_wrapper = True
if not hasattr(fd, 'encoding') or fd.encoding is None:
needs_wrapper = True
if needs_wrapper:
yield codecs.getwriter('utf-8')(fd)
fd.flush()
else:
yield fd
fd.flush()
return
else:
raise TypeError("Can not be coerced to writable file-like object")
# <http://www.ivoa.net/Documents/REC/DM/STC-20071030.html>
stc_reference_frames = set([
'FK4', 'FK5', 'ECLIPTIC', 'ICRS', 'GALACTIC', 'GALACTIC_I', 'GALACTIC_II',
'SUPER_GALACTIC', 'AZ_EL', 'BODY', 'GEO_C', 'GEO_D', 'MAG', 'GSE', 'GSM',
'SM', 'HGC', 'HGS', 'HEEQ', 'HRTN', 'HPC', 'HPR', 'HCC', 'HGI',
'MERCURY_C', 'VENUS_C', 'LUNA_C', 'MARS_C', 'JUPITER_C_III',
'SATURN_C_III', 'URANUS_C_III', 'NEPTUNE_C_III', 'PLUTO_C', 'MERCURY_G',
'VENUS_G', 'LUNA_G', 'MARS_G', 'JUPITER_G_III', 'SATURN_G_III',
'URANUS_G_III', 'NEPTUNE_G_III', 'PLUTO_G', 'UNKNOWNFrame'])
def coerce_range_list_param(p, frames=None, numeric=True):
"""
Coerces and/or verifies the object *p* into a valid range-list-format parameter.
As defined in `Section 8.7.2 of Simple
Spectral Access Protocol
<http://www.ivoa.net/Documents/REC/DAL/SSA-20080201.html>`_.
Parameters
----------
p : str or sequence
May be a string as passed verbatim to the service expecting a
range-list, or a sequence. If a sequence, each item must be
either:
- a numeric value
- a named value, such as, for example, 'J' for named
spectrum (if the *numeric* kwarg is False)
- a 2-tuple indicating a range
- the last item my be a string indicating the frame of
reference
frames : sequence of str, optional
A sequence of acceptable frame of reference keywords. If not
provided, the default set in ``set_reference_frames`` will be
used.
numeric : bool, optional
TODO
Returns
-------
parts : tuple
The result is a tuple:
- a string suitable for passing to a service as a range-list
argument
- an integer counting the number of elements
"""
def str_or_none(x):
if x is None:
return ''
if numeric:
x = float(x)
return str(x)
def numeric_or_range(x):
if isinstance(x, tuple) and len(x) == 2:
return '{}/{}'.format(str_or_none(x[0]), str_or_none(x[1]))
else:
return str_or_none(x)
def is_frame_of_reference(x):
return isinstance(x, str)
if p is None:
return None, 0
elif isinstance(p, (tuple, list)):
has_frame_of_reference = len(p) > 1 and is_frame_of_reference(p[-1])
if has_frame_of_reference:
points = p[:-1]
else:
points = p[:]
out = ','.join([numeric_or_range(x) for x in points])
length = len(points)
if has_frame_of_reference:
if frames is not None and p[-1] not in frames:
raise ValueError(
"'{}' is not a valid frame of reference".format(p[-1]))
out += ';' + p[-1]
length += 1
return out, length
elif isinstance(p, str):
number = r'([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)?'
if not numeric:
number = r'(' + number + ')|([A-Z_]+)'
match = re.match(
'^' + number + r'([,/]' + number +
r')+(;(?P<frame>[<A-Za-z_0-9]+))?$',
p)
if match is None:
raise ValueError(f"'{p}' is not a valid range list")
frame = match.groupdict()['frame']
if frames is not None and frame is not None and frame not in frames:
raise ValueError(
f"'{frame}' is not a valid frame of reference")
return p, p.count(',') + p.count(';') + 1
try:
float(p)
return str(p), 1
except TypeError:
raise ValueError(f"'{p}' is not a valid range list")
def version_compare(a, b):
"""
Compare two VOTable version identifiers.
"""
def version_to_tuple(v):
if v[0].lower() == 'v':
v = v[1:]
return version.StrictVersion(v)
av = version_to_tuple(a)
bv = version_to_tuple(b)
# Can't use cmp because it was removed from Python 3.x
return (av > bv) - (av < bv) | unknown | codeparrot/codeparrot-clean | ||
/*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Ivan Kopeykin @vankop
*/
"use strict";
const RuntimeGlobals = require("../RuntimeGlobals");
const RuntimeModule = require("../RuntimeModule");
class NonceRuntimeModule extends RuntimeModule {
constructor() {
super("nonce", RuntimeModule.STAGE_ATTACH);
}
/**
* @returns {string | null} runtime code
*/
generate() {
return `${RuntimeGlobals.scriptNonce} = undefined;`;
}
}
module.exports = NonceRuntimeModule; | javascript | github | https://github.com/webpack/webpack | lib/runtime/NonceRuntimeModule.js |
import { ok, test } from '../../test';
export default test({
get props() {
return {
/** @type {string | number | null | undefined} */
testName1: 'test1',
/** @type {string | number | null | undefined} */
testName2: 'test2'
};
},
html: '<div class="test1test2 svelte-70s021"></div>',
async test({ assert, component, target }) {
const div = target.querySelector('div');
ok(div);
assert.equal(div.className, 'test1test2 svelte-70s021');
component.testName1 = null;
component.testName2 = null;
assert.equal(div.className, '0 svelte-70s021');
component.testName1 = null;
component.testName2 = 'test';
assert.equal(div.className, 'nulltest svelte-70s021');
component.testName1 = undefined;
component.testName2 = 'test';
assert.equal(div.className, 'undefinedtest svelte-70s021');
component.testName1 = undefined;
component.testName2 = undefined;
assert.equal(div.className, 'NaN svelte-70s021');
component.testName1 = null;
component.testName2 = 1;
assert.equal(div.className, '1 svelte-70s021');
component.testName1 = undefined;
component.testName2 = 1;
assert.equal(div.className, 'NaN svelte-70s021');
component.testName1 = null;
component.testName2 = 0;
assert.equal(div.className, '0 svelte-70s021');
component.testName1 = undefined;
component.testName2 = 0;
assert.equal(div.className, 'NaN svelte-70s021');
}
}); | javascript | github | https://github.com/sveltejs/svelte | packages/svelte/tests/runtime-legacy/samples/attribute-null-func-classnames-with-style/_config.js |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Trond Hindenes <trond@hindenes.com>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_dsc
version_added: "2.4"
short_description: Invokes a PowerShell DSC configuration
description:
- Invokes a PowerShell DSC Configuration. Requires PowerShell version 5 (February release or newer).
- Most of the parameters for this module are dynamic and will vary depending on the DSC Resource.
options:
resource_name:
description:
- The DSC Resource to use. Must be accessible to PowerShell using any of the default paths.
required: true
module_version:
description:
- Can be used to configure the exact version of the dsc resource to be invoked.
- Useful if the target node has multiple versions installed of the module containing the DSC resource.
- If not specified, the module will follow standard Powershell convention and use the highest version available.
default: latest
author: Trond Hindenes
'''
EXAMPLES = r'''
# Playbook example
- name: Extract zip file
win_dsc:
resource_name: archive
ensure: Present
path: "C:\\Temp\\zipfile.zip"
destination: "C:\\Temp\\Temp2"
- name: Invoke DSC with check mode
win_dsc:
resource_name: windowsfeature
name: telnet-client
'''
RETURN = r'''
resource_name:
description: The name of the invoked resource
returned: always
type: string
sample: windowsfeature
module_version:
description: The version of the dsc resource/module used.
returned: success
type: string
sample: "1.0.1"
attributes:
description: The attributes/parameters passed in to the DSC resource as key/value pairs
returned: always
type: complex
sample:
contains:
Key:
description: Attribute key
Value:
description: Attribute value
dsc_attributes:
description: The attributes/parameters as returned from the DSC engine in dict format
returned: always
type: complex
contains:
Key:
description: Attribute key
Value:
description: Attribute value
reboot_required:
description: flag returned from the DSC engine indicating whether or not the machine requires a reboot for the invoked changes to take effect
returned: always
type: boolean
sample: True
message:
description: any error message from invoking the DSC resource
returned: error
type: string
sample: Multiple DSC modules found with resource name xyz
''' | unknown | codeparrot/codeparrot-clean | ||
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
SurnameBase class for GRAMPS.
"""
from ..ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from surname import Surname
from const import IDENTICAL, EQUAL
#-------------------------------------------------------------------------
#
# SurnameBase classes
#
#-------------------------------------------------------------------------
class SurnameBase(object):
"""
Base class for surname-aware objects.
"""
def __init__(self, source=None):
"""
Initialize a SurnameBase.
If the source is not None, then object is initialized from values of
the source object.
:param source: Object used to initialize the new object
:type source: SurnameBase
"""
self.surname_list = map(Surname, source.surname_list) if source else []
def serialize(self):
"""
Convert the object to a serialized tuple of data.
"""
return [surname.serialize() for surname in self.surname_list]
def to_struct(self):
"""
Convert the data held in this object to a structure (eg,
struct) that represents all the data elements.
This method is used to recursively convert the object into a
self-documenting form that can easily be used for various
purposes, including diffs and queries.
These structures may be primitive Python types (string,
integer, boolean, etc.) or complex Python types (lists,
tuples, or dicts). If the return type is a dict, then the keys
of the dict match the fieldname of the object. If the return
struct (or value of a dict key) is a list, then it is a list
of structs. Otherwise, the struct is just the value of the
attribute.
:returns: Returns a struct containing the data of the object.
:rtype: list
"""
return [surname.to_struct() for surname in self.surname_list]
def unserialize(self, data):
"""
Convert a serialized tuple of data to an object.
"""
self.surname_list = [Surname().unserialize(item) for item in data]
def add_surname(self, surname):
"""
Add the :class:`~gen.lib.surname.Surname` instance to the object's
list of surnames.
:param surname: :class:`~gen.lib.surname.Surname` instance to add to
the object's address list.
:type address: list
"""
self.surname_list.append(surname)
def remove_surname(self, surname):
"""
Remove the specified :class:`~gen.lib.surname.Surname` instance from
the surname list.
If the instance does not exist in the list, the operation has
no effect.
:param surname: :class:`~gen.lib.surname.Surname` instance to remove
from the list
:type surname: :class:`~gen.lib.surname.Surname`
:returns: True if the surname was removed, False if it was not in the list.
:rtype: bool
"""
if surname in self.surname_list:
self.surname_list.remove(surname)
return True
else:
return False
def get_surname_list(self):
"""
Return the list of :class:`~gen.lib.surname.Surname` instances a
ssociated with the object.
:returns: Returns the list of :class:`~gen.lib.surname.Surname` instances
:rtype: list
"""
return self.surname_list
def set_surname_list(self, surname_list):
"""
Assign the passed list to the object's list of
:class:`~gen.lib.surname.Surname` instances.
:param surname_list: List of :class:`~gen.lib.surname.surname` instances
to be associated with the object
:type surname_list: list
"""
self.surname_list = surname_list
def get_primary_surname(self):
"""
Return the surname that is the primary surname
:returns: Returns the surname instance that
is the primary surname. If primary not set, and there is a surname,
the first surname is given, if no surnames, None is returned
:rtype: :class:`~gen.lib.surname.Surname` or None
"""
for surname in self.surname_list:
if surname.primary:
return surname
if self.surname_list:
return self.surname_list[0]
else:
#self healing, add a surname to this object and return it
self.set_surname_list([Surname()])
return self.surname_list[0]
def set_primary_surname(self, surnamenr=0):
"""
Set the surname with surnamenr in the surname list as primary surname
Counting starts at 0
"""
assert isinstance(surnamenr, int), "Surname.set_primary_surname requires integer"
if surnamenr >= len(self.surname_list):
return
for surname in self.surname_list:
surname.set_primary(False)
self.surname_list[surnamenr].set_primary(True)
def _merge_surname_list(self, acquisition):
"""
Merge the list of surname from acquisition with our own.
This method is normally only called when surnames are equal, if they
are different, the merge code should fall back to storing an
alternate name. For completeness, the code is present nevertheless.
:param acquisition: the surname list of this object will be merged with
the current surname list.
:rtype acquisition: SurnameBase
"""
surname_list = self.surname_list[:]
for addendum in acquisition.get_surname_list():
for surname in surname_list:
equi = surname.is_equivalent(addendum)
if equi == IDENTICAL:
break
elif equi == EQUAL:
#This should normally never happen, an alternate name
# should be added
surname.merge(addendum)
break
else:
self.surname_list.append(addendum)
def get_surname(self):
"""
Return a fully formatted surname utilizing the surname_list
"""
totalsurn = ""
for surn in self.surname_list:
partsurn = surn.get_surname()
if surn.get_prefix():
fsurn = _('%(first)s %(second)s') % {'first': surn.get_prefix(),
'second': partsurn}
else:
fsurn = partsurn
fsurn = fsurn.strip()
if surn.get_connector():
fsurn = _('%(first)s %(second)s') % {'first': fsurn,
'second': surn.get_connector()}
fsurn = fsurn.strip()
totalsurn = _('%(first)s %(second)s') % {'first': totalsurn,
'second': fsurn}
return totalsurn.strip()
def get_upper_surname(self):
"""Return a fully formatted surname capitalized"""
return self.get_surname().upper()
def get_surnames(self):
"""
Return a list of surnames (no prefix or connectors)
"""
surnl = []
for surn in self.surname_list:
realsurn = surn.get_surname()
if realsurn:
surnl.append(realsurn)
return surnl
def get_prefixes(self):
"""
Return a list of prefixes
"""
prefixl = []
for surn in self.surname_list:
prefix = surn.get_prefix()
if prefix:
prefixl.append(prefix)
return prefixl
def get_connectors(self):
"""
Return a list of surnames (no prefix or connectors)
"""
connl = []
for surn in self.surname_list:
conn = surn.get_connector()
if conn:
connl.append(conn)
return connl | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import edi
import res_partner
import res_company
import res_currency
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
from operator import itemgetter
import logging
try:
from django.apps import apps
get_model = apps.get_model
except ImportError:
# django < 1.7 support
from django.db.models import get_model
from django.conf import settings
try:
from django.contrib.admin.utils import get_fields_from_path
except ImportError:
# django < 1.7 support
from django.contrib.admin.util import get_fields_from_path
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils.encoding import force_text
from django.views.generic import View
from braces.views import (CsrfExemptMixin, StaffuserRequiredMixin,
JSONResponseMixin)
logger = logging.getLogger('advanced_filters.views')
class GetFieldChoices(CsrfExemptMixin, StaffuserRequiredMixin,
JSONResponseMixin, View):
"""
A JSONResponse view that accepts a model and a field (path to field),
resolves and returns the valid choices for that field.
Model must use the "app.Model" notation.
If this field is not a simple Integer/CharField with predefined choices,
all distinct entries in the DB are presented, unless field name is in
ADVANCED_FILTERS_DISABLE_FOR_FIELDS and limited to display only results
under ADVANCED_FILTERS_MAX_CHOICES.
"""
def get(self, request, model=None, field_name=None):
if model is field_name is None:
return self.render_json_response(
{'error': "GetFieldChoices view requires 2 arguments"},
status=400)
app_label, model_name = model.split('.', 1)
try:
model_obj = get_model(app_label, model_name)
field = get_fields_from_path(model_obj, field_name)[-1]
model_obj = field.model # use new model if followed a ForeignKey
except AttributeError as e:
logger.debug("Invalid kwargs passed to view: %s", e)
return self.render_json_response(
{'error': "No installed app/model: %s" % model}, status=400)
except (LookupError, FieldDoesNotExist) as e:
logger.debug("Invalid kwargs passed to view: %s", e)
return self.render_json_response(
{'error': force_text(e)}, status=400)
choices = field.choices
# if no choices, populate with distinct values from instances
if not choices:
choices = []
disabled = getattr(settings, 'ADVANCED_FILTERS_DISABLE_FOR_FIELDS',
tuple())
max_choices = getattr(settings, 'ADVANCED_FILTERS_MAX_CHOICES', 254)
if field.name in disabled:
logger.debug('Skipped lookup of choices for disabled fields')
elif isinstance(field, (models.BooleanField, models.DateField,
models.TimeField)):
logger.debug('No choices calculated for field %s of type %s',
field, type(field))
else:
# the order_by() avoids ambiguity with values() and distinct()
choices = model_obj.objects.order_by(field.name).values_list(
field.name, flat=True).distinct()
# additional query is ok to avoid fetching too many values
if choices.count() <= max_choices:
choices = zip(choices, choices)
logger.debug('Choices found for field %s: %s',
field.name, choices)
else:
choices = []
results = [{'id': c[0], 'text': force_text(c[1])} for c in sorted(
choices, key=itemgetter(0))]
return self.render_json_response({'results': results}) | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 2014 Freescale Semiconductor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.common import constants as n_const
from neutron.common import log
from neutron.extensions import portbindings
from neutron.i18n import _LI
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers.freescale import config # noqa
LOG = logging.getLogger(__name__)
class FslsdnMechanismDriver(api.MechanismDriver):
    """Freescale SDN OS Mechanism Driver for ML2 Plugin.

    Mirrors Neutron network/subnet/port lifecycle events to the Freescale
    CRD (Cloud Resource Discovery) service: every post-commit hook builds a
    CRD request body from the Neutron resource and forwards it through the
    CRD client obtained at initialization time.
    """

    @log.log
    def initialize(self):
        """Initialize the Mechanism driver."""
        self.vif_type = portbindings.VIF_TYPE_OVS
        self.vif_details = {portbindings.CAP_PORT_FILTER: True}
        LOG.info(_LI("Initializing CRD client... "))
        self._crdclient = config.get_crdclient()

    # Network Management

    @staticmethod
    @log.log
    def _prepare_crd_network(network, segments):
        """Build the CRD 'network' request body from a Neutron network."""
        payload = {
            'network_id': network['id'],
            'tenant_id': network['tenant_id'],
            'name': network['name'],
            'status': network['status'],
            'admin_state_up': network['admin_state_up'],
            'segments': segments,
        }
        return {'network': payload}

    def create_network_postcommit(self, context):
        """Send create_network data to CRD service."""
        body = self._prepare_crd_network(context.current,
                                         context.network_segments)
        self._crdclient.create_network(body=body)
        LOG.debug("create_network update sent to CRD Server: %s", body)

    def update_network_postcommit(self, context):
        """Send update_network data to CRD service."""
        network = context.current
        body = self._prepare_crd_network(network, context.network_segments)
        self._crdclient.update_network(network['id'], body=body)
        LOG.debug("update_network update sent to CRD Server: %s", body)

    def delete_network_postcommit(self, context):
        """Send delete_network data to CRD service."""
        network_id = context.current['id']
        self._crdclient.delete_network(network_id)
        LOG.debug(
            "delete_network update sent to CRD Server: %s",
            network_id)

    # Port Management

    @staticmethod
    def _prepare_crd_port(port):
        """Build the CRD 'port' request body from a Neutron port."""
        subnet_id = ''
        ip_address = ''
        sec_groups = ''
        fixed_ips = port['fixed_ips']
        # CRD accepts only one fixed IP per port, so only the first entry
        # is forwarded; any extras are logged and dropped.
        if len(fixed_ips) > 1:
            LOG.debug("More than one fixed IP exists - using first one.")
        if fixed_ips:
            first_ip = fixed_ips[0]
            subnet_id = first_ip['subnet_id']
            ip_address = first_ip['ip_address']
            LOG.debug("Handling fixed IP {subnet_id:%(subnet)s, "
                      "ip_address:%(ip)s}",
                      {'subnet': subnet_id, 'ip': ip_address})
        else:
            LOG.debug("No fixed IPs found.")
        if 'security_groups' in port:
            sec_groups = ','.join(port['security_groups'])
        payload = {
            'port_id': port['id'],
            'tenant_id': port['tenant_id'],
            'name': port['name'],
            'network_id': port['network_id'],
            'subnet_id': subnet_id,
            'mac_address': port['mac_address'],
            'device_id': port['device_id'],
            'ip_address': ip_address,
            'admin_state_up': port['admin_state_up'],
            'status': port['status'],
            'device_owner': port['device_owner'],
            'security_groups': sec_groups,
        }
        return {'port': payload}

    def create_port_postcommit(self, context):
        """Send create_port data to CRD service."""
        body = self._prepare_crd_port(context.current)
        self._crdclient.create_port(body=body)
        LOG.debug("create_port update sent to CRD Server: %s", body)

    def delete_port_postcommit(self, context):
        """Send delete_port data to CRD service."""
        port_id = context.current['id']
        self._crdclient.delete_port(port_id)
        LOG.debug("delete_port update sent to CRD Server: %s", port_id)

    # Subnet Management

    @staticmethod
    @log.log
    def _prepare_crd_subnet(subnet):
        """Build the CRD 'subnet' request body from a Neutron subnet.

        Allocation pools, host routes and DNS nameservers are flattened to
        comma-separated strings, which is the format CRD expects.
        """
        pools = ''
        nameservers = ''
        routes = ''
        if 'allocation_pools' in subnet:
            pools = ','.join("%s-%s" % (pool['start'], pool['end'])
                             for pool in subnet['allocation_pools'])
        if 'host_routes' in subnet:
            routes = ','.join("%s-%s" % (route['destination'],
                                         route['nexthop'])
                              for route in subnet['host_routes'])
        if 'dns_nameservers' in subnet:
            nameservers = ','.join(subnet['dns_nameservers'])
        payload = {
            'subnet_id': subnet['id'],
            'tenant_id': subnet['tenant_id'],
            'name': subnet['name'],
            'network_id': subnet['network_id'],
            'ip_version': subnet['ip_version'],
            'cidr': subnet['cidr'],
            'gateway_ip': subnet['gateway_ip'],
            'dns_nameservers': nameservers,
            'allocation_pools': pools,
            'host_routes': routes,
        }
        return {'subnet': payload}

    def create_subnet_postcommit(self, context):
        """Send create_subnet data to CRD service."""
        body = self._prepare_crd_subnet(context.current)
        self._crdclient.create_subnet(body=body)
        LOG.debug("create_subnet update sent to CRD Server: %s", body)

    def update_subnet_postcommit(self, context):
        """Send update_subnet data to CRD service."""
        subnet = context.current
        body = self._prepare_crd_subnet(subnet)
        self._crdclient.update_subnet(subnet['id'], body=body)
        LOG.debug("update_subnet update sent to CRD Server: %s", body)

    def delete_subnet_postcommit(self, context):
        """Send delete_subnet data to CRD service."""
        subnet_id = context.current['id']
        self._crdclient.delete_subnet(subnet_id)
        LOG.debug("delete_subnet update sent to CRD Server: %s", subnet_id)

    def bind_port(self, context):
        """Set porting binding data for use with nova."""
        LOG.debug("Attempting to bind port %(port)s on "
                  "network %(network)s",
                  {'port': context.current['id'],
                   'network': context.network.current['id']})
        # Bind on the first segment with a supported network type; skip
        # (and log) any segment this driver cannot handle.
        for segment in context.network.network_segments:
            if not self.check_segment(segment):
                LOG.debug("Refusing to bind port for segment ID %(id)s, "
                          "segment %(seg)s, phys net %(physnet)s, and "
                          "network type %(nettype)s",
                          {'id': segment[api.ID],
                           'seg': segment[api.SEGMENTATION_ID],
                           'physnet': segment[api.PHYSICAL_NETWORK],
                           'nettype': segment[api.NETWORK_TYPE]})
                continue
            context.set_binding(segment[api.ID],
                                self.vif_type,
                                self.vif_details,
                                status=n_const.PORT_STATUS_ACTIVE)
            LOG.debug("Bound using segment: %s", segment)
            return

    @log.log
    def check_segment(self, segment):
        """Verify a segment is valid for the FSL SDN MechanismDriver."""
        supported_types = (constants.TYPE_VLAN, constants.TYPE_VXLAN)
        return segment[api.NETWORK_TYPE] in supported_types
# -*- coding: utf-8 -*-
#
# DXR documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 14 18:40:04 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.graphviz',
    # Required for the `intersphinx_mapping` setting at the bottom of this
    # file; without this extension that mapping is silently ignored.
    'sphinx.ext.intersphinx',
]
# Concatenate the class docstring and the __init__ docstring in autodoc output.
autoclass_content = 'both'

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'DXR'
copyright = u'2014, various'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.  (Left commented out, so Sphinx's built-in
# default theme is used.)
#html_theme = 'sphinxdoc'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'DXRdoc'
# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'DXR.tex', u'DXR Documentation',
   u'various', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'dxr', u'DXR Documentation',
     [u'various'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'DXR', u'DXR Documentation',
   u'various', 'DXR', 'Code search and cross-reference tool',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = u'DXR'
epub_author = u'various'
epub_publisher = u'various'
epub_copyright = u'2014, various'

# The basename for the epub file. It defaults to the project name.
#epub_basename = u'DXR'

# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True

# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'

# Fix unsupported image types using the PIL.
#epub_fix_images = False

# Scale large images.
#epub_max_image_width = 0

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'

# If false, no index is generated.
#epub_use_index = True

# Example configuration for intersphinx: refer to the Python standard library.
# NOTE: this setting only takes effect if 'sphinx.ext.intersphinx' is listed
# in the `extensions` variable above.
intersphinx_mapping = {'http://docs.python.org/': None}
# frozen_string_literal: true
module ActiveRecord
  module Scoping
    extend ActiveSupport::Concern

    included do
      include Default
      include Named
    end

    module ClassMethods # :nodoc:
      # Collects attributes from scopes that should be applied when creating
      # an AR instance for the particular class this is called on.
      def scope_attributes
        all.scope_for_create
      end

      # Are there attributes associated with this scope?
      def scope_attributes?
        current_scope
      end

      # Reads the current scope for this class from the registry. When
      # +skip_inherited_scope+ is true, only a scope set directly on this
      # class is returned (no superclass lookup).
      def current_scope(skip_inherited_scope = false)
        ScopeRegistry.current_scope(self, skip_inherited_scope)
      end

      # Stores +scope+ as this class's current scope in the registry.
      def current_scope=(scope)
        ScopeRegistry.set_current_scope(self, scope)
      end

      # Reads the global current scope for this class from the registry;
      # stored separately from +current_scope+.
      def global_current_scope(skip_inherited_scope = false)
        ScopeRegistry.global_current_scope(self, skip_inherited_scope)
      end

      # Stores +scope+ as this class's global current scope in the registry.
      def global_current_scope=(scope)
        ScopeRegistry.set_global_current_scope(self, scope)
      end

      # The execution-state-local ScopeRegistry instance backing the
      # accessors above.
      def scope_registry
        ScopeRegistry.instance
      end
    end

    # Assigns the current scope's creation attributes (if any) to this
    # record; called from +initialize_internals_callback+ below.
    def populate_with_current_scope_attributes # :nodoc:
      return unless self.class.scope_attributes?

      attributes = self.class.scope_attributes
      _assign_attributes(attributes) if attributes.any?
    end

    def initialize_internals_callback # :nodoc:
      super
      populate_with_current_scope_attributes
    end

    # This class stores the +:current_scope+ and +:ignore_default_scope+ values
    # for different classes. The registry is stored as either a thread or fiber
    # local depending on the application configuration.
    #
    # This class allows you to store and get the scope values on different
    # classes and different types of scopes. For example, if you are attempting
    # to get the current_scope for the +Board+ model, then you would use the
    # following code:
    #
    #   registry = ActiveRecord::Scoping::ScopeRegistry
    #   registry.set_current_scope(Board, some_new_scope)
    #
    # Now when you run:
    #
    #   registry.current_scope(Board)
    #
    # You will obtain whatever was defined in +some_new_scope+.
    class ScopeRegistry # :nodoc:
      class << self
        delegate :current_scope, :set_current_scope, :ignore_default_scope, :set_ignore_default_scope,
          :global_current_scope, :set_global_current_scope, to: :instance

        # Lazily creates one registry per isolated execution state
        # (thread or fiber, per application configuration).
        def instance
          ActiveSupport::IsolatedExecutionState[:active_record_scope_registry] ||= new
        end
      end

      def initialize
        # Each hash maps a model class name (String) to its stored value.
        @current_scope = {}
        @ignore_default_scope = {}
        @global_current_scope = {}
      end

      def current_scope(model, skip_inherited_scope = false)
        value_for(@current_scope, model, skip_inherited_scope)
      end

      def set_current_scope(model, value)
        set_value_for(@current_scope, model, value)
      end

      def ignore_default_scope(model, skip_inherited_scope = false)
        value_for(@ignore_default_scope, model, skip_inherited_scope)
      end

      def set_ignore_default_scope(model, value)
        set_value_for(@ignore_default_scope, model, value)
      end

      def global_current_scope(model, skip_inherited_scope = false)
        value_for(@global_current_scope, model, skip_inherited_scope)
      end

      def set_global_current_scope(model, value)
        set_value_for(@global_current_scope, model, value)
      end

      private
        # Obtains the value for a given +scope_type+ and +model+. Unless
        # +skip_inherited_scope+ is set, walks up the superclass chain from
        # +model+ to its base_class and returns the first stored value found.
        def value_for(scope_type, model, skip_inherited_scope = false)
          return scope_type[model.name] if skip_inherited_scope
          klass = model
          base = model.base_class
          while klass != base
            value = scope_type[klass.name]
            return value if value
            klass = klass.superclass
          end
          scope_type[klass.name]
        end

        # Sets the +value+ for a given +scope_type+ and +model+.
        def set_value_for(scope_type, model, value)
          scope_type[model.name] = value
        end
    end
  end
end
# SPDX-License-Identifier: GPL-2.0-only
%YAML 1.2
---
$id: http://devicetree.org/schemas/mfd/maxim,max77802.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Maxim MAX77802 Power Management IC
maintainers:
- Javier Martinez Canillas <javier@dowhile0.org>
- Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for Maxim MAX77802 Power Management
Integrated Circuit (PMIC).
The Maxim MAX77802 is a Power Management IC which includes voltage and
current regulators (10 high efficiency Buck regulators and 32 Low-DropOut
(LDO)), RTC and clock outputs.
The MAX77802 provides two 32.768khz clock outputs that can be controlled
(gated/ungated) over I2C. The clock IDs are defined as preprocessor macros
in dt-bindings/clock/maxim,max77802.h.
properties:
compatible:
const: maxim,max77802
'#clock-cells':
const: 1
interrupts:
maxItems: 1
reg:
maxItems: 1
regulators:
$ref: /schemas/regulator/maxim,max77802.yaml
description:
List of child nodes that specify the regulators.
inb1-supply:
description: Power supply for buck1
inb2-supply:
description: Power supply for buck2
inb3-supply:
description: Power supply for buck3
inb4-supply:
description: Power supply for buck4
inb5-supply:
description: Power supply for buck5
inb6-supply:
description: Power supply for buck6
inb7-supply:
description: Power supply for buck7
inb8-supply:
description: Power supply for buck8
inb9-supply:
description: Power supply for buck9
inb10-supply:
description: Power supply for buck10
inl1-supply:
description: Power supply for LDO8, LDO15
inl2-supply:
description: Power supply for LDO17, LDO27, LDO30, LDO35
inl3-supply:
    description: Power supply for LDO3, LDO5, LDO6, LDO7
inl4-supply:
description: Power supply for LDO10, LDO11, LDO13, LDO14
inl5-supply:
description: Power supply for LDO9, LDO19
inl6-supply:
description: Power supply for LDO4, LDO21, LDO24, LDO33
inl7-supply:
description: Power supply for LDO18, LDO20, LDO28, LDO29
inl9-supply:
description: Power supply for LDO12, LDO23, LDO25, LDO26, LDO32, LDO34
inl10-supply:
description: Power supply for LDO1, LDO2
wakeup-source: true
required:
- compatible
- '#clock-cells'
- reg
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/regulator/maxim,max77802.h>
i2c {
#address-cells = <1>;
#size-cells = <0>;
pmic@9 {
compatible = "maxim,max77802";
interrupt-parent = <&gpx3>;
interrupts = <1 IRQ_TYPE_NONE>;
pinctrl-names = "default";
pinctrl-0 = <&max77802_irq>, <&pmic_selb>,
<&pmic_dvs_1>, <&pmic_dvs_2>, <&pmic_dvs_3>;
wakeup-source;
reg = <0x9>;
#clock-cells = <1>;
inb1-supply = <&tps65090_dcdc2>;
inb2-supply = <&tps65090_dcdc1>;
inb3-supply = <&tps65090_dcdc2>;
inb4-supply = <&tps65090_dcdc2>;
inb5-supply = <&tps65090_dcdc1>;
inb6-supply = <&tps65090_dcdc2>;
inb7-supply = <&tps65090_dcdc1>;
inb8-supply = <&tps65090_dcdc1>;
inb9-supply = <&tps65090_dcdc1>;
inb10-supply = <&tps65090_dcdc1>;
inl1-supply = <&buck5_reg>;
inl2-supply = <&buck7_reg>;
inl3-supply = <&buck9_reg>;
inl4-supply = <&buck9_reg>;
inl5-supply = <&buck9_reg>;
inl6-supply = <&tps65090_dcdc2>;
inl7-supply = <&buck9_reg>;
inl9-supply = <&tps65090_dcdc2>;
inl10-supply = <&buck7_reg>;
regulators {
BUCK1 {
regulator-name = "vdd_mif";
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <1300000>;
regulator-always-on;
regulator-boot-on;
regulator-ramp-delay = <12500>;
regulator-state-mem {
regulator-off-in-suspend;
};
};
BUCK2 {
regulator-name = "vdd_arm";
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <1500000>;
regulator-always-on;
regulator-boot-on;
regulator-ramp-delay = <12500>;
regulator-coupled-with = <&buck3_reg>;
regulator-coupled-max-spread = <300000>;
regulator-state-mem {
regulator-off-in-suspend;
};
};
// ...
BUCK10 {
regulator-name = "vdd_1v8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
};
};
LDO1 {
regulator-name = "vdd_1v0";
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
regulator-always-on;
regulator-initial-mode = <MAX77802_OPMODE_NORMAL>;
regulator-state-mem {
regulator-on-in-suspend;
regulator-mode = <MAX77802_OPMODE_LP>;
};
};
// ...
LDO35 {
regulator-name = "ldo_35";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
};
};
};
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/mfd/maxim,max77802.yaml |
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import os
import shlex
import subprocess
import traceback
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.connections import ConnectionBase
from ansible.utils.path import is_executable
from ansible.utils.unicode import to_bytes
class Connection(ConnectionBase):
    ''' Local chroot based connections.

    Runs commands and transfers files inside a chroot directory on the
    local machine by wrapping them with the system `chroot` binary.
    Requires the controller process to be running as root.
    '''

    # Chunk size handed to dd (and used when reading its stdout) while
    # streaming file contents into/out of the chroot.
    BUFSIZE = 65536

    has_pipelining = False

    def __init__(self, *args, **kwargs):
        super(Connection, self).__init__(*args, **kwargs)

        # The "remote address" is (re)interpreted as the chroot directory.
        self.chroot = self._play_context.remote_addr

        if os.geteuid() != 0:
            raise AnsibleError("chroot connection requires running as root")

        # we're running as root on the local system so do some
        # trivial checks for ensuring 'host' is actually a chroot'able dir
        if not os.path.isdir(self.chroot):
            raise AnsibleError("%s is not a directory" % self.chroot)

        chrootsh = os.path.join(self.chroot, 'bin/sh')
        if not is_executable(chrootsh):
            raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)

        self.chroot_cmd = distutils.spawn.find_executable('chroot')
        if not self.chroot_cmd:
            raise AnsibleError("chroot command not found in PATH")

    @property
    def transport(self):
        ''' used to identify this connection object '''
        return 'chroot'

    def _connect(self, port=None):
        ''' connect to the chroot; nothing to do here '''
        self._display.vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
        return self

    def _generate_cmd(self, executable, cmd):
        ''' Build the argv list that runs `cmd` inside the chroot.

        When `executable` (a shell) is given, the command string is handed to
        it via `-c`; otherwise the command string is tokenized and executed
        directly.
        '''
        if executable:
            local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
        else:
            # Prev to python2.7.3, shlex couldn't handle unicode type strings
            cmd = to_bytes(cmd)
            cmd = shlex.split(cmd)
            local_cmd = [self.chroot_cmd, self.chroot]
            local_cmd += cmd
        return local_cmd

    def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None, stdin=subprocess.PIPE):
        ''' run a command on the chroot.  This is only needed for implementing
        put_file() get_file() so that we don't have to read the whole file
        into memory.

        compared to exec_command() it loses some niceties like being able to
        return the process's exit code immediately.
        '''

        if sudoable and self._play_context.become and self._play_context.become_method not in self.become_methods_supported:
            raise AnsibleError("Internal Error: this module does not support running commands via %s" % self._play_context.become_method)

        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

        # We enter zone as root so we ignore privilege escalation (probably need to fix in case we have to become a specific used [ex: postgres admin])?
        local_cmd = self._generate_cmd(executable, cmd)

        self._display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
        # FIXME: cwd= needs to be set to the basedir of the playbook, which
        # should come from loader, but is not in the connection plugins
        p = subprocess.Popen(local_cmd, shell=False,
                             stdin=stdin,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return p

    def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
        ''' run a command on the chroot and return (rc, '', stdout, stderr) '''

        p = self._buffered_exec_command(cmd, tmp_path, become_user, sudoable, executable, in_data)

        stdout, stderr = p.communicate()
        return (p.returncode, '', stdout, stderr)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to chroot '''

        self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)

        try:
            with open(in_path, 'rb') as in_file:
                try:
                    # dd streams our stdin (the local file) into out_path
                    # inside the chroot.
                    p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, self.BUFSIZE), None, stdin=in_file)
                except OSError:
                    raise AnsibleError("chroot connection requires dd command in the chroot")
                try:
                    stdout, stderr = p.communicate()
                # Narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are not swallowed.
                except Exception:
                    traceback.print_exc()
                    raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
                if p.returncode != 0:
                    raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
        except IOError:
            raise AnsibleError("file or module does not exist at: %s" % in_path)

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from chroot to local '''

        self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)

        try:
            # dd writes in_path (inside the chroot) to its stdout, which we
            # stream into the local out_path below.
            p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, self.BUFSIZE), None)
        except OSError:
            raise AnsibleError("chroot connection requires dd command in the chroot")

        with open(out_path, 'wb+') as out_file:
            try:
                chunk = p.stdout.read(self.BUFSIZE)
                while chunk:
                    out_file.write(chunk)
                    chunk = p.stdout.read(self.BUFSIZE)
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are not swallowed.
            except Exception:
                traceback.print_exc()
                raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
            stdout, stderr = p.communicate()
            if p.returncode != 0:
                raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
# -*- coding: utf-8 -*-
"""
pygments.lexers.compiled
~~~~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.jvm import JavaLexer, ScalaLexer
from pygments.lexers.c_cpp import CLexer, CppLexer
from pygments.lexers.d import DLexer
from pygments.lexers.objective import ObjectiveCLexer, \
ObjectiveCppLexer, LogosLexer
from pygments.lexers.go import GoLexer
from pygments.lexers.rust import RustLexer
from pygments.lexers.c_like import ECLexer, ValaLexer, CudaLexer
from pygments.lexers.pascal import DelphiLexer, Modula2Lexer, AdaLexer
from pygments.lexers.business import CobolLexer, CobolFreeformatLexer
from pygments.lexers.fortran import FortranLexer
from pygments.lexers.prolog import PrologLexer
from pygments.lexers.python import CythonLexer
from pygments.lexers.graphics import GLShaderLexer
from pygments.lexers.ml import OcamlLexer
from pygments.lexers.basic import BlitzBasicLexer, BlitzMaxLexer, MonkeyLexer
from pygments.lexers.dylan import DylanLexer, DylanLidLexer, DylanConsoleLexer
from pygments.lexers.ooc import OocLexer
from pygments.lexers.felix import FelixLexer
from pygments.lexers.nimrod import NimrodLexer
__all__ = [] | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test PrioritiseTransaction code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import COIN, MAX_BLOCK_SIZE
class PrioritiseTransactionTest(BitcoinTestFramework):
    """Functional test for the prioritisetransaction RPC.

    Verifies that fee deltas applied with prioritisetransaction affect
    both block template selection (which transactions get mined) and
    mempool acceptance of otherwise-too-cheap transactions.
    """
    def __init__(self):
        super().__init__()
        # Fresh chain, single node; txouts are large OP_RETURN outputs
        # used to inflate transaction sizes.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.txouts = gen_return_txouts()
    def setup_network(self):
        """Start one node with priority logging and a small (10 MB) mempool."""
        self.nodes = []
        self.is_network_split = False
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-printpriority=1", "-maxmempool=10"]))
        self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
    def run_test(self):
        """Exercise prioritisetransaction for mining and mempool acceptance."""
        utxo_count = 90
        utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
        base_fee = self.relayfee*100 # our transactions are smaller than 100kb
        txids = []
        # Create 3 batches of transactions at 3 different fee rate levels
        range_size = utxo_count // 3
        for i in range(3):
            txids.append([])
            start_range = i * range_size
            end_range = start_range + range_size
            txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], (i+1)*base_fee)
        # Make sure that the size of each group of transactions exceeds
        # MAX_BLOCK_SIZE -- otherwise the test needs to be revised to create
        # more transactions.
        mempool = self.nodes[0].getrawmempool(True)
        sizes = [0, 0, 0]
        for i in range(3):
            for j in txids[i]:
                assert(j in mempool)
                sizes[i] += mempool[j]['size']
            assert(sizes[i] > MAX_BLOCK_SIZE) # Fail => raise utxo_count
        # add a fee delta to something in the cheapest bucket and make sure it gets mined
        # also check that a different entry in the cheapest bucket is NOT mined (lower
        # the priority to ensure its not mined due to priority)
        self.nodes[0].prioritisetransaction(txids[0][0], 0, int(3*base_fee*COIN))
        self.nodes[0].prioritisetransaction(txids[0][1], -1e15, 0)
        self.nodes[0].generate(1)
        mempool = self.nodes[0].getrawmempool()
        print("Assert that prioritised transaction was mined")
        assert(txids[0][0] not in mempool)
        assert(txids[0][1] in mempool)
        # Find any high-fee transaction that made it into the block.
        high_fee_tx = None
        for x in txids[2]:
            if x not in mempool:
                high_fee_tx = x
        # Something high-fee should have been mined!
        assert(high_fee_tx != None)
        # Add a prioritisation before a tx is in the mempool (de-prioritising a
        # high-fee transaction so that it's now low fee).
        self.nodes[0].prioritisetransaction(high_fee_tx, -1e15, -int(2*base_fee*COIN))
        # Add everything back to mempool
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Check to make sure our high fee rate tx is back in the mempool
        mempool = self.nodes[0].getrawmempool()
        assert(high_fee_tx in mempool)
        # Now verify the modified-high feerate transaction isn't mined before
        # the other high fee transactions. Keep mining until our mempool has
        # decreased by all the high fee size that we calculated above.
        while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
            self.nodes[0].generate(1)
        # High fee transaction should not have been mined, but other high fee rate
        # transactions should have been.
        mempool = self.nodes[0].getrawmempool()
        print("Assert that de-prioritised transaction is still in mempool")
        assert(high_fee_tx in mempool)
        for x in txids[2]:
            if (x != high_fee_tx):
                assert(x not in mempool)
        # Create a free, low priority transaction.  Should be rejected.
        utxo_list = self.nodes[0].listunspent()
        assert(len(utxo_list) > 0)
        utxo = utxo_list[0]
        inputs = []
        outputs = {}
        inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
        # Spend the full amount (no fee) to make the transaction free.
        outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
        raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
        tx_hex = self.nodes[0].signrawtransaction(raw_tx)["hex"]
        txid = self.nodes[0].sendrawtransaction(tx_hex)
        # A tx that spends an in-mempool tx has 0 priority, so we can use it to
        # test the effect of using prioritise transaction for mempool acceptance
        inputs = []
        inputs.append({"txid": txid, "vout": 0})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
        raw_tx2 = self.nodes[0].createrawtransaction(inputs, outputs)
        tx2_hex = self.nodes[0].signrawtransaction(raw_tx2)["hex"]
        tx2_id = self.nodes[0].decoderawtransaction(tx2_hex)["txid"]
        # Without a fee delta, the free zero-priority tx must be rejected.
        try:
            self.nodes[0].sendrawtransaction(tx2_hex)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26) # insufficient fee
            assert(tx2_id not in self.nodes[0].getrawmempool())
        else:
            assert(False)
        # This is a less than 1000-byte transaction, so just set the fee
        # to be the minimum for a 1000 byte transaction and check that it is
        # accepted.
        self.nodes[0].prioritisetransaction(tx2_id, 0, int(self.relayfee*COIN))
        print("Assert that prioritised free transaction is accepted to mempool")
        assert_equal(self.nodes[0].sendrawtransaction(tx2_hex), tx2_id)
        assert(tx2_id in self.nodes[0].getrawmempool())
# Allow running this functional test directly from the command line.
if __name__ == '__main__':
    PrioritiseTransactionTest().main()
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v1beta1",
"metadata": {
"name": "v0alpha1.timeseries-y-ticks-zero-decimals.v42"
},
"spec": {
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 339,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 32,
"w": 4,
"x": 0,
"y": 0
},
"id": 2,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_metric_values",
"stringInput": "1,2.3"
}
],
"title": "Panel Title",
"type": "timeseries"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 32,
"w": 4,
"x": 4,
"y": 0
},
"id": 5,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_metric_values",
"stringInput": "1,2.3"
}
],
"title": "Panel Title",
"type": "timeseries"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 17,
"w": 4,
"x": 8,
"y": 0
},
"id": 6,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_metric_values",
"stringInput": "1,2.3"
}
],
"title": "Panel Title",
"type": "timeseries"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 17,
"w": 4,
"x": 12,
"y": 0
},
"id": 3,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_metric_values",
"stringInput": "1,2.3"
}
],
"title": "Panel Title",
"type": "timeseries"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 10,
"w": 4,
"x": 16,
"y": 0
},
"id": 7,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_metric_values",
"stringInput": "1,2.3"
}
],
"title": "Panel Title",
"type": "timeseries"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 10,
"w": 4,
"x": 20,
"y": 0
},
"id": 4,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_metric_values",
"stringInput": "1,2.3"
}
],
"title": "Panel Title",
"type": "timeseries"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 22,
"w": 4,
"x": 16,
"y": 10
},
"id": 8,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_metric_values",
"stringInput": "1,2.3"
}
],
"title": "Panel Title",
"type": "timeseries"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
},
"unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 22,
"w": 4,
"x": 20,
"y": 10
},
"id": 9,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"refId": "A",
"scenarioId": "csv_metric_values",
"stringInput": "1,2.3"
}
],
"title": "Panel Title",
"type": "timeseries"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 15,
"w": 8,
"x": 8,
"y": 17
},
"id": 11,
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"title": "Panel Title",
"type": "timeseries"
}
],
"refresh": "",
"schemaVersion": 42,
"tags": [
"gdev",
"panel-tests",
"graph-ng"
],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Zero Decimals Y Ticks",
"uid": "kGvJCmGVz",
"weekStart": ""
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v0alpha1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/panel-timeseries/v0alpha1.timeseries-y-ticks-zero-decimals.v42.v1beta1.json |
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
IPv6-related utilities and helper functions.
"""
import os
import netaddr
from oslo_log import log
from neutron.common import constants
from neutron.i18n import _LI
LOG = log.getLogger(__name__)
# Cached result of is_enabled(); None means "not yet checked".
_IS_IPV6_ENABLED = None
def get_ipv6_addr_by_EUI64(prefix, mac):
    """Generate an IPv6 address from a prefix and a MAC via modified EUI-64.

    :param prefix: IPv6 network prefix (e.g. ``'2001:db8::/64'``)
    :param mac: MAC address used to build the interface identifier
    :returns: a ``netaddr.IPAddress`` whose interface identifier is the
        modified EUI-64 of ``mac`` (RFC 4291, appendix A)
    :raises TypeError: if the prefix is an IPv4 address, or if either
        argument is malformed or of a bad type
    """
    # EUI-64 address generation is only defined for IPv6 prefixes.
    if netaddr.valid_ipv4(prefix):
        msg = _("Unable to generate IP address by EUI64 for IPv4 prefix")
        raise TypeError(msg)
    try:
        eui64 = int(netaddr.EUI(mac).eui64())
        prefix = netaddr.IPNetwork(prefix)
        # Flip the universal/local bit (bit 57 of the 64-bit identifier)
        # as required by RFC 4291, then place the identifier inside the
        # prefix.  The parentheses make the intended precedence explicit:
        # the XOR applies to the EUI-64 value only, never to the sum
        # (previously this relied on '+' binding tighter than '^', which
        # only coincides with the intent when the prefix's low 64 bits
        # are zero).
        return netaddr.IPAddress(prefix.first + (eui64 ^ (1 << 57)))
    except (ValueError, netaddr.AddrFormatError):
        raise TypeError(_('Bad prefix or mac format for generating IPv6 '
                          'address by EUI-64: %(prefix)s, %(mac)s:')
                        % {'prefix': prefix, 'mac': mac})
    except TypeError:
        raise TypeError(_('Bad prefix type for generate IPv6 address by '
                          'EUI-64: %s') % prefix)
def is_enabled():
    """Report (and memoize) whether IPv6 is enabled on this host.

    The first call reads the default ``disable_ipv6`` sysctl from /proc
    and caches the answer in the module-level ``_IS_IPV6_ENABLED`` flag;
    later calls return the cached value.  A missing sysctl file counts
    as "disabled", and a disabled result is logged once.
    """
    global _IS_IPV6_ENABLED
    if _IS_IPV6_ENABLED is None:
        sysctl_path = "/proc/sys/net/ipv6/conf/default/disable_ipv6"
        enabled = False
        if os.path.exists(sysctl_path):
            with open(sysctl_path, 'r') as sysctl_file:
                # "0" means the kernel has IPv6 enabled by default.
                enabled = sysctl_file.read().strip() == "0"
        _IS_IPV6_ENABLED = enabled
        if not enabled:
            LOG.info(_LI("IPv6 is not enabled on this system."))
    return _IS_IPV6_ENABLED
def is_auto_address_subnet(subnet):
    """Return True if addresses on this subnet are assigned automatically.

    A subnet is "auto address" when either its ipv6_address_mode or its
    ipv6_ra_mode is SLAAC or DHCPv6-stateless.
    """
    auto_modes = (constants.IPV6_SLAAC, constants.DHCPV6_STATELESS)
    return (subnet['ipv6_address_mode'] in auto_modes or
            subnet['ipv6_ra_mode'] in auto_modes)
def is_eui64_address(ip_address):
    """Return True if ip_address looks like an EUI-64 derived IPv6 address.

    EUI-64 interface identifiers embed the 16-bit marker 0xfffe in the
    middle of the MAC-derived portion (RFC 4291); check for that marker.
    """
    addr = netaddr.IPAddress(ip_address)
    if addr.version != 6:
        return False
    # Isolate the 16 bits where the 0xfffe marker must sit and compare.
    return not ((addr & 0xffff000000) ^ 0xfffe000000)
def is_ipv6_pd_enabled(subnet):
    """Return True when the subnet uses the IPv6 prefix-delegation pool.

    A subnet participates in prefix delegation exactly when its
    subnetpool_id equals constants.IPV6_PD_POOL_ID.
    """
    pool_id = subnet.get('subnetpool_id')
    return pool_id == constants.IPV6_PD_POOL_ID
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants, sys
from constants import eStart, eError, eItsMe
from mbcharsetprober import MultiByteCharSetProber
from codingstatemachine import CodingStateMachine
from chardistribution import EUCJPDistributionAnalysis
from jpcntx import EUCJPContextAnalysis
from mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
    """Charset prober for EUC-JP encoded text (Python 2 era chardet).

    Combines a coding state machine (byte-sequence validity), a character
    distribution analyser and a context analyser; the reported confidence
    is the maximum of the two analysers' confidences.
    """
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCJPSMModel)
        self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
        self._mContextAnalyzer = EUCJPContextAnalysis()
        self.reset()
    def reset(self):
        """Reset the prober (including the context analyser) for new input."""
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()
    def get_charset_name(self):
        # Canonical name reported for a positive detection.
        return "EUC-JP"
    def feed(self, aBuf):
        """Feed a buffer of bytes through the state machine and analysers.

        Returns the prober state (eDetecting / eFoundIt / eNotMe) after
        consuming the buffer.
        """
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == eError:
                # Invalid byte sequence for EUC-JP: give up on this charset.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == eStart:
                # A complete character just ended; feed its final two bytes
                # to the analysers.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # The character straddles buffers: combine the byte
                    # saved from the previous feed() with this first byte.
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar, charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i-1:i+1], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i-1:i+1], charLen)
        # Remember the last byte in case a character spans into the next buffer.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: declare success early once confidence is high enough.
            if self._mContextAnalyzer.got_enough_data() and \
               (self.get_confidence() > constants.SHORTCUT_THRESHOLD):
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        # Best of the context-based and distribution-based confidences.
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
#
# (C) Copyright 2003-2011 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""Presence XMPP stanza handling
Normative reference:
- `RFC 3920 <http://www.ietf.org/rfc/rfc3920.txt>`__
"""
__docformat__ = "restructuredtext en"
from .etree import ElementTree, ElementClass
from .exceptions import BadRequestProtocolError
from .stanza import Stanza
# All legal values of the <presence/> "type" attribute handled here
# ("available" is normalised to None by the Presence constructor).
PRESENCE_TYPES = ("available", "unavailable", "probe",
        "subscribe", "unsubscribe", "subscribed", "unsubscribed",
        "invisible", "error")

# Stanza type to send when accepting a subscription-related request.
ACCEPT_RESPONSES = {
        "subscribe": "subscribed",
        "subscribed": "subscribe",
        "unsubscribe": "unsubscribed",
        "unsubscribed": "unsubscribe",
}

# Stanza type to send when denying a subscription-related request.
DENY_RESPONSES = {
        "subscribe": "unsubscribed",
        "subscribed": "unsubscribe",
        "unsubscribe": "subscribed",
        "unsubscribed": "subscribe",
}
class Presence(Stanza):
    """<presence /> stanza.
    """
    # pylint: disable-msg=R0902,R0904
    element_name = "presence"
    def __init__(self, element = None, from_jid = None, to_jid = None,
                            stanza_type = None, stanza_id = None,
                            error = None, error_cond = None, return_path = None,
                            language = None,
                            show = None, status = None, priority = None):
        """Initialize a `Presence` object.

        :Parameters:
            - `element`: XML element
            - `from_jid`: sender JID.
            - `to_jid`: recipient JID.
            - `stanza_type`: staza type: one of: None, "available",
              "unavailable", "subscribe", "subscribed", "unsubscribe",
              "unsubscribed" or "error". "available" is automaticaly changed to
              None.
            - `stanza_id`: stanza id -- value of stanza's "id" attribute
            - `language`: default language for the stanza content
            - `show`: "show" field of presence stanza. One of: None, "away",
              "xa", "dnd", "chat".
            - `status`: descriptive text for the presence stanza.
            - `priority`: presence priority.
            - `error_cond`: error condition name. Ignored if `stanza_type` is
              not "error"
        :Types:
            - `element`: :etree:`ElementTree.Element`
            - `from_jid`: `JID`
            - `to_jid`: `JID`
            - `stanza_type`: `str`
            - `stanza_id`: `str`
            - `language`: `str`
            - `show`: `str`
            - `status`: `str`
            - `priority`: `int`
            - `error_cond`: `str`
        """
        # pylint: disable-msg=R0913
        self._show = None
        self._status = None
        self._priority = 0
        if element is None:
            element = "presence"
        elif not isinstance(element, ElementClass):
            raise TypeError("Couldn't make Presence from " + repr(element))
        if stanza_type is not None and stanza_type not in PRESENCE_TYPES:
            raise ValueError("Bad presence type")
        elif stanza_type == 'available':
            # "available" is represented by the absence of a type attribute.
            stanza_type = None
        Stanza.__init__(self, element, from_jid = from_jid, to_jid = to_jid,
                        stanza_type = stanza_type, stanza_id = stanza_id,
                        error = error, error_cond = error_cond,
                        return_path = return_path, language = language)
        if self.element_name != "presence":
            raise ValueError("The element is not <presence />")
        # Pre-compute namespace-qualified tags for the child elements we parse.
        self._show_tag = self._ns_prefix + "show"
        self._status_tag = self._ns_prefix + "status"
        self._priority_tag = self._ns_prefix + "priority"
        if self._element is not None:
            self._decode_subelements()
        # Explicit keyword arguments override values decoded from the element.
        if show is not None:
            self.show = show
        if status is not None:
            self.status = status
        if priority is not None:
            self.priority = priority
    def _decode_subelements(self):
        """Decode the <show/>, <status/> and <priority/> stanza subelements."""
        for child in self._element:
            if child.tag == self._show_tag:
                self._show = child.text
            elif child.tag == self._status_tag:
                self._status = child.text
            elif child.tag == self._priority_tag:
                try:
                    self._priority = int(child.text.strip())
                    # RFC 3921: priority must fit in a signed byte.
                    if self._priority < -128 or self._priority > 127:
                        raise ValueError
                except ValueError:
                    raise BadRequestProtocolError(
                                            "Presence priority not an integer")
    def as_xml(self):
        """Return the XML stanza representation.

        Always return an independent copy of the stanza XML representation,
        which can be freely modified without affecting the stanza.

        :returntype: :etree:`ElementTree.Element`"""
        result = Stanza.as_xml(self)
        if self._show:
            child = ElementTree.SubElement(result, self._show_tag)
            child.text = self._show
        if self._status:
            child = ElementTree.SubElement(result, self._status_tag)
            child.text = self._status
        if self._priority:
            child = ElementTree.SubElement(result, self._priority_tag)
            child.text = str(self._priority)
        return result
    def copy(self):
        """Create a deep copy of the stanza.

        :returntype: `Presence`"""
        # Pass the optional attributes by keyword: the positional form
        # previously used here silently shifted the return path, show,
        # status and priority values into the `error_cond`,
        # `return_path`, `language` and `show` parameters of `__init__`,
        # corrupting every copy.
        result = Presence(None, self.from_jid, self.to_jid,
                        self.stanza_type, self.stanza_id, self.error,
                        return_path = self._return_path(),
                        show = self._show, status = self._status,
                        priority = self._priority)
        if self._payload is None:
            self.decode_payload()
        for payload in self._payload:
            result.add_payload(payload.copy())
        return result
    @property
    def show(self): # pylint: disable-msg=E0202
        """Presence status type.

        :returntype: `str`
        """
        return self._show
    @show.setter # pylint: disable-msg=E1101
    def show(self, show): # pylint: disable-msg=E0202,E0102,C0111
        self._show = str(show)
        self._dirty = True
    @property
    def status(self): # pylint: disable-msg=E0202
        """Presence status message.

        :returntype: `str`
        """
        return self._status
    @status.setter # pylint: disable-msg=E1101
    def status(self, status): # pylint: disable-msg=E0202,E0102,C0111
        self._status = str(status)
        self._dirty = True
    @property
    def priority(self): # pylint: disable-msg=E0202
        """Presence priority.

        :returntype: `str`
        """
        return self._priority
    @priority.setter # pylint: disable-msg=E1101
    def priority(self, priority): # pylint: disable-msg=E0202,E0102,C0111
        priority = int(priority)
        # Accepted range is the signed-byte range [-128, 127] (RFC 3921).
        if priority < -128 or priority > 127:
            raise ValueError("Priority must be in the [-128, 127] range")
        self._priority = priority
        self._dirty = True
    def make_accept_response(self):
        """Create "accept" response for the "subscribe" / "subscribed" /
        "unsubscribe" / "unsubscribed" presence stanza.

        :return: new stanza.
        :returntype: `Presence`
        """
        if self.stanza_type not in ("subscribe", "subscribed",
                                            "unsubscribe", "unsubscribed"):
            raise ValueError("Results may only be generated for 'subscribe',"
                    "'subscribed','unsubscribe' or 'unsubscribed' presence")
        stanza = Presence(stanza_type = ACCEPT_RESPONSES[self.stanza_type],
                            from_jid = self.to_jid, to_jid = self.from_jid,
                            stanza_id = self.stanza_id)
        return stanza
    def make_deny_response(self):
        """Create "deny" response for the "subscribe" / "subscribed" /
        "unsubscribe" / "unsubscribed" presence stanza.

        :return: new presence stanza.
        :returntype: `Presence`
        """
        if self.stanza_type not in ("subscribe", "subscribed",
                                            "unsubscribe", "unsubscribed"):
            raise ValueError("Results may only be generated for 'subscribe',"
                    "'subscribed','unsubscribe' or 'unsubscribed' presence")
        stanza = Presence(stanza_type = DENY_RESPONSES[self.stanza_type],
                            from_jid = self.to_jid, to_jid = self.from_jid,
                            stanza_id = self.stanza_id)
        return stanza
    def make_error_response(self, cond):
        """Create error response for the any non-error presence stanza.

        :Parameters:
            - `cond`: error condition name, as defined in XMPP specification.
        :Types:
            - `cond`: `str`

        :return: new presence stanza.
        :returntype: `Presence`
        """
        if self.stanza_type == "error":
            raise ValueError("Errors may not be generated in response"
                                                                " to errors")
        stanza = Presence(stanza_type = "error", from_jid = self.from_jid,
                        to_jid = self.to_jid, stanza_id = self.stanza_id,
                        status = self._status, show = self._show,
                        priority = self._priority, error_cond = cond)
        if self._payload is None:
            self.decode_payload()
        for payload in self._payload:
            stanza.add_payload(payload)
        return stanza
# vi: sts=4 et sw=4 | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods to set testcases up for Swift and/or S3 tests.
"""
from __future__ import print_function
import BaseHTTPServer
import threading
from oslo_utils import units
# Size in bytes of the fake image payload (also used as Content-Length).
FIVE_KB = 5 * units.Ki
class RemoteImageHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP request handler that serves a fake 5 KB image for any
    path containing 'images'; every other path returns 404.
    """
    def do_HEAD(self):
        """
        Respond to an image HEAD request fake metadata
        """
        if 'images' in self.path:
            self.send_response(200)
            self.send_header('Content-Type', 'application/octet-stream')
            self.send_header('Content-Length', FIVE_KB)
            self.end_headers()
            return
        else:
            self.send_error(404, 'File Not Found: %s' % self.path)
            return
    def do_GET(self):
        """
        Respond to an image GET request with fake image content.
        """
        if 'images' in self.path:
            self.send_response(200)
            self.send_header('Content-Type', 'application/octet-stream')
            self.send_header('Content-Length', FIVE_KB)
            self.end_headers()
            # Payload is FIVE_KB '*' characters, matching Content-Length.
            image_data = '*' * FIVE_KB
            self.wfile.write(image_data)
            # Explicitly close the response stream after writing the body.
            self.wfile.close()
            return
        else:
            self.send_error(404, 'File Not Found: %s' % self.path)
            return
    def log_message(self, format, *args):
        """
        Simple override to prevent writing crap to stderr...
        """
        pass
def setup_http(test):
    """Start a throwaway HTTP server serving fake image data.

    Binds RemoteImageHandler to an ephemeral port on localhost, serves
    it from a background thread, and records the server, ip and port as
    attributes on `test`.  Shutdown is registered as a test cleanup.
    """
    httpd = BaseHTTPServer.HTTPServer(('127.0.0.1', 0), RemoteImageHandler)
    host, port = httpd.server_address

    def _serve():
        httpd.serve_forever()

    threading.Thread(target=_serve).start()
    test.http_server = httpd
    test.http_ip = host
    test.http_port = port
    test.addCleanup(test.http_server.shutdown)
def get_http_uri(test, image_id):
    """Return the URI for `image_id` on the test's fake HTTP image server."""
    base = 'http://{0}:{1}/images/'.format(test.http_ip, test.http_port)
    return base + image_id
# Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Michaelpeng <michaelpeng@tencent.com>
# Date: October 20, 2011
"""
This is the test module to test query function of blade.
"""
import blade_test
class TestQuery(blade_test.TargetTest):
    """Test the `query` command of blade against the test_query project."""

    def setUp(self):
        """Load the test_query project in query mode and cache its targets."""
        self.doSetUp('test_query', full_targets=['...'], command='query')
        self.query_targets = ['test_query:poppy']
        self.all_targets = self.blade.get_build_targets()

    def testQueryCorrectly(self):
        """Query targets' dependency relationships and check both directions."""
        self.assertTrue(self.all_targets)
        # query_helper maps (path, name) -> (deps, depended_by).
        result_map = self.blade.query_helper(self.query_targets)
        query_key = ('test_query', 'poppy')
        # assertIn gives a clearer failure message than assertTrue(x in y);
        # membership on the dict itself avoids building .keys().
        self.assertIn(query_key, result_map)
        deps = result_map.get(query_key, [])[0]
        depended_by = result_map.get(query_key, [])[1]
        self.assertTrue(deps)
        self.assertTrue(depended_by)
        # Direct dependencies of :poppy.
        self.assertIn(('test_query', 'rpc_meta_info_proto'), deps)
        self.assertIn(('test_query', 'static_resource'), deps)
        # Targets that depend on :poppy.
        self.assertIn(('test_query', 'poppy_client'), depended_by)
        self.assertIn(('test_query', 'poppy_mock'), depended_by)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    blade_test.run(TestQuery)
# Copyright 2020 The Weakly-Supervised Control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file was modified from `https://github.com/google-research/google-research/blob/master/weak_disentangle`.
"""Base modules.
"""
from collections import OrderedDict
import tensorflow as tf
from weakly_supervised_control.disentanglement.tensorsketch import utils as tsu
def build_with_name_scope(build_parameters):
    """Decorator for `build_parameters` implementations.

    Wraps the build function so it runs inside the module's tf name scope,
    asserts the module has not been built before, and flips `self.built`
    to True afterwards so parameters are only created once.
    """
    @tf.Module.with_name_scope
    def build_params_once_with_ns(self, *args):
        # Building twice would silently create duplicate variables.
        assert not self.built, "{}.built already True".format(self.name)
        build_parameters(self, *args)
        self.built = True
    return build_params_once_with_ns
class Repr(object):
    """Lightweight wrapper whose repr() is a fixed, precomputed string.

    Used to return readable, already-formatted summaries from functions
    whose result is meant to be displayed rather than inspected.
    """

    def __init__(self, name):
        # Exact text that __repr__ will emit.
        self.name = name

    def __repr__(self):
        return self.name
class Module(tf.Module):
    """Abstract module class.
    Module is a tree-structured class that can contain other Module objects.
    Traversal of the tree is supported via iterating through _child_modules. All
    models and layers should be subclasses of Module. This class provides support
    for several useful features: setting train/eval, tracking child modules,
    tracking tf.Variables, in/out hooks, mapping a function into the Module tree
    (the apply function), and printing the module as a string representation (for
    which we support printing at various levels of verbosity).
    """
    # Levels of read priority (verbosity levels accepted by read/to_string).
    WITH_NAMES = 0
    WITH_EXTRA = 1
    WITH_VARS = 2
    WITH_DTYPE = 3
    WITH_NUMPY = 4

    def __init__(self, name=None):
        """Module initializer.
        Args:
          name: string for the name of the module used for tf name scoping.
        """
        # Special construction of _child_modules and _variables
        # to avoid triggering self.__setattr__ (which reads these dicts).
        self.__dict__["_blacklist"] = set()
        self.__dict__["_child_modules"] = OrderedDict()
        self.__dict__["_variables"] = OrderedDict()
        super().__init__(name=name)
        self.training = True
        self.built = False
        self.in_hooks = OrderedDict()
        self.out_hooks = OrderedDict()

    def __setattr__(self, name, value):
        # We catch non-blacklisted variables for the purposes of repr construction
        # only and do not affect the computational graph: Module values are
        # registered as children, tf.Variable values as variables, and stale
        # registrations are removed when an attribute is rebound.
        try:
            if name not in self._blacklist:
                if isinstance(value, Module):
                    self._child_modules.update({name: value})
                else:
                    if name in self._child_modules:
                        del self._child_modules[name]
                    if isinstance(value, tf.Variable):
                        self._variables.update({name: value})
                    else:
                        if name in self._variables:
                            del self._variables[name]
        except AttributeError as e:
            # _blacklist only exists after __init__ ran; give a clearer error.
            raise AttributeError(
                "Call super().__init__() before assigning variable to Module instance"
            ) from e
        # tf.Module makes modifications important to graph construction
        super().__setattr__(name, value)

    def __delattr__(self, name):
        # Keep the child/variable registries in sync on attribute deletion.
        if name not in self._blacklist:
            if name in self._child_modules:
                del self._child_modules[name]
            elif name in self._variables:
                del self._variables[name]
        super().__delattr__(name)

    def select_hooks_dict(self, in_hook):
        # Return the in_hooks dict when in_hook is truthy, else out_hooks.
        if in_hook:
            return self.in_hooks
        else:
            return self.out_hooks

    def train(self, mode=True):
        # Set training mode recursively on this module and all submodules.
        self.training = mode
        for m in self.submodules:
            m.train(mode)

    def apply(self, fn, filter_fn=None, targets=None):
        # Light wrapper to parse filter_fn and targets args:
        # `targets` is a type (or tuple of types) to match via isinstance.
        if targets is not None:
            assert filter_fn is None, "Cannot use both filter_fn and targets"
            def filter_fn(m): return isinstance(m, targets)
        elif fil_fn_placeholder := None:  # pragma: no cover
            pass
        elif filter_fn is None:
            def filter_fn(m): return True
        self._apply(fn, filter_fn)

    def _apply(self, fn, filter_fn):
        # Apply fn to children first before applying to parent
        # This ensures that parent can override children's decisions
        # Run in chronological reverse order to get reverse topo+chrono apply
        for m in reversed(self._child_modules.values()):
            m._apply(fn, filter_fn)
        if filter_fn(self):
            fn(self)

    def eval(self):
        # Convenience alias for train(False).
        self.train(False)

    def build(self, *shapes, once=True):
        # Build the module by calling it on zero tensors of the given shapes.
        if once:
            assert not self.built, "{}.built already True".format(self.name)
        tensors = tsu.shapes_to_zeros(*shapes)
        self(*tensors)
        return self

    @build_with_name_scope
    def build_parameters(self, *inputs):
        pass  # By default, module is parameterless

    def reset_parameters(self):
        # Subclasses override to re-initialize their variables.
        pass

    def forward(self, *inputs):
        # Identity by default; subclasses implement the actual computation.
        return inputs

    @tf.Module.with_name_scope
    def __call__(self, *inputs):
        # Lazily build parameters on first call, then run in-hooks,
        # forward, and out-hooks. A hook returning non-None replaces the
        # inputs/outputs it was given.
        if not self.built:
            self.build_parameters(*inputs)
        for hook in self.in_hooks.values():
            response = hook(self, *inputs)
            if response is not None:
                inputs = tsu.pack(response)
        outputs = self.forward(*inputs)
        for hook in self.out_hooks.values():
            response = hook(self, *tsu.pack(outputs))
            if response is not None:
                outputs = response
        return outputs

    def __repr__(self):
        return self.to_string(verbose=0)

    def extra_repr(self):
        # Subclasses override to append extra info at verbosity >= 1.
        return ""

    def read(self, verbose=0, trainable=None):
        # Wrap the string so interactive shells display it un-quoted.
        return Repr(self.to_string(verbose, trainable))

    def to_string(self, verbose=0, trainable=None):
        # Level 0: only names
        # Level 1: Level 0 + extra repr
        # Level 2: Level 1 + variable names and info
        # Level 3: Level 2 + dtype info
        # Level 4: Level 3 + actual variable info (shortened)
        main = self.name
        if verbose >= 1:
            main += self.extra_repr()
        if verbose >= 2:
            var_body = "\n"
            for (name, var) in self._variables.items():
                # Skip non-trainable variables if filtering by trainability
                if trainable and not var.trainable:
                    continue
                var_body += "{}.{}: shape={}".format(self.name, name,
                                                     var._shape_tuple())
                if verbose >= 3:
                    var_body += ", dtype={}".format(repr(var.dtype))
                if var.trainable:
                    var_body += " {train}"
                if verbose >= 4:
                    var_body += "\n" + \
                        tsu.indent(tsu.shorten(str(var.numpy())))
                var_body += "\n"
            main += tsu.indent(var_body).rstrip()
        body = "\n"
        for module in self._child_modules.values():
            body += module.to_string(verbose, trainable) + "\n"
        main += tsu.indent(body).rstrip()
        # Wrap string as Repr object
        return main

    def flatten_modules(self, filter_fn=None, targets=None):
        # Returns a flattened version of tree in reverse topological order
        module_list = []

        def collect(m):
            module_list.append(m)
        self.apply(collect, filter_fn, targets)
        return list(reversed(module_list))
class ModuleList(Module):
    """Stores a list of Modules.

    The stored modules are registered as children (keyed by their index)
    so that tree traversal, train/eval, and apply reach them.
    """

    def __init__(self, *modules, name=None):
        """ModuleList initializer.
        Args:
          *modules: tuple of modules or a tuple of a single list of modules.
          name: name scope for this module.
        Raises:
          ValueError: input is not modules or a list of modules.
        """
        super().__init__(name=name)
        self.modules = list(self.disambiguate_modules(modules))
        # Register children under their integer index.
        self._child_modules.update(zip(range(len(self.modules)), self.modules))

    def disambiguate_modules(self, modules):
        # We support passing in either modules as arguments, or a single list
        # of modules. In other words, at this point the variable modules should
        # either be
        #   modules = (m, m, ...)
        # or
        #   modules = ((m, m, ...),) or ([m, m, ...],)
        # To disambiguate, check if elements of modules is Module.
        if tsu.elem_isinstance(modules, Module):
            # We leverage isinstance to properly handle edge-case where
            # modules=() here.
            return modules
        elif len(modules) == 1 and tsu.elem_isinstance(modules[0], Module):
            return modules[0]
        else:
            # Fixed grammar of the error message ("must modules" -> "must be").
            raise ValueError("Input must be modules or a list of modules")

    def append(self, *modules):
        """Append one or more modules (or a single list of modules)."""
        modules = self.disambiguate_modules(modules)
        for module in modules:
            # Register before appending so the key equals the new index.
            self._child_modules.update({len(self.modules): module})
            self.modules.append(module)

    def __iter__(self):
        return iter(self.modules)

    def __getitem__(self, index):
        return self.modules[index]
class Sequential(ModuleList):
    """Stores a list of modules that can be daisy-chained in forward call.
    """

    def forward(self, *inputs):
        # Feed each module's output into the next one.
        # tsu.pack presumably normalizes a single output into a tuple so it
        # can be *-splatted into the next call — confirm against tsu.
        for module in self.modules:
            inputs = module(*tsu.pack(inputs))
        return inputs
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build cgo && windows && !internal
package cgotest
import "testing"
func TestCallbackCallersSEH(t *testing.T) { testCallbackCallersSEH(t) } | go | github | https://github.com/golang/go | src/cmd/cgo/internal/test/seh_windows_test.go |
import os
from pypers.core.step import CmdLineStep
from pypers.utils import utils
from pypers.utils.samplesheet import SampleSheet
class CasavaDemux(CmdLineStep):
    """Pipeline step that runs Casava 1.8.2 BCL-to-FASTQ demultiplexing."""

    # Declarative step specification consumed by the pypers framework:
    # inputs/params/outputs plus the command template rendered with
    # {{placeholder}} substitution.
    spec = {
        "name": "CasavaDemux",
        "version": "1.8.2",
        "descr": [
            "Runs Casava demultiplexing"
        ],
        "url": "https://support.illumina.com/sequencing/sequencing_software/casava.html",
        "args":
        {
            "inputs": [
                {
                    "name": "input_dir",
                    "type": "dir",
                    "descr": "the name of the input directory",
                },
                {
                    "name": "sample_sheet",
                    "type": "file",
                    "descr": "the path to the validated sample sheet",
                },
            ],
            "params": [
                {
                    "name": "threads",
                    "type": "int",
                    "value": 4,
                    "descr": "the number of threads used by the process",
                }
            ],
            "outputs": [
                {
                    "name": "output_files",
                    "type": "file",
                    "descr": "output file names",
                }
            ]
        },
        "cmd": [
            "/software/pypers/casava/casava-1.8.2/bin/configureBclToFastq.pl",
            "--input-dir {{input_dir}}",
            "--output-dir {{output_dir}}",
            "--sample-sheet {{sample_sheet}}",
            "--use-bases-mask {{use_base_mask}}",
            "--fastq-cluster-count 1000000000",
            "--force",
            "--with-failed-reads",
            "--ignore-missing-bcl",
            "--ignore-missing-control",
            " && make -j {{threads}} -C {{output_dir}} all",
        ],
        "extra_env": {
            'LD_LIBRARY_PATH': '/software/pypers/python/Anaconda-2.1.0/lib'
        },
        "requirements": {
            "cpus": "8"
        }
    }

    def process(self):
        """Resolve inputs, compute the bases mask, run Casava, collect outputs.

        Raises:
            Exception: if more than one sample sheet was provided.
        """
        # Reduce inputs to only first element.
        # NOTE(review): on Python 2, str has no __iter__, so this only fires
        # for list-like inputs; under Python 3 a plain string would also
        # match — confirm before porting.
        if hasattr(self.input_dir, '__iter__'):
            self.input_dir = self.input_dir[0]
        self.input_dir = os.path.join(self.input_dir, "Data/Intensities/BaseCalls/")
        # isinstance() instead of `type(...) == list` so list subclasses work.
        if isinstance(self.sample_sheet, list):
            if len(self.sample_sheet) > 1:
                raise Exception('Too many sample sheet files: %s' % ','.join(self.sample_sheet))
            self.sample_sheet = self.sample_sheet[0]
        ss = SampleSheet(self.sample_sheet)
        mask_length, double_idx = ss.get_mask_length()
        # Dual-index runs need two index reads (I{n}) in the bases mask.
        if double_idx:
            self.use_base_mask = "y*,I{0},I{0},Y*".format(mask_length)
        else:
            self.use_base_mask = "y*,I{0},Y*".format(mask_length)
        self.use_base_mask = str(self.use_base_mask)
        # Renders and executes the cmd template above.
        super(CasavaDemux, self).process()
        prj_dir = os.path.join(self.output_dir, 'Project_' + self.meta['pipeline']['project_name'])
        self.output_files = utils.find(prj_dir, "*.fastq.gz")
        # Set the metadata: map each FASTQ back to the sample id that
        # prefixes its file name.
        self.meta['job']['sample_id'] = []
        sample_ids = ss.get_sample_ids()
        for output_file in self.output_files:
            for sample_id in sample_ids:
                if os.path.basename(output_file).startswith("%s_" % sample_id):
                    self.meta['job']['sample_id'].append(sample_id)
                    break
/* Copyright 2017 - 2025 R. Thomas
* Copyright 2017 - 2025 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIEF_ELF_RELOCATION_H
#define LIEF_ELF_RELOCATION_H
#include <ostream>
#include "LIEF/Object.hpp"
#include "LIEF/visibility.h"
#include "LIEF/errors.hpp"
#include "LIEF/Abstract/Relocation.hpp"
#include "LIEF/ELF/enums.hpp"
#include "LIEF/ELF/Header.hpp"
namespace LIEF {
namespace ELF {
class Parser;
class Binary;
class Builder;
class Symbol;
class Section;
/// Class that represents an ELF relocation.
class LIEF_API Relocation : public LIEF::Relocation {
  friend class Parser;
  friend class Binary;
  friend class Builder;

  public:
  /// The *purpose* of a relocation defines how this relocation is used by the
  /// loader.
  enum class PURPOSE {
    NONE = 0,
    PLTGOT = 1,  ///< The relocation is associated with the PLT/GOT resolution
    DYNAMIC = 2, ///< The relocation is used for regular data/code relocation
    OBJECT = 3,  ///< The relocation is used in an object file
  };

  /// On-disk encoding used to store the relocation.
  enum class ENCODING {
    UNKNOWN = 0,
    REL,          ///< The relocation is using the regular Elf_Rel structure
    RELA,         ///< The relocation is using the regular Elf_Rela structure
    RELR,         ///< The relocation is using the relative relocation format
    ANDROID_SLEB, ///< The relocation is using the packed Android-SLEB128 format
  };

  // TYPE values tag the target architecture in the bits above R_BIT so that
  // relocation numbers from different architectures never collide; R_MASK
  // recovers the raw ELF relocation number (see to_value()).
  static constexpr uint64_t R_BIT = 27;
  static constexpr uint64_t R_MASK = (uint64_t(1) << R_BIT) - 1;
  static constexpr uint64_t R_X64 = uint64_t(1) << R_BIT;
  static constexpr uint64_t R_AARCH64 = uint64_t(2) << R_BIT;
  static constexpr uint64_t R_ARM = uint64_t(3) << R_BIT;
  static constexpr uint64_t R_HEXAGON = uint64_t(4) << R_BIT;
  static constexpr uint64_t R_X86 = uint64_t(5) << R_BIT;
  static constexpr uint64_t R_LARCH = uint64_t(6) << R_BIT;
  static constexpr uint64_t R_MIPS = uint64_t(7) << R_BIT;
  static constexpr uint64_t R_PPC = uint64_t(8) << R_BIT;
  static constexpr uint64_t R_PPC64 = uint64_t(9) << R_BIT;
  static constexpr uint64_t R_SPARC = uint64_t(10) << R_BIT;
  static constexpr uint64_t R_SYSZ = uint64_t(11) << R_BIT;
  static constexpr uint64_t R_RISCV = uint64_t(12) << R_BIT;
  static constexpr uint64_t R_BPF = uint64_t(13) << R_BIT;
  static constexpr uint64_t R_SH4 = uint64_t(14) << R_BIT;

  /// The different types of the relocation
  enum class TYPE : uint32_t {
    UNKNOWN = uint32_t(-1),

    #define ELF_RELOC(name, value) name = (value | R_X64),
    #include "LIEF/ELF/Relocations/x86_64.def"
    #undef ELF_RELOC

    #define ELF_RELOC(name, value) name = (value | R_AARCH64),
    #include "LIEF/ELF/Relocations/AArch64.def"
    #undef ELF_RELOC

    #define ELF_RELOC(name, value) name = (value | R_ARM),
    #include "LIEF/ELF/Relocations/ARM.def"
    #undef ELF_RELOC

    #define ELF_RELOC(name, value) name = (value | R_HEXAGON),
    #include "LIEF/ELF/Relocations/Hexagon.def"
    #undef ELF_RELOC

    #define ELF_RELOC(name, value) name = (value | R_X86),
    #include "LIEF/ELF/Relocations/i386.def"
    #undef ELF_RELOC

    #define ELF_RELOC(name, value) name = (value | R_LARCH),
    #include "LIEF/ELF/Relocations/LoongArch.def"
    #undef ELF_RELOC

    #define ELF_RELOC(name, value) name = (value | R_MIPS),
    #include "LIEF/ELF/Relocations/Mips.def"
    #undef ELF_RELOC

    #define ELF_RELOC(name, value) name = (value | R_PPC),
    #include "LIEF/ELF/Relocations/PowerPC.def"
    #undef ELF_RELOC

    #define ELF_RELOC(name, value) name = (value | R_PPC64),
    #include "LIEF/ELF/Relocations/PowerPC64.def"
    #undef ELF_RELOC

    #define ELF_RELOC(name, value) name = (value | R_SPARC),
    #include "LIEF/ELF/Relocations/Sparc.def"
    #undef ELF_RELOC

    #define ELF_RELOC(name, value) name = (value | R_SYSZ),
    #include "LIEF/ELF/Relocations/SystemZ.def"
    #undef ELF_RELOC

    #define ELF_RELOC(name, value) name = (value | R_RISCV),
    #include "LIEF/ELF/Relocations/RISCV.def"
    #undef ELF_RELOC

    #define ELF_RELOC(name, value) name = (value | R_BPF),
    #include "LIEF/ELF/Relocations/BPF.def"
    #undef ELF_RELOC

    #define ELF_RELOC(name, value) name = (value | R_SH4),
    #include "LIEF/ELF/Relocations/SH4.def"
    #undef ELF_RELOC
  };

  /// Convert a raw ELF relocation number into the arch-tagged TYPE.
  static TYPE type_from(uint32_t value, ARCH arch);

  /// Strip the architecture tag and return the raw ELF relocation number.
  static uint32_t to_value(TYPE type) {
    return static_cast<uint32_t>(type) & R_MASK;
  }

  Relocation(uint64_t address, TYPE type, ENCODING enc);
  Relocation() = default;

  Relocation(ARCH arch) {
    architecture_ = arch;
  }

  ~Relocation() override = default;

  /// Copy constructor.
  ///
  /// \warning When this constructor is invoked, referenced sections or symbols
  /// are discarded. This means that on the copied Relocation, Relocation::section,
  /// Relocation::symbol and Relocation::symbol_table are set to a nullptr.
  Relocation(const Relocation& other) :
    LIEF::Relocation{other},
    type_{other.type_},
    addend_{other.addend_},
    encoding_{other.encoding_},
    architecture_{other.architecture_}
  {}

  /// Copy assignment operator.
  ///
  /// Please read the notice of the copy constructor
  Relocation& operator=(Relocation other) {
    swap(other);
    return *this;
  }

  void swap(Relocation& other) {
    std::swap(address_, other.address_);
    std::swap(type_, other.type_);
    std::swap(addend_, other.addend_);
    std::swap(encoding_, other.encoding_);
    std::swap(symbol_, other.symbol_);
    std::swap(architecture_, other.architecture_);
    std::swap(purpose_, other.purpose_);
    std::swap(section_, other.section_);
    std::swap(symbol_table_, other.symbol_table_);
    std::swap(info_, other.info_);
    std::swap(binary_, other.binary_);
  }

  /// Additional value that can be involved in the relocation processing
  int64_t addend() const {
    return addend_;
  }

  /// Type of the relocation
  TYPE type() const {
    return type_;
  }

  /// Check if the relocation uses the explicit addend() field
  /// (this is usually the case for 64 bits binaries)
  bool is_rela() const {
    return encoding_ == ENCODING::RELA;
  }

  /// Check if the relocation uses the implicit addend
  /// (i.e. not present in the ELF structure)
  bool is_rel() const {
    return encoding_ == ENCODING::REL;
  }

  /// True if the relocation is using the relative encoding
  bool is_relatively_encoded() const {
    return encoding_ == ENCODING::RELR;
  }

  /// True if the relocation is using the Android packed relocation format
  bool is_android_packed() const {
    return encoding_ == ENCODING::ANDROID_SLEB;
  }

  /// Relocation info which contains, for instance, the symbol index
  uint32_t info() const {
    return info_;
  }

  /// (re)Compute the *raw* `r_info` attribute based on the given ELF class.
  /// ELF32 packs it as (info << 8) | type; ELF64 as (info << 32) | type.
  uint64_t r_info(Header::CLASS clazz) const {
    if (clazz == Header::CLASS::NONE) {
      return 0;
    }
    return clazz == Header::CLASS::ELF32 ?
           uint32_t(info()) << 8 | to_value(type()) :
           uint64_t(info()) << 32 | (to_value(type()) & 0xffffffffL);
  }

  /// Target architecture for this relocation
  ARCH architecture() const {
    return architecture_;
  }

  PURPOSE purpose() const {
    return purpose_;
  }

  /// The encoding of the relocation
  ENCODING encoding() const {
    return encoding_;
  }

  /// True if the semantic of the relocation is `<ARCH>_RELATIVE`
  /// NOTE(review): only AArch64/x86-64/i386/ARM/Hexagon/PPC(64) relative
  /// types are covered here; other architectures always return false.
  bool is_relative() const {
    return type_ == TYPE::AARCH64_RELATIVE || type_ == TYPE::X86_64_RELATIVE ||
           type_ == TYPE::X86_RELATIVE || type_ == TYPE::ARM_RELATIVE ||
           type_ == TYPE::HEX_RELATIVE || type_ == TYPE::PPC64_RELATIVE ||
           type_ == TYPE::PPC_RELATIVE;
  }

  /// Return the size (in **bits**) of the value associated with this relocation
  /// Return -1 if the size can't be determined
  size_t size() const override;

  /// True if the current relocation is associated with a symbol
  bool has_symbol() const {
    return symbol_ != nullptr;
  }

  /// Symbol associated with the relocation (or a nullptr)
  Symbol* symbol() {
    return symbol_;
  }

  const Symbol* symbol() const {
    return symbol_;
  }

  /// True if the relocation has an associated section
  bool has_section() const {
    return section() != nullptr;
  }

  /// The section in which the relocation is applied (or a nullptr)
  Section* section() {
    return section_;
  }

  const Section* section() const {
    return section_;
  }

  /// The associated symbol table (or a nullptr)
  Section* symbol_table() {
    return symbol_table_;
  }

  const Section* symbol_table() const {
    return symbol_table_;
  }

  void addend(int64_t addend) {
    addend_ = addend;
  }

  void type(TYPE type) {
    type_ = type;
  }

  void purpose(PURPOSE purpose) {
    purpose_ = purpose;
  }

  void info(uint32_t v) {
    info_ = v;
  }

  void symbol(Symbol* symbol) {
    symbol_ = symbol;
  }

  void section(Section* section) {
    section_ = section;
  }

  void symbol_table(Section* section) {
    symbol_table_ = section;
  }

  /// Try to resolve the value of the relocation such as
  /// `*address() = resolve()`
  result<uint64_t> resolve(uint64_t base_address = 0) const;

  void accept(Visitor& visitor) const override;

  LIEF_API friend std::ostream& operator<<(std::ostream& os, const Relocation& entry);

  private:
  template<class T>
  LIEF_LOCAL Relocation(const T& header, PURPOSE purpose, ENCODING enc, ARCH arch);

  TYPE type_ = TYPE::UNKNOWN;
  int64_t addend_ = 0;
  ENCODING encoding_ = ENCODING::UNKNOWN;
  Symbol* symbol_ = nullptr;
  ARCH architecture_ = ARCH::NONE;
  PURPOSE purpose_ = PURPOSE::NONE;
  Section* section_ = nullptr;
  Section* symbol_table_ = nullptr;
  uint32_t info_ = 0;
  Binary* binary_ = nullptr;
};
LIEF_API const char* to_string(Relocation::TYPE type);
}
}
#endif | unknown | github | https://github.com/nodejs/node | deps/LIEF/include/LIEF/ELF/Relocation.hpp |
from __future__ import unicode_literals
from django.db import transaction
from django.test import TestCase
from django.utils import six
from .models import Article, Publication
class ManyToManyTests(TestCase):
def setUp(self):
# Create a couple of Publications.
self.p1 = Publication.objects.create(id=None, title='The Python Journal')
self.p2 = Publication.objects.create(id=None, title='Science News')
self.p3 = Publication.objects.create(id=None, title='Science Weekly')
self.p4 = Publication.objects.create(title='Highlights for Children')
self.a1 = Article.objects.create(id=None, headline='Django lets you build Web apps easily')
self.a1.publications.add(self.p1)
self.a2 = Article.objects.create(id=None, headline='NASA uses Python')
self.a2.publications.add(self.p1, self.p2, self.p3, self.p4)
self.a3 = Article.objects.create(headline='NASA finds intelligent life on Earth')
self.a3.publications.add(self.p2)
self.a4 = Article.objects.create(headline='Oxygen-free diet works wonders')
self.a4.publications.add(self.p2)
def test_add(self):
# Create an Article.
a5 = Article(id=None, headline='Django lets you reate Web apps easily')
# You can't associate it with a Publication until it's been saved.
self.assertRaises(ValueError, getattr, a5, 'publications')
# Save it!
a5.save()
# Associate the Article with a Publication.
a5.publications.add(self.p1)
self.assertQuerysetEqual(a5.publications.all(),
['<Publication: The Python Journal>'])
# Create another Article, and set it to appear in both Publications.
a6 = Article(id=None, headline='ESA uses Python')
a6.save()
a6.publications.add(self.p1, self.p2)
a6.publications.add(self.p3)
# Adding a second time is OK
a6.publications.add(self.p3)
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
# Adding an object of the wrong type raises TypeError
with six.assertRaisesRegex(self, TypeError, "'Publication' instance expected, got <Article.*"):
with transaction.atomic():
a6.publications.add(a5)
# Add a Publication directly via publications.add by using keyword arguments.
a6.publications.create(title='Highlights for Adults')
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Highlights for Adults>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_reverse_add(self):
# Adding via the 'other' end of an m2m
a5 = Article(headline='NASA finds intelligent life on Mars')
a5.save()
self.p2.article_set.add(a5)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA finds intelligent life on Mars>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(a5.publications.all(),
['<Publication: Science News>'])
# Adding via the other end using keywords
self.p2.article_set.create(headline='Carbon-free diet works wonders')
self.assertQuerysetEqual(
self.p2.article_set.all(),
[
'<Article: Carbon-free diet works wonders>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA finds intelligent life on Mars>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
a6 = self.p2.article_set.all()[3]
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_related_sets(self):
# Article objects have access to their related Publication objects.
self.assertQuerysetEqual(self.a1.publications.all(),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(self.a2.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
# Publication objects have access to their related Article objects.
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.p1.article_set.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(Publication.objects.get(id=self.p4.id).article_set.all(),
['<Article: NASA uses Python>'])
def test_selects(self):
# We can perform kwarg queries across m2m relationships
self.assertQuerysetEqual(
Article.objects.filter(publications__id__exact=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__pk=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications=self.p1),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__title__startswith="Science"),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__title__startswith="Science").distinct(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
# The count() function respects distinct() as well.
self.assertEqual(Article.objects.filter(publications__title__startswith="Science").count(), 4)
self.assertEqual(Article.objects.filter(publications__title__startswith="Science").distinct().count(), 3)
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1.id, self.p2.id]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1.id, self.p2]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1, self.p2]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
# Excluding a related item works as you would expect, too (although the SQL
# involved is a little complex).
self.assertQuerysetEqual(Article.objects.exclude(publications=self.p2),
['<Article: Django lets you build Web apps easily>'])
def test_reverse_selects(self):
# Reverse m2m queries are supported (i.e., starting at the table that
# doesn't have a ManyToManyField).
self.assertQuerysetEqual(Publication.objects.filter(id__exact=self.p1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(pk=self.p1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(
Publication.objects.filter(article__headline__startswith="NASA"),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(Publication.objects.filter(article__id__exact=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article__pk=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article=self.a1),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1.id, self.a2.id]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1.id, self.a2]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1, self.a2]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_delete(self):
# If we delete a Publication, its Articles won't be able to access it.
self.p1.delete()
self.assertQuerysetEqual(Publication.objects.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
])
self.assertQuerysetEqual(self.a1.publications.all(), [])
# If we delete an Article, its Publications won't be able to access it.
self.a2.delete()
self.assertQuerysetEqual(Article.objects.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
def test_bulk_delete(self):
    """Bulk queryset deletes cascade into the M2M join table on both sides."""
    # Bulk delete some Publications - references to deleted publications should go
    Publication.objects.filter(title__startswith='Science').delete()
    self.assertQuerysetEqual(Publication.objects.all(),
        [
            '<Publication: Highlights for Children>',
            '<Publication: The Python Journal>',
        ])
    self.assertQuerysetEqual(Article.objects.all(),
        [
            '<Article: Django lets you build Web apps easily>',
            '<Article: NASA finds intelligent life on Earth>',
            '<Article: NASA uses Python>',
            '<Article: Oxygen-free diet works wonders>',
        ])
    self.assertQuerysetEqual(self.a2.publications.all(),
        [
            '<Publication: Highlights for Children>',
            '<Publication: The Python Journal>',
        ])
    # Bulk delete some articles - references to deleted objects should go
    q = Article.objects.filter(headline__startswith='Django')
    self.assertQuerysetEqual(q, ['<Article: Django lets you build Web apps easily>'])
    q.delete()
    # After the delete, the QuerySet cache needs to be cleared,
    # and the referenced objects should be gone
    self.assertQuerysetEqual(q, [])
    self.assertQuerysetEqual(self.p1.article_set.all(),
        ['<Article: NASA uses Python>'])
def test_remove(self):
    """remove() detaches an M2M link from either side without deleting the objects."""
    # Removing publication from an article:
    self.assertQuerysetEqual(self.p2.article_set.all(),
        [
            '<Article: NASA finds intelligent life on Earth>',
            '<Article: NASA uses Python>',
            '<Article: Oxygen-free diet works wonders>',
        ])
    self.a4.publications.remove(self.p2)
    self.assertQuerysetEqual(self.p2.article_set.all(),
        [
            '<Article: NASA finds intelligent life on Earth>',
            '<Article: NASA uses Python>',
        ])
    self.assertQuerysetEqual(self.a4.publications.all(), [])
    # And from the other end
    self.p2.article_set.remove(self.a3)
    self.assertQuerysetEqual(self.p2.article_set.all(),
        [
            '<Article: NASA uses Python>',
        ])
    self.assertQuerysetEqual(self.a3.publications.all(), [])
def test_assign(self):
    """Assigning to a relation set replaces the existing members entirely."""
    # Relation sets can be assigned. Assignment clears any existing set members
    self.p2.article_set = [self.a4, self.a3]
    self.assertQuerysetEqual(self.p2.article_set.all(),
        [
            '<Article: NASA finds intelligent life on Earth>',
            '<Article: Oxygen-free diet works wonders>',
        ])
    self.assertQuerysetEqual(self.a4.publications.all(),
        ['<Publication: Science News>'])
    # Primary-key values are accepted in place of instances.
    self.a4.publications = [self.p3.id]
    self.assertQuerysetEqual(self.p2.article_set.all(),
        ['<Article: NASA finds intelligent life on Earth>'])
    self.assertQuerysetEqual(self.a4.publications.all(),
        ['<Publication: Science Weekly>'])
    # An alternate to calling clear() is to assign the empty set
    self.p2.article_set = []
    self.assertQuerysetEqual(self.p2.article_set.all(), [])
    self.a4.publications = []
    self.assertQuerysetEqual(self.a4.publications.all(), [])
def test_assign_ids(self):
    """Relation sets can be assigned using primary-key values instead of instances."""
    # Relation sets can also be set using primary key values
    self.p2.article_set = [self.a4.id, self.a3.id]
    self.assertQuerysetEqual(self.p2.article_set.all(),
        [
            '<Article: NASA finds intelligent life on Earth>',
            '<Article: Oxygen-free diet works wonders>',
        ])
    self.assertQuerysetEqual(self.a4.publications.all(),
        ['<Publication: Science News>'])
    self.a4.publications = [self.p3.id]
    self.assertQuerysetEqual(self.p2.article_set.all(),
        ['<Article: NASA finds intelligent life on Earth>'])
    self.assertQuerysetEqual(self.a4.publications.all(),
        ['<Publication: Science Weekly>'])
def test_forward_assign_with_queryset(self):
    """Assigning a queryset derived from the relation itself must not be emptied by the clear step."""
    # Ensure that querysets used in m2m assignments are pre-evaluated
    # so their value isn't affected by the clearing operation in
    # ManyRelatedObjectsDescriptor.__set__. Refs #19816.
    self.a1.publications = [self.p1, self.p2]
    qs = self.a1.publications.filter(title='The Python Journal')
    self.a1.publications = qs
    self.assertEqual(1, self.a1.publications.count())
    # The source queryset still evaluates to one row after reassignment.
    self.assertEqual(1, qs.count())
def test_reverse_assign_with_queryset(self):
    """Same as the forward case, but assigning through the reverse descriptor."""
    # Ensure that querysets used in M2M assignments are pre-evaluated
    # so their value isn't affected by the clearing operation in
    # ReverseManyRelatedObjectsDescriptor.__set__. Refs #19816.
    self.p1.article_set = [self.a1, self.a2]
    qs = self.p1.article_set.filter(headline='Django lets you build Web apps easily')
    self.p1.article_set = qs
    self.assertEqual(1, self.p1.article_set.count())
    # The source queryset still evaluates to one row after reassignment.
    self.assertEqual(1, qs.count())
def test_clear(self):
    """clear() empties a relation set from either side without deleting the objects.

    NOTE: the final line of this method was garbled by extraction residue
    (trailing dataset table cells); restored to valid Python.
    """
    # Relation sets can be cleared:
    self.p2.article_set.clear()
    self.assertQuerysetEqual(self.p2.article_set.all(), [])
    self.assertQuerysetEqual(self.a4.publications.all(), [])
    # And you can clear from the other end
    self.p2.article_set.add(self.a3, self.a4)
    self.assertQuerysetEqual(self.p2.article_set.all(),
        [
            '<Article: NASA finds intelligent life on Earth>',
            '<Article: Oxygen-free diet works wonders>',
        ])
    self.assertQuerysetEqual(self.a4.publications.all(),
        [
            '<Publication: Science News>',
        ])
    self.a4.publications.clear()
    self.assertQuerysetEqual(self.a4.publications.all(), [])
    self.assertQuerysetEqual(self.p2.article_set.all(),
        ['<Article: NASA finds intelligent life on Earth>'])
#
# Author: Gregory Fleischer (gfleischer@gmail.com)
#
# Copyright (c) 2011 RAFT Team
#
# This file is part of RAFT.
#
# RAFT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RAFT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RAFT. If not, see <http://www.gnu.org/licenses/>.
#
# Classes to support parsing Paros converstation log files
import re, time, os
from urllib import parse as urlparse
import logging
class paros_parse_message():
    """Iterator that parses a Paros proxy conversation log into RAFT capture tuples.

    Messages are delimited by ``==== N ==========`` lines.  Each iteration
    yields a tuple:
    (origin, host, hostip, url, status, datetime, request, response,
     method, content_type, extras)
    where ``request`` and ``response`` are ``(header_bytes, body_bytes)``
    pairs or ``None``.
    """
    def __init__(self, parosfile):
        # Message delimiter between recorded conversations.
        self.re_message = re.compile(br'^==== (\d+) ==========\s*$')
        # XXX: copied from burp log parse, should refactor
        self.re_request = re.compile(br'^(\S+)\s+((?:https?://(?:\S+\.)+\w+(?::\d+)?)?/.*)\s+HTTP/\d+\.\d+\s*$')
        self.re_response = re.compile(br'^HTTP/\d+\.\d+\s+(\d{3}).*\s*$')
        self.re_content_length = re.compile(br'^Content-Length:\s*(\d+)\s*$', re.I)
        self.re_chunked = re.compile(br'^Transfer-Encoding:\s*chunked\s*$', re.I)
        self.re_date = re.compile(br'^Date:\s*(\w+,.*\w+)\s*$', re.I)
        self.re_content_type = re.compile(br'^Content-Type:\s*([-_+0-9a-z.]+/[-_+0-9a-z.]+(?:\s*;\s*\S+=\S+)*)\s*$', re.I)
        self.logger = logging.getLogger(__name__)
        self.logger.info('Processing Paros message log file: %s' % (parosfile))
        self.parosfile = parosfile
        # Logs are read as raw bytes; all downstream parsing is bytes-based.
        self.file = open(self.parosfile, 'rb')

    def __iter__(self):
        return self

    def __normalize_url(self, url):
        """Return (normalized_url, host), stripping explicit default ports.

        BUGFIX: the original used netloc.replace(b':80', b'') which removed
        ':80' *anywhere* in the netloc, corrupting hosts with ports such as
        'example.com:8080' into 'example.com80'.  Only a trailing default
        port is stripped now.
        """
        parsed = urlparse.urlsplit(url)
        scheme = parsed.scheme
        netloc = parsed.netloc
        # hostname can be None for scheme-less request URIs; normalize to b''
        # so callers can safely .decode() it.
        host = parsed.hostname or b''
        # TODO: maybe change this to use hostname and port?
        if b'http' == scheme and netloc.endswith(b':80'):
            netloc = netloc[:-3]
        elif b'https' == scheme and netloc.endswith(b':443'):
            netloc = netloc[:-4]
        url = scheme + b'://' + netloc + parsed.path
        if parsed.query:
            url += b'?' + parsed.query
        if parsed.fragment:
            url += b'#' + parsed.fragment
        return (url, host)

    def __fixup_datetime(self, datetime):
        """Convert an RFC 1123 Date header value to local asctime bytes.

        Returns b'' when the value is missing or unparseable.
        """
        if datetime:
            try:
                tm = time.strptime(str(datetime, 'ascii'), '%a, %d %b %Y %H:%M:%S %Z')
                # Shift from the header's zone to local time before formatting.
                tm = time.localtime(time.mktime(tm) - time.timezone)
                return bytes(time.asctime(tm), 'ascii')
            except Exception as e:
                self.logger.debug('Failed parsing datetime [%s]: %s' % (datetime, e))
            return b''
        else:
            return b''

    def __process_buf(self, buf):
        """Split one message's lines into request/response (header, body) tuples.

        The first line matching re_request starts the request; the first
        subsequent line matching re_response starts the response.  A blank
        line marks the header/body boundary on each side.
        """
        method = b''
        status = 0
        url = b''
        origin = 'PROXY'
        host = b''
        hostip = b''
        datetime = b''
        content_type = b''
        request, response = None, None
        request_buf = []
        response_buf = []
        have_request, have_response = False, False
        # offsets index the first body line (the line after the blank separator)
        request_offset, response_offset = 0, 0
        for line in buf:
            if have_response:
                response_buf.append(line)
                m = self.re_date.search(line)
                if m:
                    datetime = self.__fixup_datetime(m.group(1))
                else:
                    m = self.re_content_type.search(line)
                    if m:
                        content_type = m.group(1)
                if 0 == response_offset and 0 == len(line.rstrip()):
                    response_offset = len(response_buf)
            elif not have_request:
                m = self.re_request.search(line)
                if m:
                    method = m.group(1)
                    requrl = m.group(2)
                    url, host = self.__normalize_url(requrl)
                    request_buf.append(line)
                    have_request = True
            else:
                m = self.re_response.search(line)
                if m:
                    status = m.group(1)
                    response_buf.append(line)
                    have_response = True
                else:
                    request_buf.append(line)
                    if 0 == request_offset and 0 == len(line.rstrip()):
                        request_offset = len(request_buf)
        if len(request_buf) > 0:
            request_header = b''.join(request_buf[0:request_offset])
            request_body = b''.join(request_buf[request_offset:])
            if 0 == len(request_body.rstrip()):
                request_body = b''
            request = (request_header, request_body)
        if len(response_buf) > 0:
            response_header = b''.join(response_buf[0:response_offset])
            response_body = b''.join(response_buf[response_offset:])
            if 0 == len(response_body.rstrip()):
                response_body = b''
            response = (response_header, response_body)
        # status is the raw bytes capture group until converted here.
        if bytes == type(status):
            status = int(status.decode('ascii', 'ignore'))
        return (origin, host.decode('utf-8', 'ignore'), hostip.decode('utf-8', 'ignore'), url.decode('utf-8', 'ignore'), status, datetime.decode('utf-8', 'ignore'), request, response, method.decode('utf-8', 'ignore'), content_type.decode('utf-8', 'ignore'), {})

    def __next__(self):
        """Return the next parsed message tuple; raise StopIteration at EOF.

        The delimiter line that ends a message is consumed and discarded, so
        the next call resumes at the following message's first content line.
        """
        buf = []
        while True:
            line = self.file.readline()
            if not line:
                break
            m = self.re_message.search(line)
            if m:
                if len(buf) > 0:
                    return self.__process_buf(buf)
                buf = []
            else:
                buf.append(line)
        # Flush whatever trails the final delimiter.
        if len(buf) > 0:
            return self.__process_buf(buf)
        self.logger.debug('reached end of file')
        self.file.close()
        raise StopIteration
if '__main__' == __name__:
    # test code: parse the file given on the command line and print each record
    import sys
    if (len(sys.argv) != 3):
        sys.stderr.write('usage: %s [message] [file]\n' % sys.argv[0])
        sys.exit(1)
    mode = sys.argv[1]
    parosfile = sys.argv[2]
    # Verbose logging to stderr so parse failures are visible during testing.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    if 'message' == mode:
        count = 0
        for result in paros_parse_message(parosfile):
            print(result)
            count += 1
        print(('processed %d records' % (count)))
    else:
        # The original final line was garbled by extraction residue
        # ("raise Exception | unknown | ..."); restored to a plain failure
        # for unsupported modes.
        raise Exception
import Logging
import NIOCore
import NIOHTTP1
import Foundation
import X509
/// Translates raw `HTTPServerRequestPart`s coming off the channel into Vapor
/// `Request` objects, either collecting the body into a single buffer or
/// exposing it as a backpressure-aware stream.
final class HTTPServerRequestDecoder: ChannelDuplexHandler, RemovableChannelHandler {
    typealias InboundIn = HTTPServerRequestPart
    typealias InboundOut = Request
    typealias OutboundIn = Never

    /// Where the handler is in the head/body/end lifecycle of one request.
    enum RequestState {
        case ready
        case awaitingBody(Request)
        /// Body fit in one buffer matching Content-Length; waiting for `.end`.
        case awaitingEnd(Request, ByteBuffer)
        case streamingBody(Request.BodyStream)
        /// Response already sent; discard remaining request parts.
        case skipping
    }

    var requestState: RequestState
    var bodyStreamState: HTTPBodyStreamState

    var logger: Logger {
        self.application.logger
    }
    var application: Application

    /// Caches the TLS peer certificate chain so it is extracted from the
    /// pipeline at most once per connection (it cannot change post-handshake).
    enum CertificateChainCache {
        case miss
        case hit(ValidatedCertificateChain?)

        /// Returns the cached chain, running `updater` only on the first call.
        mutating func lookup(_ updater: () throws -> ValidatedCertificateChain?) -> ValidatedCertificateChain? {
            switch self {
            case .miss:
                let result = try? updater()
                self = .hit(result)
                return result
            case .hit(let result):
                return result
            }
        }
    }

    var validatedCertificateChainCache: CertificateChainCache

    init(application: Application) {
        self.application = application
        self.requestState = .ready
        self.bodyStreamState = .init()
        self.validatedCertificateChainCache = .miss
    }

    func channelRead(context: ChannelHandlerContext, data: NIOAny) {
        assert(context.channel.eventLoop.inEventLoop)
        let part = self.unwrapInboundIn(data)
        self.logger.trace("Decoded HTTP part: \(part)")
        switch part {
        case .head(let head):
            switch self.requestState {
            case .ready:
                // The certificate chain will only be avalable when configuring `customCertificateVerifyCallbackWithMetadata`.
                // Since the certificate chain is validated during the handshake, we only collect it once and cache it.
                let peerCertificateChain = self.validatedCertificateChainCache.lookup {
                    try? context.pipeline.syncOperations.nioSSL_peerValidatedCertificateChain()?.usingX509Certificates()
                }
                /// Note: It is critical that `URI.init(path:)` is used here, _NOT_ `URI.init(string:)`. The following
                /// example illustrates why:
                ///
                ///     let uri1 = URI(string: "//foo/bar?a#b"), uri2 = URI(path: "//foo/bar?a#b")
                ///
                ///     print(uri1.host, uri1.path, uri1.query, uri1.fragment)
                ///     // Optional(foo) /bar a b
                ///     print(uri2.host, uri2.path, uri2.query, uri2.fragment)
                ///     // nil /foo/bar a b
                ///
                /// The latter parse has the correct semantics for an HTTP request's URL (which, in the absence of an
                /// accompanying scheme, should never have a host); the former follows strict RFC 3986 rules.
                let request = Request(
                    application: self.application,
                    method: head.method,
                    url: .init(path: head.uri),
                    version: head.version,
                    headersNoUpdate: head.headers,
                    remoteAddress: context.channel.remoteAddress,
                    peerCertificateChain: peerCertificateChain,
                    logger: self.application.logger,
                    byteBufferAllocator: context.channel.allocator,
                    on: context.channel.eventLoop
                )
                // HTTP/2 connections are always reusable; HTTP/1 follows the headers.
                switch head.version.major {
                case 2:
                    request.requestBox.withLockedValue { $0.isKeepAlive = true }
                default:
                    request.requestBox.withLockedValue { $0.isKeepAlive = head.isKeepAlive }
                }
                self.requestState = .awaitingBody(request)
            default: assertionFailure("Unexpected state: \(self.requestState)")
            }
        case .body(let buffer):
            switch self.requestState {
            case .ready, .awaitingEnd:
                assertionFailure("Unexpected state: \(self.requestState)")
            case .awaitingBody(let request):
                // We cannot assume that a request's content-length represents the length of all of the body
                // because when a request is g-zipped, content-length refers to the gzipped length.
                // Therefore, we can receive data after our expected end-of-request
                // When decompressing data, more bytes come out than came in, so content-length does not represent the maximum length
                if request.headers.first(name: .contentLength) == buffer.readableBytes.description {
                    self.requestState = .awaitingEnd(request, buffer)
                } else {
                    // Body larger than a single read: switch to streaming and
                    // deliver the request downstream immediately.
                    let stream = Request.BodyStream(on: context.eventLoop, byteBufferAllocator: context.channel.allocator)
                    request.bodyStorage.withLockedValue { $0 = .stream(stream) }
                    self.requestState = .streamingBody(stream)
                    context.fireChannelRead(self.wrapInboundOut(request))
                    self.handleBodyStreamStateResult(
                        context: context,
                        self.bodyStreamState.didReadBytes(buffer),
                        stream: stream
                    )
                }
            case .streamingBody(let stream):
                self.handleBodyStreamStateResult(
                    context: context,
                    self.bodyStreamState.didReadBytes(buffer),
                    stream: stream
                )
            case .skipping: break
            }
        case .end(let tailHeaders):
            assert(tailHeaders == nil, "Tail headers are not supported.")
            switch self.requestState {
            case .ready: assertionFailure("Unexpected state: \(self.requestState)")
            case .awaitingBody(let request):
                // No body parts arrived: deliver a body-less request.
                context.fireChannelRead(self.wrapInboundOut(request))
            case .awaitingEnd(let request, let buffer):
                request.bodyStorage.withLockedValue { $0 = .collected(buffer) }
                context.fireChannelRead(self.wrapInboundOut(request))
            case .streamingBody(let stream):
                self.handleBodyStreamStateResult(
                    context: context,
                    self.bodyStreamState.didEnd(),
                    stream: stream
                )
            case .skipping: break
            }
            self.requestState = .ready
        }
    }

    /// Gates downstream read requests through the body-stream state machine so
    /// the channel is not read faster than the stream consumer can keep up.
    func read(context: ChannelHandlerContext) {
        switch self.requestState {
        case .streamingBody(let stream):
            self.handleBodyStreamStateResult(
                context: context,
                self.bodyStreamState.didReceiveReadRequest(),
                stream: stream
            )
        default:
            context.read()
        }
    }

    func errorCaught(context: ChannelHandlerContext, error: Error) {
        switch self.requestState {
        case .streamingBody(let stream):
            // Propagate the failure into the in-flight body stream.
            self.handleBodyStreamStateResult(
                context: context,
                self.bodyStreamState.didError(error),
                stream: stream
            )
        default:
            break
        }
        if error is HTTPParserError {
            self.logger.debug("Invalid HTTP request, will close connection: \(String(reflecting: error))")
        }
        context.fireErrorCaught(error)
    }

    func channelInactive(context: ChannelHandlerContext) {
        switch self.requestState {
        case .streamingBody(let stream):
            // Connection dropped mid-body: terminate the stream.
            self.handleBodyStreamStateResult(
                context: context,
                self.bodyStreamState.didEnd(),
                stream: stream
            )
        default:
            break
        }
        context.fireChannelInactive()
    }

    /// Applies one `HTTPBodyStreamState.Result`: performs the requested write
    /// or close on `stream` and issues a channel read when permitted.
    func handleBodyStreamStateResult(
        context: ChannelHandlerContext,
        _ result: HTTPBodyStreamState.Result,
        stream: Request.BodyStream
    ) {
        switch result.action {
        case .nothing: break
        case .write(let buffer):
            // NIOLoopBound keeps (context, self) event-loop-confined across the callback.
            let box = NIOLoopBound((context, self), eventLoop: context.eventLoop)
            stream.write(.buffer(buffer)).whenComplete { writeResult in
                let (context, handler) = box.value
                switch writeResult {
                case .failure(let error):
                    handler.handleBodyStreamStateResult(
                        context: context,
                        handler.bodyStreamState.didError(error),
                        stream: stream
                    )
                case .success: break
                }
                // Drain the next buffered chunk (or close) after each write completes.
                handler.handleBodyStreamStateResult(
                    context: context,
                    handler.bodyStreamState.didWrite(),
                    stream: stream
                )
            }
        case .close(let maybeError):
            if let error = maybeError {
                stream.write(.error(error), promise: nil)
            } else {
                stream.write(.end, promise: nil)
            }
        }
        if result.callRead {
            context.read()
        }
    }

    func userInboundEventTriggered(context: ChannelHandlerContext, event: Any) {
        switch event {
        case is HTTPServerResponseEncoder.ResponseEndSentEvent:
            switch self.requestState {
            case .streamingBody(let bodyStream):
                // Response ended during request stream.
                if !bodyStream.isBeingRead {
                    self.logger.trace("Response already sent, draining unhandled request stream.")
                    bodyStream.read { _, promise in
                        promise?.succeed(())
                    }
                }
            case .awaitingBody, .awaitingEnd:
                // Response ended before request started streaming.
                self.logger.trace("Response already sent, skipping request body.")
                self.requestState = .skipping
            case .ready, .skipping:
                // Response ended after request had been read.
                break
            }
        case is ChannelShouldQuiesceEvent:
            switch self.requestState {
            case .ready:
                self.logger.trace("Closing keep-alive HTTP connection since server is going away")
                context.channel.close(mode: .all, promise: nil)
            default:
                self.logger.debug("A request is currently in-flight")
                context.fireUserInboundEventTriggered(event)
            }
        default:
            context.fireUserInboundEventTriggered(event)
        }
    }
}
extension NIOHTTP1.HTTPPart: Swift.CustomStringConvertible {
    /// A human-readable rendering of the HTTP part, used in trace logging.
    public var description: String {
        switch self {
        case .head(let head):
            return "head: \(head)"
        case .body(let body):
            return "body: \(body)"
        case .end(.some(let trailers)):
            return "end: \(trailers)"
        case .end(.none):
            return "end"
        }
    }
}
/// A state machine coordinating backpressure between the channel and a
/// streaming request body: bytes read from the channel are written to the
/// stream one at a time, and channel reads are withheld while a write is
/// still in flight.
///
/// NOTE: the closing brace of this declaration was garbled by extraction
/// residue in the original file; restored here. Logic is unchanged.
struct HTTPBodyStreamState: CustomStringConvertible {
    /// The outcome of feeding one event into the state machine.
    struct Result {
        enum Action {
            case nothing
            case write(ByteBuffer)
            case close(Error?)
        }
        let action: Action
        /// Whether `context.read()` should be invoked after handling `action`.
        let callRead: Bool
    }

    private struct BufferState {
        /// Chunks received from the channel but not yet written to the stream.
        var bufferedWrites: CircularBuffer<ByteBuffer>
        /// A read request arrived while writing; replay it once drained.
        var heldUpRead: Bool
        /// `.end` arrived while writing; close the stream once drained.
        var hasClosed: Bool

        mutating func append(_ buffer: ByteBuffer) {
            self.bufferedWrites.append(buffer)
        }

        var isEmpty: Bool {
            return self.bufferedWrites.isEmpty
        }

        mutating func removeFirst() -> ByteBuffer {
            return self.bufferedWrites.removeFirst()
        }
    }

    private enum State {
        case idle
        case writing(BufferState)
        case error(Error)
    }

    private var state: State

    var description: String {
        "\(self.state)"
    }

    init() {
        self.state = .idle
    }

    /// The channel delivered `buffer`: write it immediately if idle,
    /// otherwise queue it behind the in-flight write.
    mutating func didReadBytes(_ buffer: ByteBuffer) -> Result {
        switch self.state {
        case .idle:
            self.state = .writing(.init(
                bufferedWrites: .init(),
                heldUpRead: false,
                hasClosed: false
            ))
            return .init(action: .write(buffer), callRead: false)
        case .writing(var buffers):
            buffers.append(buffer)
            self.state = .writing(buffers)
            return .init(action: .nothing, callRead: false)
        case .error:
            return .init(action: .nothing, callRead: false)
        }
    }

    /// Downstream asked for more data; allow the read only when idle,
    /// otherwise remember it until the current write drains.
    mutating func didReceiveReadRequest() -> Result {
        switch self.state {
        case .idle:
            return .init(action: .nothing, callRead: true)
        case .writing(var buffers):
            buffers.heldUpRead = true
            self.state = .writing(buffers)
            return .init(action: .nothing, callRead: false)
        case .error:
            return .init(action: .nothing, callRead: false)
        }
    }

    /// The request body ended; close now if idle, or defer until drained.
    mutating func didEnd() -> Result {
        switch self.state {
        case .idle:
            return .init(action: .close(nil), callRead: false)
        case .writing(var buffers):
            buffers.hasClosed = true
            self.state = .writing(buffers)
            return .init(action: .nothing, callRead: false)
        case .error:
            return .init(action: .nothing, callRead: false)
        }
    }

    /// An error occurred; close with it if idle, otherwise latch it so the
    /// pending write completion surfaces it.
    mutating func didError(_ error: Error) -> Result {
        switch self.state {
        case .idle:
            self.state = .error(error)
            return .init(action: .close(error), callRead: false)
        case .writing:
            self.state = .error(error)
            return .init(action: .nothing, callRead: false)
        case .error:
            return .init(action: .nothing, callRead: false)
        }
    }

    /// A write to the stream completed: emit the next buffered chunk, or go
    /// idle (replaying a held-up read / deferred close), or surface an error.
    mutating func didWrite() -> Result {
        switch self.state {
        case .idle:
            self.illegalTransition()
        case .writing(var buffers):
            if buffers.isEmpty {
                self.state = .idle
                return .init(
                    action: buffers.hasClosed ? .close(nil) : .nothing,
                    callRead: buffers.heldUpRead
                )
            } else {
                let first = buffers.removeFirst()
                self.state = .writing(buffers)
                return .init(action: .write(first), callRead: false)
            }
        case .error(let error):
            return .init(action: .close(error), callRead: false)
        }
    }

    private func illegalTransition(_ function: String = #function) -> Never {
        preconditionFailure("illegal transition \(function) in \(self)")
    }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.