| repo_id | file_path | content | __index_level_0__ |
|---|---|---|---|
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/connectors/zxJDBC.py | # connectors/zxJDBC.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sys
from . import Connector
class ZxJDBCConnector(Connector):
driver = "zxjdbc"
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
supports_unicode_binds = True
supports_unicode_statements = sys.version > "2.5.0+"
description_encoding = None
default_paramstyle = "qmark"
jdbc_db_name = None
jdbc_driver_name = None
@classmethod
def dbapi(cls):
from com.ziclix.python.sql import zxJDBC
return zxJDBC
def _driver_kwargs(self):
"""Return kw arg dict to be sent to connect()."""
return {}
def _create_jdbc_url(self, url):
"""Create a JDBC url from a :class:`~sqlalchemy.engine.url.URL`"""
return "jdbc:%s://%s%s/%s" % (
self.jdbc_db_name,
url.host,
url.port is not None and ":%s" % url.port or "",
url.database,
)
def create_connect_args(self, url):
opts = self._driver_kwargs()
opts.update(url.query)
return [
[
self._create_jdbc_url(url),
url.username,
url.password,
self.jdbc_driver_name,
],
opts,
]
def is_disconnect(self, e, connection, cursor):
if not isinstance(e, self.dbapi.ProgrammingError):
return False
e = str(e)
return "connection is closed" in e or "cursor is closed" in e
def _get_server_version_info(self, connection):
# use connection.connection.dbversion, and parse appropriately
# to get a tuple
raise NotImplementedError()
| 0 |
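A minimal usage sketch, assuming a dialect subclass that sets jdbc_db_name to "mysql": _create_jdbc_url() composes the JDBC URL from a SQLAlchemy URL object.
from sqlalchemy.engine.url import make_url
url = make_url("mysql+zxjdbc://scott:tiger@localhost:3306/test")
connector = ZxJDBCConnector()
connector.jdbc_db_name = "mysql"  # illustrative; normally set by the dialect subclass
assert connector._create_jdbc_url(url) == "jdbc:mysql://localhost:3306/test"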
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/connectors/__init__.py | # connectors/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
class Connector(object):
pass
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/connectors/pyodbc.py | # connectors/pyodbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
from . import Connector
from .. import util
class PyODBCConnector(Connector):
driver = "pyodbc"
# this is no longer False for pyodbc in general
supports_sane_rowcount_returning = True
supports_sane_multi_rowcount = False
supports_unicode_statements = True
supports_unicode_binds = True
supports_native_decimal = True
default_paramstyle = "named"
# for non-DSN connections, this *may* be used to
# hold the desired driver name
pyodbc_driver_name = None
def __init__(self, supports_unicode_binds=None, **kw):
super(PyODBCConnector, self).__init__(**kw)
if supports_unicode_binds is not None:
self.supports_unicode_binds = supports_unicode_binds
@classmethod
def dbapi(cls):
return __import__("pyodbc")
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
opts.update(url.query)
keys = opts
query = url.query
connect_args = {}
for param in ("ansi", "unicode_results", "autocommit"):
if param in keys:
connect_args[param] = util.asbool(keys.pop(param))
if "odbc_connect" in keys:
connectors = [util.unquote_plus(keys.pop("odbc_connect"))]
else:
def check_quote(token):
if ";" in str(token):
token = "{%s}" % token.replace("}", "}}")
return token
keys = dict((k, check_quote(v)) for k, v in keys.items())
dsn_connection = "dsn" in keys or (
"host" in keys and "database" not in keys
)
if dsn_connection:
connectors = [
"dsn=%s" % (keys.pop("host", "") or keys.pop("dsn", ""))
]
else:
port = ""
if "port" in keys and "port" not in query:
port = ",%d" % int(keys.pop("port"))
connectors = []
driver = keys.pop("driver", self.pyodbc_driver_name)
if driver is None and keys:
# note if keys is empty, this is a totally blank URL
util.warn(
"No driver name specified; "
"this is expected by PyODBC when using "
"DSN-less connections"
)
else:
connectors.append("DRIVER={%s}" % driver)
connectors.extend(
[
"Server=%s%s" % (keys.pop("host", ""), port),
"Database=%s" % keys.pop("database", ""),
]
)
user = keys.pop("user", None)
if user:
connectors.append("UID=%s" % user)
connectors.append("PWD=%s" % keys.pop("password", ""))
else:
connectors.append("Trusted_Connection=Yes")
# if set to 'Yes', the ODBC layer will try to automagically
# convert textual data from your database encoding to your
# client encoding. This should obviously be set to 'No' if
# you query a cp1253 encoded database from a latin1 client...
if "odbc_autotranslate" in keys:
connectors.append(
"AutoTranslate=%s" % keys.pop("odbc_autotranslate")
)
connectors.extend(["%s=%s" % (k, v) for k, v in keys.items()])
return [[";".join(connectors)], connect_args]
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.ProgrammingError):
return "The cursor's connection has been closed." in str(
e
) or "Attempt to use a closed connection." in str(e)
else:
return False
# def initialize(self, connection):
# super(PyODBCConnector, self).initialize(connection)
def _dbapi_version(self):
if not self.dbapi:
return ()
return self._parse_dbapi_version(self.dbapi.version)
def _parse_dbapi_version(self, vers):
m = re.match(r"(?:py.*-)?([\d\.]+)(?:-(\w+))?", vers)
if not m:
return ()
vers = tuple([int(x) for x in m.group(1).split(".")])
if m.group(2):
vers += (m.group(2),)
return vers
def _get_server_version_info(self, connection, allow_chars=True):
# NOTE: this function is not reliable, particularly when
# freetds is in use. Implement database-specific server version
# queries.
dbapi_con = connection.connection
version = []
r = re.compile(r"[.\-]")
for n in r.split(dbapi_con.getinfo(self.dbapi.SQL_DBMS_VER)):
try:
version.append(int(n))
except ValueError:
if allow_chars:
version.append(n)
return tuple(version)
def set_isolation_level(self, connection, level):
# adjust for ConnectionFairy being present
# allows attribute set e.g. "connection.autocommit = True"
# to work properly
if hasattr(connection, "connection"):
connection = connection.connection
if level == "AUTOCOMMIT":
connection.autocommit = True
else:
connection.autocommit = False
super(PyODBCConnector, self).set_isolation_level(connection, level)
| 0 |
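A hedged sketch of create_connect_args() for a hostname-style URL; the driver name below is an illustrative assumption, not something the connector mandates.
from sqlalchemy.engine.url import make_url
url = make_url(
    "mssql+pyodbc://scott:tiger@myhost:1433/mydb"
    "?driver=ODBC+Driver+17+for+SQL+Server"
)
args, kw = PyODBCConnector().create_connect_args(url)
# expected per the logic above: DRIVER, then Server/Database, then credentials
assert args[0] == (
    "DRIVER={ODBC Driver 17 for SQL Server};"
    "Server=myhost,1433;Database=mydb;UID=scott;PWD=tiger"
)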
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/databases/__init__.py | # databases/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Include imports from the sqlalchemy.dialects package for backwards
compatibility with pre 0.6 versions.
"""
from ..dialects.firebird import base as firebird
from ..dialects.mssql import base as mssql
from ..dialects.mysql import base as mysql
from ..dialects.oracle import base as oracle
from ..dialects.postgresql import base as postgresql
from ..dialects.sqlite import base as sqlite
from ..dialects.sybase import base as sybase
postgres = postgresql
__all__ = (
"firebird",
"mssql",
"mysql",
"postgresql",
"sqlite",
"oracle",
"sybase",
)
| 0 |
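A one-line sketch of the shim in use: the pre-0.6 name resolves to the modern dialect module.
from sqlalchemy import databases
assert databases.postgres is databases.postgresql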
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/util/queue.py | # util/queue.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""An adaptation of Py2.3/2.4's Queue module which supports reentrant
behavior, using RLock instead of Lock for its mutex object. The
Queue object is used exclusively by the sqlalchemy.pool.QueuePool
class.
This is to support the connection pool's usage of weakref callbacks to return
connections to the underlying Queue, which can in extremely
rare cases be invoked within the ``get()`` method of the Queue itself,
producing a ``put()`` inside the ``get()`` and therefore a reentrant
condition.
"""
from collections import deque
from time import time as _time
from .compat import threading
__all__ = ["Empty", "Full", "Queue"]
class Empty(Exception):
"Exception raised by Queue.get(block=0)/get_nowait()."
pass
class Full(Exception):
"Exception raised by Queue.put(block=0)/put_nowait()."
pass
class Queue:
def __init__(self, maxsize=0, use_lifo=False):
"""Initialize a queue object with a given maximum size.
If `maxsize` is <= 0, the queue size is infinite.
If `use_lifo` is True, this Queue acts like a Stack (LIFO).
"""
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the two conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = threading.RLock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = threading.Condition(self.mutex)
# If this queue uses LIFO or FIFO
self.use_lifo = use_lifo
def qsize(self):
"""Return the approximate size of the queue (not reliable!)."""
self.mutex.acquire()
n = self._qsize()
self.mutex.release()
return n
def empty(self):
"""Return True if the queue is empty, False otherwise (not
reliable!)."""
self.mutex.acquire()
n = self._empty()
self.mutex.release()
return n
def full(self):
"""Return True if the queue is full, False otherwise (not
reliable!)."""
self.mutex.acquire()
n = self._full()
self.mutex.release()
return n
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
If optional args `block` is True and `timeout` is None (the
default), block if necessary until a free slot is
available. If `timeout` is a positive number, it blocks at
most `timeout` seconds and raises the ``Full`` exception if no
free slot was available within that time. Otherwise (`block`
is false), put an item on the queue if a free slot is
immediately available, else raise the ``Full`` exception
(`timeout` is ignored in that case).
"""
self.not_full.acquire()
try:
if not block:
if self._full():
raise Full
elif timeout is None:
while self._full():
self.not_full.wait()
else:
if timeout < 0:
raise ValueError("'timeout' must be a positive number")
endtime = _time() + timeout
while self._full():
remaining = endtime - _time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.not_empty.notify()
finally:
self.not_full.release()
def put_nowait(self, item):
"""Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the ``Full`` exception.
"""
return self.put(item, False)
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
If optional args `block` is True and `timeout` is None (the
default), block if necessary until an item is available. If
`timeout` is a positive number, it blocks at most `timeout`
seconds and raises the ``Empty`` exception if no item was
available within that time. Otherwise (`block` is false),
return an item if one is immediately available, else raise the
``Empty`` exception (`timeout` is ignored in that case).
"""
self.not_empty.acquire()
try:
if not block:
if self._empty():
raise Empty
elif timeout is None:
while self._empty():
self.not_empty.wait()
else:
if timeout < 0:
raise ValueError("'timeout' must be a positive number")
endtime = _time() + timeout
while self._empty():
remaining = endtime - _time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
finally:
self.not_empty.release()
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the ``Empty`` exception.
"""
return self.get(False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.maxsize = maxsize
self.queue = deque()
def _qsize(self):
return len(self.queue)
# Check whether the queue is empty
def _empty(self):
return not self.queue
# Check whether the queue is full
def _full(self):
return self.maxsize > 0 and len(self.queue) == self.maxsize
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
if self.use_lifo:
# LIFO
return self.queue.pop()
else:
# FIFO
return self.queue.popleft()
| 0 |
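A brief usage sketch of the reentrant Queue, showing LIFO mode and the non-blocking accessors defined above.
from sqlalchemy.util.queue import Queue, Empty
q = Queue(maxsize=2, use_lifo=True)
q.put("a")
q.put("b")
assert q.get() == "b"  # LIFO: most recently added item first
assert q.get_nowait() == "a"
try:
    q.get(block=False)
except Empty:
    pass  # an empty queue raises Empty rather than blocking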
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/util/topological.py | # util/topological.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Topological sorting algorithms."""
from .. import util
from ..exc import CircularDependencyError
__all__ = ["sort", "sort_as_subsets", "find_cycles"]
def sort_as_subsets(tuples, allitems, deterministic_order=False):
edges = util.defaultdict(set)
for parent, child in tuples:
edges[child].add(parent)
Set = util.OrderedSet if deterministic_order else set
todo = Set(allitems)
while todo:
output = Set()
for node in todo:
if todo.isdisjoint(edges[node]):
output.add(node)
if not output:
raise CircularDependencyError(
"Circular dependency detected.",
find_cycles(tuples, allitems),
_gen_edges(edges),
)
todo.difference_update(output)
yield output
def sort(tuples, allitems, deterministic_order=False):
"""sort the given list of items by dependency.
'tuples' is a list of tuples representing a partial ordering.
'deterministic_order' keeps items within a dependency tier in list order.
"""
for set_ in sort_as_subsets(tuples, allitems, deterministic_order):
for s in set_:
yield s
def find_cycles(tuples, allitems):
# adapted from:
# http://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html
edges = util.defaultdict(set)
for parent, child in tuples:
edges[parent].add(child)
nodes_to_test = set(edges)
output = set()
# we'd like to find all nodes that are
# involved in cycles, so we do the full
# pass through the whole thing for each
# node in the original list.
# we can go just through parent edge nodes.
# if a node is only a child and never a parent,
# by definition it can't be part of a cycle. same
# if it's not in the edges at all.
for node in nodes_to_test:
stack = [node]
todo = nodes_to_test.difference(stack)
while stack:
top = stack[-1]
for node in edges[top]:
if node in stack:
cyc = stack[stack.index(node) :]
todo.difference_update(cyc)
output.update(cyc)
if node in todo:
stack.append(node)
todo.remove(node)
break
else:
node = stack.pop()
return output
def _gen_edges(edges):
return set([(right, left) for left in edges for right in edges[left]])
| 0 |
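A usage sketch of the algorithms above, where tuples are (parent, child) pairs: sort() yields children after their parents, and find_cycles() returns the nodes participating in cycles.
from sqlalchemy.util.topological import find_cycles, sort
assert list(sort([("a", "b"), ("b", "c")], ["c", "b", "a"])) == ["a", "b", "c"]
assert find_cycles([("a", "b"), ("b", "a")], ["a", "b"]) == {"a", "b"}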
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/util/compat.py | # util/compat.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handle Python version/platform incompatibilities."""
import collections
import contextlib
import inspect
import operator
import sys
py36 = sys.version_info >= (3, 6)
py33 = sys.version_info >= (3, 3)
py35 = sys.version_info >= (3, 5)
py32 = sys.version_info >= (3, 2)
py3k = sys.version_info >= (3, 0)
py2k = sys.version_info < (3, 0)
py265 = sys.version_info >= (2, 6, 5)
jython = sys.platform.startswith("java")
pypy = hasattr(sys, "pypy_version_info")
win32 = sys.platform.startswith("win")
cpython = not pypy and not jython # TODO: something better for this ?
contextmanager = contextlib.contextmanager
dottedgetter = operator.attrgetter
namedtuple = collections.namedtuple
next = next # noqa
FullArgSpec = collections.namedtuple(
"FullArgSpec",
[
"args",
"varargs",
"varkw",
"defaults",
"kwonlyargs",
"kwonlydefaults",
"annotations",
],
)
try:
import threading
except ImportError:
import dummy_threading as threading # noqa
# work around http://bugs.python.org/issue2646
if py265:
safe_kwarg = lambda arg: arg # noqa
else:
safe_kwarg = str
def inspect_getfullargspec(func):
"""Fully vendored version of getfullargspec from Python 3.3."""
if inspect.ismethod(func):
func = func.__func__
if not inspect.isfunction(func):
raise TypeError("{!r} is not a Python function".format(func))
co = func.__code__
if not inspect.iscode(co):
raise TypeError("{!r} is not a code object".format(co))
nargs = co.co_argcount
names = co.co_varnames
nkwargs = co.co_kwonlyargcount if py3k else 0
args = list(names[:nargs])
kwonlyargs = list(names[nargs : nargs + nkwargs])
nargs += nkwargs
varargs = None
if co.co_flags & inspect.CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & inspect.CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return FullArgSpec(
args,
varargs,
varkw,
func.__defaults__,
kwonlyargs,
func.__kwdefaults__ if py3k else None,
func.__annotations__ if py3k else {},
)
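# Usage sketch (illustrative): the vendored spec matches the stdlib's
# inspect.getfullargspec() for a plain function:
#   def f(a, b=1, *args, **kw): pass
#   inspect_getfullargspec(f)
#   -> FullArgSpec(args=['a', 'b'], varargs='args', varkw='kw', defaults=(1,),
#      kwonlyargs=[], kwonlydefaults=None, annotations={})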
if py3k:
import base64
import builtins
import configparser
import itertools
import pickle
from functools import reduce
from io import BytesIO as byte_buffer
from io import StringIO
from itertools import zip_longest
from urllib.parse import (
quote_plus,
unquote_plus,
parse_qsl,
quote,
unquote,
)
string_types = (str,)
binary_types = (bytes,)
binary_type = bytes
text_type = str
int_types = (int,)
iterbytes = iter
itertools_filterfalse = itertools.filterfalse
itertools_filter = filter
itertools_imap = map
exec_ = getattr(builtins, "exec")
import_ = getattr(builtins, "__import__")
print_ = getattr(builtins, "print")
def b(s):
return s.encode("latin-1")
def b64decode(x):
return base64.b64decode(x.encode("ascii"))
def b64encode(x):
return base64.b64encode(x).decode("ascii")
def decode_backslashreplace(text, encoding):
return text.decode(encoding, errors="backslashreplace")
def cmp(a, b):
return (a > b) - (a < b)
def raise_(
exception, with_traceback=None, replace_context=None, from_=False
):
r"""implement "raise" with cause support.
:param exception: exception to raise
:param with_traceback: will call exception.with_traceback()
:param replace_context: an as-yet-unsupported feature. This is
an exception object which we are "replacing", e.g., it's our
"cause" but we don't want it printed. Basically just what
``__suppress_context__`` does but we don't want to suppress
the enclosing context, if any. So for now we make it the
cause.
:param from\_: the cause. this actually sets the cause and doesn't
hope to hide it someday.
"""
if with_traceback is not None:
exception = exception.with_traceback(with_traceback)
if from_ is not False:
exception.__cause__ = from_
elif replace_context is not None:
# no good solution here, we would like to have the exception
# have only the context of replace_context.__context__ so that the
# intermediary exception does not change, but we can't figure
# that out.
exception.__cause__ = replace_context
try:
raise exception
finally:
# credit to
# https://cosmicpercolator.com/2016/01/13/exception-leaks-in-python-2-and-3/
# as the __traceback__ object creates a cycle
del exception, replace_context, from_, with_traceback
def u(s):
return s
def ue(s):
return s
if py32:
callable = callable # noqa
else:
def callable(fn): # noqa
return hasattr(fn, "__call__")
else:
import base64
import ConfigParser as configparser # noqa
import itertools
from StringIO import StringIO # noqa
from cStringIO import StringIO as byte_buffer # noqa
from itertools import izip_longest as zip_longest # noqa
from urllib import quote # noqa
from urllib import quote_plus # noqa
from urllib import unquote # noqa
from urllib import unquote_plus # noqa
from urlparse import parse_qsl # noqa
try:
import cPickle as pickle
except ImportError:
import pickle # noqa
string_types = (basestring,) # noqa
binary_types = (bytes,)
binary_type = str
text_type = unicode # noqa
int_types = int, long # noqa
callable = callable # noqa
cmp = cmp # noqa
reduce = reduce # noqa
b64encode = base64.b64encode
b64decode = base64.b64decode
itertools_filterfalse = itertools.ifilterfalse
itertools_filter = itertools.ifilter
itertools_imap = itertools.imap
def b(s):
return s
def exec_(func_text, globals_, lcl=None):
if lcl is None:
exec("exec func_text in globals_")
else:
exec("exec func_text in globals_, lcl")
def iterbytes(buf):
return (ord(byte) for byte in buf)
def import_(*args):
if len(args) == 4:
args = args[0:3] + ([str(arg) for arg in args[3]],)
return __import__(*args)
def print_(*args, **kwargs):
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
for arg in args:
if not isinstance(arg, basestring): # noqa
arg = str(arg)
fp.write(arg)
def u(s):
# this differs from what six does, which doesn't support non-ASCII
# strings - we only use u() with
# literal source strings, and all our source files with non-ascii
# in them (all are tests) are utf-8 encoded.
return unicode(s, "utf-8") # noqa
def ue(s):
return unicode(s, "unicode_escape") # noqa
def decode_backslashreplace(text, encoding):
try:
return text.decode(encoding)
except UnicodeDecodeError:
# regular "backslashreplace" for an incompatible encoding raises:
# "TypeError: don't know how to handle UnicodeDecodeError in
# error callback"
return repr(text)[1:-1].decode()
def safe_bytestring(text):
# py2k only
if not isinstance(text, string_types):
return unicode(text).encode("ascii", errors="backslashreplace")
elif isinstance(text, unicode):
return text.encode("ascii", errors="backslashreplace")
else:
return text
exec(
"def raise_(exception, with_traceback=None, replace_context=None, "
"from_=False):\n"
" if with_traceback:\n"
" raise type(exception), exception, with_traceback\n"
" else:\n"
" raise exception\n"
)
if py35:
def _formatannotation(annotation, base_module=None):
"""vendored from python 3.7
"""
if getattr(annotation, "__module__", None) == "typing":
return repr(annotation).replace("typing.", "")
if isinstance(annotation, type):
if annotation.__module__ in ("builtins", base_module):
return annotation.__qualname__
return annotation.__module__ + "." + annotation.__qualname__
return repr(annotation)
def inspect_formatargspec(
args,
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=(),
kwonlydefaults={},
annotations={},
formatarg=str,
formatvarargs=lambda name: "*" + name,
formatvarkw=lambda name: "**" + name,
formatvalue=lambda value: "=" + repr(value),
formatreturns=lambda text: " -> " + text,
formatannotation=_formatannotation,
):
"""Copy formatargspec from python 3.7 standard library.
Python 3 has deprecated formatargspec and requested that Signature
be used instead, however this requires a full reimplementation
of formatargspec() in terms of creating Parameter objects and such.
Instead of introducing all the object-creation overhead and having
to reinvent from scratch, just copy their compatibility routine.
Ultimately we would need to rewrite our "decorator" routine completely
which is not really worth it right now, until all Python 2.x support
is dropped.
"""
def formatargandannotation(arg):
result = formatarg(arg)
if arg in annotations:
result += ": " + formatannotation(annotations[arg])
return result
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i, arg in enumerate(args):
spec = formatargandannotation(arg)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(formatargandannotation(varargs)))
else:
if kwonlyargs:
specs.append("*")
if kwonlyargs:
for kwonlyarg in kwonlyargs:
spec = formatargandannotation(kwonlyarg)
if kwonlydefaults and kwonlyarg in kwonlydefaults:
spec += formatvalue(kwonlydefaults[kwonlyarg])
specs.append(spec)
if varkw is not None:
specs.append(formatvarkw(formatargandannotation(varkw)))
result = "(" + ", ".join(specs) + ")"
if "return" in annotations:
result += formatreturns(formatannotation(annotations["return"]))
return result
elif py2k:
from inspect import formatargspec as _inspect_formatargspec
def inspect_formatargspec(*spec, **kw):
# convert for a potential FullArgSpec from compat.getfullargspec()
return _inspect_formatargspec(*spec[0:4], **kw) # noqa
else:
from inspect import formatargspec as inspect_formatargspec # noqa
# Fix deprecation of accessing ABCs straight from collections module
# (which will stop working in 3.8).
if py33:
import collections.abc as collections_abc
else:
import collections as collections_abc # noqa
@contextlib.contextmanager
def nested(*managers):
"""Implement contextlib.nested, mostly for unit tests.
As tests still need to run on py2.6 we can't use multiple-with yet.
Function is removed in py3k but also emits deprecation warning in 2.7
so just roll it here for everyone.
"""
exits = []
vars_ = []
exc = (None, None, None)
try:
for mgr in managers:
exit_ = mgr.__exit__
enter = mgr.__enter__
vars_.append(enter())
exits.append(exit_)
yield vars_
except:
exc = sys.exc_info()
finally:
while exits:
exit_ = exits.pop() # noqa
try:
if exit_(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
reraise(exc[0], exc[1], exc[2])
def raise_from_cause(exception, exc_info=None):
r"""legacy. use raise\_()"""
if exc_info is None:
exc_info = sys.exc_info()
exc_type, exc_value, exc_tb = exc_info
cause = exc_value if exc_value is not exception else None
reraise(type(exception), exception, tb=exc_tb, cause=cause)
def reraise(tp, value, tb=None, cause=None):
r"""legacy. use raise\_()"""
raise_(value, with_traceback=tb, from_=cause)
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass.
Drops the middle class upon creation.
Source: http://lucumr.pocoo.org/2013/5/21/porting-to-python-3-redux/
"""
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass("temporary_class", None, {})
if py3k:
from datetime import timezone
else:
from datetime import datetime
from datetime import timedelta
from datetime import tzinfo
class timezone(tzinfo):
"""Minimal port of python 3 timezone object"""
__slots__ = "_offset"
def __init__(self, offset):
if not isinstance(offset, timedelta):
raise TypeError("offset must be a timedelta")
if not self._minoffset <= offset <= self._maxoffset:
raise ValueError(
"offset must be a timedelta "
"strictly between -timedelta(hours=24) and "
"timedelta(hours=24)."
)
self._offset = offset
def __eq__(self, other):
if type(other) != timezone:
return False
return self._offset == other._offset
def __hash__(self):
return hash(self._offset)
def __repr__(self):
return "sqlalchemy.util.%s(%r)" % (
self.__class__.__name__,
self._offset,
)
def __str__(self):
return self.tzname(None)
def utcoffset(self, dt):
return self._offset
def tzname(self, dt):
return self._name_from_offset(self._offset)
def dst(self, dt):
return None
def fromutc(self, dt):
if isinstance(dt, datetime):
if dt.tzinfo is not self:
raise ValueError("fromutc: dt.tzinfo " "is not self")
return dt + self._offset
raise TypeError(
"fromutc() argument must be a datetime instance" " or None"
)
@staticmethod
def _timedelta_to_microseconds(timedelta):
"""backport of timedelta._to_microseconds()"""
return (
timedelta.days * (24 * 3600) + timedelta.seconds
) * 1000000 + timedelta.microseconds
@staticmethod
def _divmod_timedeltas(a, b):
"""backport of timedelta.__divmod__"""
q, r = divmod(
timezone._timedelta_to_microseconds(a),
timezone._timedelta_to_microseconds(b),
)
return q, timedelta(0, 0, r)
@staticmethod
def _name_from_offset(delta):
if not delta:
return "UTC"
if delta < timedelta(0):
sign = "-"
delta = -delta
else:
sign = "+"
hours, rest = timezone._divmod_timedeltas(
delta, timedelta(hours=1)
)
minutes, rest = timezone._divmod_timedeltas(
rest, timedelta(minutes=1)
)
result = "UTC%s%02d:%02d" % (sign, hours, minutes)
if rest.seconds:
result += ":%02d" % (rest.seconds,)
if rest.microseconds:
result += ".%06d" % (rest.microseconds,)
return result
_maxoffset = timedelta(hours=23, minutes=59)
_minoffset = -_maxoffset
timezone.utc = timezone(timedelta(0))
| 0 |
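A short sketch of the timezone backport above; on py3k the stdlib datetime.timezone is imported instead and behaves the same way.
from datetime import timedelta
from sqlalchemy.util.compat import timezone
assert timezone(timedelta(hours=5, minutes=30)).tzname(None) == "UTC+05:30"
assert timezone.utc.tzname(None) == "UTC"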
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/util/__init__.py | # util/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from collections import defaultdict # noqa
from contextlib import contextmanager # noqa
from functools import partial # noqa
from functools import update_wrapper # noqa
from ._collections import coerce_generator_arg # noqa
from ._collections import collections_abc # noqa
from ._collections import column_dict # noqa
from ._collections import column_set # noqa
from ._collections import EMPTY_SET # noqa
from ._collections import flatten_iterator # noqa
from ._collections import has_dupes # noqa
from ._collections import has_intersection # noqa
from ._collections import IdentitySet # noqa
from ._collections import ImmutableContainer # noqa
from ._collections import immutabledict # noqa
from ._collections import ImmutableProperties # noqa
from ._collections import KeyedTuple # noqa
from ._collections import lightweight_named_tuple # noqa
from ._collections import LRUCache # noqa
from ._collections import ordered_column_set # noqa
from ._collections import OrderedDict # noqa
from ._collections import OrderedIdentitySet # noqa
from ._collections import OrderedProperties # noqa
from ._collections import OrderedSet # noqa
from ._collections import PopulateDict # noqa
from ._collections import Properties # noqa
from ._collections import ScopedRegistry # noqa
from ._collections import ThreadLocalRegistry # noqa
from ._collections import to_column_set # noqa
from ._collections import to_list # noqa
from ._collections import to_set # noqa
from ._collections import unique_list # noqa
from ._collections import UniqueAppender # noqa
from ._collections import update_copy # noqa
from ._collections import WeakPopulateDict # noqa
from ._collections import WeakSequence # noqa
from .compat import b # noqa
from .compat import b64decode # noqa
from .compat import b64encode # noqa
from .compat import binary_type # noqa
from .compat import byte_buffer # noqa
from .compat import callable # noqa
from .compat import cmp # noqa
from .compat import cpython # noqa
from .compat import decode_backslashreplace # noqa
from .compat import dottedgetter # noqa
from .compat import inspect_getfullargspec # noqa
from .compat import int_types # noqa
from .compat import iterbytes # noqa
from .compat import itertools_filter # noqa
from .compat import itertools_filterfalse # noqa
from .compat import jython # noqa
from .compat import namedtuple # noqa
from .compat import nested # noqa
from .compat import next # noqa
from .compat import parse_qsl # noqa
from .compat import pickle # noqa
from .compat import print_ # noqa
from .compat import py2k # noqa
from .compat import py33 # noqa
from .compat import py36 # noqa
from .compat import py3k # noqa
from .compat import pypy # noqa
from .compat import quote_plus # noqa
from .compat import raise_ # noqa
from .compat import raise_from_cause # noqa
from .compat import reduce # noqa
from .compat import reraise # noqa
from .compat import safe_kwarg # noqa
from .compat import string_types # noqa
from .compat import StringIO # noqa
from .compat import text_type # noqa
from .compat import threading # noqa
from .compat import timezone # noqa
from .compat import u # noqa
from .compat import ue # noqa
from .compat import unquote # noqa
from .compat import unquote_plus # noqa
from .compat import win32 # noqa
from .compat import with_metaclass # noqa
from .compat import zip_longest # noqa
from .deprecations import deprecated # noqa
from .deprecations import deprecated_cls # noqa
from .deprecations import deprecated_params # noqa
from .deprecations import inject_docstring_text # noqa
from .deprecations import pending_deprecation # noqa
from .deprecations import warn_deprecated # noqa
from .deprecations import warn_pending_deprecation # noqa
from .langhelpers import add_parameter_text # noqa
from .langhelpers import as_interface # noqa
from .langhelpers import asbool # noqa
from .langhelpers import asint # noqa
from .langhelpers import assert_arg_type # noqa
from .langhelpers import attrsetter # noqa
from .langhelpers import bool_or_str # noqa
from .langhelpers import chop_traceback # noqa
from .langhelpers import class_hierarchy # noqa
from .langhelpers import classproperty # noqa
from .langhelpers import clsname_as_plain_name # noqa
from .langhelpers import coerce_kw_type # noqa
from .langhelpers import constructor_copy # noqa
from .langhelpers import counter # noqa
from .langhelpers import decode_slice # noqa
from .langhelpers import decorator # noqa
from .langhelpers import dependencies # noqa
from .langhelpers import dictlike_iteritems # noqa
from .langhelpers import duck_type_collection # noqa
from .langhelpers import ellipses_string # noqa
from .langhelpers import EnsureKWArgType # noqa
from .langhelpers import format_argspec_init # noqa
from .langhelpers import format_argspec_plus # noqa
from .langhelpers import generic_repr # noqa
from .langhelpers import get_callable_argspec # noqa
from .langhelpers import get_cls_kwargs # noqa
from .langhelpers import get_func_kwargs # noqa
from .langhelpers import getargspec_init # noqa
from .langhelpers import group_expirable_memoized_property # noqa
from .langhelpers import hybridmethod # noqa
from .langhelpers import hybridproperty # noqa
from .langhelpers import iterate_attributes # noqa
from .langhelpers import map_bits # noqa
from .langhelpers import md5_hex # noqa
from .langhelpers import memoized_instancemethod # noqa
from .langhelpers import memoized_property # noqa
from .langhelpers import MemoizedSlots # noqa
from .langhelpers import methods_equivalent # noqa
from .langhelpers import monkeypatch_proxied_specials # noqa
from .langhelpers import NoneType # noqa
from .langhelpers import only_once # noqa
from .langhelpers import PluginLoader # noqa
from .langhelpers import portable_instancemethod # noqa
from .langhelpers import quoted_token_parser # noqa
from .langhelpers import safe_reraise # noqa
from .langhelpers import set_creation_order # noqa
from .langhelpers import symbol # noqa
from .langhelpers import unbound_method_to_callable # noqa
from .langhelpers import warn # noqa
from .langhelpers import warn_exception # noqa
from .langhelpers import warn_limited # noqa
from .langhelpers import wrap_callable # noqa
# things that used to be not always available,
# but are now as of current support Python versions
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/util/deprecations.py | # util/deprecations.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Helpers related to deprecation of functions, methods, classes, other
functionality."""
import re
import warnings
from . import compat
from .langhelpers import _hash_limit_string
from .langhelpers import decorator
from .langhelpers import inject_docstring_text
from .langhelpers import inject_param_text
from .. import exc
def warn_deprecated(msg, stacklevel=3):
warnings.warn(msg, exc.SADeprecationWarning, stacklevel=stacklevel)
def warn_deprecated_limited(msg, args, stacklevel=3):
"""Issue a deprecation warning with a parameterized string,
limiting the number of registrations.
"""
if args:
msg = _hash_limit_string(msg, 10, args)
warnings.warn(msg, exc.SADeprecationWarning, stacklevel)
def warn_pending_deprecation(msg, stacklevel=3):
warnings.warn(msg, exc.SAPendingDeprecationWarning, stacklevel=stacklevel)
def deprecated_cls(version, message, constructor="__init__"):
header = ".. deprecated:: %s %s" % (version, (message or ""))
def decorate(cls):
return _decorate_cls_with_warning(
cls,
constructor,
exc.SADeprecationWarning,
message % dict(func=constructor),
header,
)
return decorate
def deprecated(version, message=None, add_deprecation_to_docstring=True):
"""Decorates a function and issues a deprecation warning on use.
:param version:
Issue version in the warning.
:param message:
If provided, issue message in the warning. A sensible default
is used if not provided.
:param add_deprecation_to_docstring:
Default True. If False, the wrapped function's __doc__ is left
as-is. If True, the 'message' is prepended to the docs if
provided, or a sensible default if message is omitted.
"""
if add_deprecation_to_docstring:
header = ".. deprecated:: %s %s" % (version, (message or ""))
else:
header = None
if message is None:
message = "Call to deprecated function %(func)s"
def decorate(fn):
return _decorate_with_warning(
fn,
exc.SADeprecationWarning,
message % dict(func=fn.__name__),
header,
)
return decorate
def deprecated_params(**specs):
"""Decorates a function to warn on use of certain parameters.
e.g. ::
@deprecated_params(
weak_identity_map=(
"0.7",
"the :paramref:`.Session.weak_identity_map parameter "
"is deprecated."
)
)
"""
messages = {}
for param, (version, message) in specs.items():
messages[param] = _sanitize_restructured_text(message)
def decorate(fn):
spec = compat.inspect_getfullargspec(fn)
if spec.defaults is not None:
defaults = dict(
zip(
spec.args[(len(spec.args) - len(spec.defaults)) :],
spec.defaults,
)
)
check_defaults = set(defaults).intersection(messages)
check_kw = set(messages).difference(defaults)
else:
check_defaults = ()
check_kw = set(messages)
@decorator
def warned(fn, *args, **kwargs):
for m in check_defaults:
if kwargs[m] != defaults[m]:
warnings.warn(
messages[m], exc.SADeprecationWarning, stacklevel=3
)
for m in check_kw:
if m in kwargs:
warnings.warn(
messages[m], exc.SADeprecationWarning, stacklevel=3
)
return fn(*args, **kwargs)
doc = fn.__doc__ is not None and fn.__doc__ or ""
if doc:
doc = inject_param_text(
doc,
{
param: ".. deprecated:: %s %s" % (version, (message or ""))
for param, (version, message) in specs.items()
},
)
decorated = warned(fn)
decorated.__doc__ = doc
return decorated
return decorate
def pending_deprecation(
version, message=None, add_deprecation_to_docstring=True
):
"""Decorates a function and issues a pending deprecation warning on use.
:param version:
An approximate future version at which point the pending deprecation
will become deprecated. Not used in messaging.
:param message:
If provided, issue message in the warning. A sensible default
is used if not provided.
:param add_deprecation_to_docstring:
Default True. If False, the wrapped function's __doc__ is left
as-is. If True, the 'message' is prepended to the docs if
provided, or a sensible default if message is omitted.
"""
if add_deprecation_to_docstring:
header = ".. deprecated:: %s (pending) %s" % (version, (message or ""))
else:
header = None
if message is None:
message = "Call to deprecated function %(func)s"
def decorate(fn):
return _decorate_with_warning(
fn,
exc.SAPendingDeprecationWarning,
message % dict(func=fn.__name__),
header,
)
return decorate
def deprecated_option_value(parameter_value, default_value, warning_text):
if parameter_value is None:
return default_value
else:
warn_deprecated(warning_text)
return parameter_value
def _sanitize_restructured_text(text):
def repl(m):
type_, name = m.group(1, 2)
if type_ in ("func", "meth"):
name += "()"
return name
return re.sub(r"\:(\w+)\:`~?(?:_\w+)?\.?(.+?)`", repl, text)
def _decorate_cls_with_warning(
cls, constructor, wtype, message, docstring_header=None
):
doc = cls.__doc__ is not None and cls.__doc__ or ""
if docstring_header is not None:
docstring_header %= dict(func=constructor)
doc = inject_docstring_text(doc, docstring_header, 1)
if type(cls) is type:
clsdict = dict(cls.__dict__)
clsdict["__doc__"] = doc
cls = type(cls.__name__, cls.__bases__, clsdict)
constructor_fn = clsdict[constructor]
else:
cls.__doc__ = doc
constructor_fn = getattr(cls, constructor)
setattr(
cls,
constructor,
_decorate_with_warning(constructor_fn, wtype, message, None),
)
return cls
def _decorate_with_warning(func, wtype, message, docstring_header=None):
"""Wrap a function with a warnings.warn and augmented docstring."""
message = _sanitize_restructured_text(message)
@decorator
def warned(fn, *args, **kwargs):
warnings.warn(message, wtype, stacklevel=3)
return fn(*args, **kwargs)
doc = func.__doc__ is not None and func.__doc__ or ""
if docstring_header is not None:
docstring_header %= dict(func=func.__name__)
doc = inject_docstring_text(doc, docstring_header, 1)
decorated = warned(func)
decorated.__doc__ = doc
decorated._sa_warn = lambda: warnings.warn(message, wtype, stacklevel=3)
return decorated
| 0 |
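A usage sketch with an illustrative function name: the decorator wraps the callable so each call emits SADeprecationWarning, and the ".. deprecated::" header is injected into __doc__.
from sqlalchemy.util.deprecations import deprecated

@deprecated("1.3", "The :func:`.old_helper` function is deprecated.")
def old_helper():
    """Do legacy work."""
    return 42

old_helper()  # emits SADeprecationWarning; the message is sanitized to "old_helper()"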
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/util/_collections.py | # util/_collections.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Collection classes and helpers."""
from __future__ import absolute_import
import operator
import types
import weakref
from .compat import binary_types
from .compat import collections_abc
from .compat import itertools_filterfalse
from .compat import py2k
from .compat import string_types
from .compat import threading
EMPTY_SET = frozenset()
class AbstractKeyedTuple(tuple):
__slots__ = ()
def keys(self):
"""Return a list of string key names for this :class:`.KeyedTuple`.
.. seealso::
:attr:`.KeyedTuple._fields`
"""
return list(self._fields)
class KeyedTuple(AbstractKeyedTuple):
"""``tuple`` subclass that adds labeled names.
E.g.::
>>> k = KeyedTuple([1, 2, 3], labels=["one", "two", "three"])
>>> k.one
1
>>> k.two
2
Result rows returned by :class:`_query.Query` that contain multiple
ORM entities and/or column expressions make use of this
class to return rows.
The :class:`.KeyedTuple` exhibits similar behavior to the
``collections.namedtuple()`` construct provided in the Python
standard library, however it is architected very differently.
Unlike ``collections.namedtuple()``, :class:`.KeyedTuple`
does not rely on creation of custom subtypes in order to represent
a new series of keys, instead each :class:`.KeyedTuple` instance
receives its list of keys in place. The subtype approach
of ``collections.namedtuple()`` introduces significant complexity
and performance overhead, which is not necessary for the
:class:`_query.Query` object's use case.
.. seealso::
:ref:`ormtutorial_querying`
"""
def __new__(cls, vals, labels=None):
t = tuple.__new__(cls, vals)
if labels:
t.__dict__.update(zip(labels, vals))
else:
labels = []
t.__dict__["_labels"] = labels
return t
@property
def _fields(self):
"""Return a tuple of string key names for this :class:`.KeyedTuple`.
This method provides compatibility with ``collections.namedtuple()``.
.. seealso::
:meth:`.KeyedTuple.keys`
"""
return tuple([l for l in self._labels if l is not None])
def __setattr__(self, key, value):
raise AttributeError("Can't set attribute: %s" % key)
def _asdict(self):
"""Return the contents of this :class:`.KeyedTuple` as a dictionary.
This method provides compatibility with ``collections.namedtuple()``,
with the exception that the dictionary returned is **not** ordered.
"""
return {key: self.__dict__[key] for key in self.keys()}
class _LW(AbstractKeyedTuple):
__slots__ = ()
def __new__(cls, vals):
return tuple.__new__(cls, vals)
def __reduce__(self):
# for pickling, degrade down to the regular
# KeyedTuple, thus avoiding anonymous class pickling
# difficulties
return KeyedTuple, (list(self), self._real_fields)
def _asdict(self):
"""Return the contents of this :class:`.KeyedTuple` as a dictionary."""
d = dict(zip(self._real_fields, self))
d.pop(None, None)
return d
class ImmutableContainer(object):
def _immutable(self, *arg, **kw):
raise TypeError("%s object is immutable" % self.__class__.__name__)
__delitem__ = __setitem__ = __setattr__ = _immutable
class immutabledict(ImmutableContainer, dict):
clear = pop = popitem = setdefault = update = ImmutableContainer._immutable
def __new__(cls, *args):
new = dict.__new__(cls)
dict.__init__(new, *args)
return new
def __init__(self, *args):
pass
def __reduce__(self):
return immutabledict, (dict(self),)
def union(self, d):
if not d:
return self
elif not self:
if isinstance(d, immutabledict):
return d
else:
return immutabledict(d)
else:
d2 = immutabledict(self)
dict.update(d2, d)
return d2
def __repr__(self):
return "immutabledict(%s)" % dict.__repr__(self)
class Properties(object):
"""Provide a __getattr__/__setattr__ interface over a dict."""
__slots__ = ("_data",)
def __init__(self, data):
object.__setattr__(self, "_data", data)
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(list(self._data.values()))
def __dir__(self):
return dir(super(Properties, self)) + [
str(k) for k in self._data.keys()
]
def __add__(self, other):
return list(self) + list(other)
def __setitem__(self, key, obj):
self._data[key] = obj
def __getitem__(self, key):
return self._data[key]
def __delitem__(self, key):
del self._data[key]
def __setattr__(self, key, obj):
self._data[key] = obj
def __getstate__(self):
return {"_data": self._data}
def __setstate__(self, state):
object.__setattr__(self, "_data", state["_data"])
def __getattr__(self, key):
try:
return self._data[key]
except KeyError:
raise AttributeError(key)
def __contains__(self, key):
return key in self._data
def as_immutable(self):
"""Return an immutable proxy for this :class:`.Properties`."""
return ImmutableProperties(self._data)
def update(self, value):
self._data.update(value)
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
def keys(self):
return list(self._data)
def values(self):
return list(self._data.values())
def items(self):
return list(self._data.items())
def has_key(self, key):
return key in self._data
def clear(self):
self._data.clear()
class OrderedProperties(Properties):
"""Provide a __getattr__/__setattr__ interface with an OrderedDict
as backing store."""
__slots__ = ()
def __init__(self):
Properties.__init__(self, OrderedDict())
class ImmutableProperties(ImmutableContainer, Properties):
"""Provide immutable dict/object attribute to an underlying dictionary."""
__slots__ = ()
class OrderedDict(dict):
"""A dict that returns keys/values/items in the order they were added."""
__slots__ = ("_list",)
def __reduce__(self):
return OrderedDict, (self.items(),)
def __init__(self, ____sequence=None, **kwargs):
self._list = []
if ____sequence is None:
if kwargs:
self.update(**kwargs)
else:
self.update(____sequence, **kwargs)
def clear(self):
self._list = []
dict.clear(self)
def copy(self):
return self.__copy__()
def __copy__(self):
return OrderedDict(self)
def sort(self, *arg, **kw):
self._list.sort(*arg, **kw)
def update(self, ____sequence=None, **kwargs):
if ____sequence is not None:
if hasattr(____sequence, "keys"):
for key in ____sequence.keys():
self.__setitem__(key, ____sequence[key])
else:
for key, value in ____sequence:
self[key] = value
if kwargs:
self.update(kwargs)
def setdefault(self, key, value):
if key not in self:
self.__setitem__(key, value)
return value
else:
return self.__getitem__(key)
def __iter__(self):
return iter(self._list)
def keys(self):
return list(self)
def values(self):
return [self[key] for key in self._list]
def items(self):
return [(key, self[key]) for key in self._list]
if py2k:
def itervalues(self):
return iter(self.values())
def iterkeys(self):
return iter(self)
def iteritems(self):
return iter(self.items())
def __setitem__(self, key, obj):
if key not in self:
try:
self._list.append(key)
except AttributeError:
# work around Python pickle loads() with
# dict subclass (seems to ignore __setstate__?)
self._list = [key]
dict.__setitem__(self, key, obj)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._list.remove(key)
def pop(self, key, *default):
present = key in self
value = dict.pop(self, key, *default)
if present:
self._list.remove(key)
return value
def popitem(self):
item = dict.popitem(self)
self._list.remove(item[0])
return item
class OrderedSet(set):
def __init__(self, d=None):
set.__init__(self)
self._list = []
if d is not None:
self._list = unique_list(d)
set.update(self, self._list)
else:
self._list = []
def add(self, element):
if element not in self:
self._list.append(element)
set.add(self, element)
def remove(self, element):
set.remove(self, element)
self._list.remove(element)
def insert(self, pos, element):
if element not in self:
self._list.insert(pos, element)
set.add(self, element)
def discard(self, element):
if element in self:
self._list.remove(element)
set.remove(self, element)
def clear(self):
set.clear(self)
self._list = []
def __getitem__(self, key):
return self._list[key]
def __iter__(self):
return iter(self._list)
def __add__(self, other):
return self.union(other)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._list)
__str__ = __repr__
def update(self, iterable):
for e in iterable:
if e not in self:
self._list.append(e)
set.add(self, e)
return self
__ior__ = update
def union(self, other):
result = self.__class__(self)
result.update(other)
return result
__or__ = union
def intersection(self, other):
other = set(other)
return self.__class__(a for a in self if a in other)
__and__ = intersection
def symmetric_difference(self, other):
other = set(other)
result = self.__class__(a for a in self if a not in other)
result.update(a for a in other if a not in self)
return result
__xor__ = symmetric_difference
def difference(self, other):
other = set(other)
return self.__class__(a for a in self if a not in other)
__sub__ = difference
def intersection_update(self, other):
other = set(other)
set.intersection_update(self, other)
self._list = [a for a in self._list if a in other]
return self
__iand__ = intersection_update
def symmetric_difference_update(self, other):
set.symmetric_difference_update(self, other)
self._list = [a for a in self._list if a in self]
self._list += [a for a in other._list if a in self]
return self
__ixor__ = symmetric_difference_update
def difference_update(self, other):
set.difference_update(self, other)
self._list = [a for a in self._list if a in self]
return self
__isub__ = difference_update
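# Usage sketch (illustrative): OrderedSet keeps first-seen insertion order
# through set operations:
#   s = OrderedSet([3, 1, 2, 1])
#   list(s)                -> [3, 1, 2]
#   list(s.union([4, 1]))  -> [3, 1, 2, 4]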
class IdentitySet(object):
"""A set that considers only object id() for uniqueness.
This strategy has edge cases for builtin types; it's possible to have
two 'foo' strings in one of these sets, for example. Use sparingly.
"""
def __init__(self, iterable=None):
self._members = dict()
if iterable:
self.update(iterable)
def add(self, value):
self._members[id(value)] = value
def __contains__(self, value):
return id(value) in self._members
def remove(self, value):
del self._members[id(value)]
def discard(self, value):
try:
self.remove(value)
except KeyError:
pass
def pop(self):
try:
pair = self._members.popitem()
return pair[1]
except KeyError:
raise KeyError("pop from an empty set")
def clear(self):
self._members.clear()
def __cmp__(self, other):
raise TypeError("cannot compare sets using cmp()")
def __eq__(self, other):
if isinstance(other, IdentitySet):
return self._members == other._members
else:
return False
def __ne__(self, other):
if isinstance(other, IdentitySet):
return self._members != other._members
else:
return True
def issubset(self, iterable):
other = self.__class__(iterable)
if len(self) > len(other):
return False
for m in itertools_filterfalse(
other._members.__contains__, iter(self._members.keys())
):
return False
return True
def __le__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issubset(other)
def __lt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) < len(other) and self.issubset(other)
def issuperset(self, iterable):
other = self.__class__(iterable)
if len(self) < len(other):
return False
for m in itertools_filterfalse(
self._members.__contains__, iter(other._members.keys())
):
return False
return True
def __ge__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issuperset(other)
def __gt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) > len(other) and self.issuperset(other)
def union(self, iterable):
result = self.__class__()
members = self._members
result._members.update(members)
result._members.update((id(obj), obj) for obj in iterable)
return result
def __or__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.union(other)
def update(self, iterable):
self._members.update((id(obj), obj) for obj in iterable)
def __ior__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.update(other)
return self
def difference(self, iterable):
result = self.__class__()
members = self._members
other = {id(obj) for obj in iterable}
result._members.update(
((k, v) for k, v in members.items() if k not in other)
)
return result
def __sub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.difference(other)
def difference_update(self, iterable):
self._members = self.difference(iterable)._members
def __isub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.difference_update(other)
return self
def intersection(self, iterable):
result = self.__class__()
members = self._members
other = {id(obj) for obj in iterable}
result._members.update(
(k, v) for k, v in members.items() if k in other
)
return result
def __and__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.intersection(other)
def intersection_update(self, iterable):
self._members = self.intersection(iterable)._members
def __iand__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.intersection_update(other)
return self
def symmetric_difference(self, iterable):
result = self.__class__()
members = self._members
other = {id(obj): obj for obj in iterable}
result._members.update(
((k, v) for k, v in members.items() if k not in other)
)
result._members.update(
((k, v) for k, v in other.items() if k not in members)
)
return result
def __xor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.symmetric_difference(other)
def symmetric_difference_update(self, iterable):
self._members = self.symmetric_difference(iterable)._members
def __ixor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.symmetric_difference_update(other)
return self
def copy(self):
return type(self)(iter(self._members.values()))
__copy__ = copy
def __len__(self):
return len(self._members)
def __iter__(self):
return iter(self._members.values())
def __hash__(self):
raise TypeError("set objects are unhashable")
def __repr__(self):
return "%s(%r)" % (type(self).__name__, list(self._members.values()))
class WeakSequence(object):
def __init__(self, __elements=()):
# adapted from weakref.WeakKeyDictionary, prevent reference
# cycles in the collection itself
def _remove(item, selfref=weakref.ref(self)):
self = selfref()
if self is not None:
self._storage.remove(item)
self._remove = _remove
self._storage = [
weakref.ref(element, _remove) for element in __elements
]
def append(self, item):
self._storage.append(weakref.ref(item, self._remove))
def __len__(self):
return len(self._storage)
def __iter__(self):
return (
obj for obj in (ref() for ref in self._storage) if obj is not None
)
def __getitem__(self, index):
try:
obj = self._storage[index]
        except IndexError:
raise IndexError("Index %s out of range" % index)
else:
return obj()
class OrderedIdentitySet(IdentitySet):
def __init__(self, iterable=None):
IdentitySet.__init__(self)
self._members = OrderedDict()
if iterable:
for o in iterable:
self.add(o)
class PopulateDict(dict):
"""A dict which populates missing values via a creation function.
Note the creation function takes a key, unlike
collections.defaultdict.
"""
def __init__(self, creator):
self.creator = creator
def __missing__(self, key):
self[key] = val = self.creator(key)
return val
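# A minimal usage sketch (hypothetical creator): unlike
# collections.defaultdict, the creation function receives the missing key:
#
#     squares = PopulateDict(lambda key: key * key)
#     squares[4]   # -> 16, computed by the creator and stored
#     squares[4]   # -> 16, served from the dict on later access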
class WeakPopulateDict(dict):
"""Like PopulateDict, but assumes a self + a method and does not create
a reference cycle.
"""
def __init__(self, creator_method):
self.creator = creator_method.__func__
weakself = creator_method.__self__
self.weakself = weakref.ref(weakself)
def __missing__(self, key):
self[key] = val = self.creator(self.weakself(), key)
return val
# Define collections that are capable of storing
# ColumnElement objects as hashable keys/elements.
# At this point, these are mostly historical, things
# used to be more complicated.
column_set = set
column_dict = dict
ordered_column_set = OrderedSet
_getters = PopulateDict(operator.itemgetter)
_property_getters = PopulateDict(
lambda idx: property(operator.itemgetter(idx))
)
def unique_list(seq, hashfunc=None):
seen = set()
seen_add = seen.add
if not hashfunc:
return [x for x in seq if x not in seen and not seen_add(x)]
else:
return [
x
for x in seq
if hashfunc(x) not in seen and not seen_add(hashfunc(x))
]
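# A minimal usage sketch (hypothetical values): first-seen order is kept and
# duplicates dropped; an optional hashfunc supplies the deduplication key:
#
#     unique_list([3, 1, 3, 2, 1])                      # -> [3, 1, 2]
#     unique_list(["a", "B", "A"], hashfunc=str.lower)  # -> ["a", "B"]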
class UniqueAppender(object):
"""Appends items to a collection ensuring uniqueness.
Additional appends() of the same object are ignored. Membership is
    determined by identity (``is``), not equality (``==``).
"""
def __init__(self, data, via=None):
self.data = data
self._unique = {}
if via:
self._data_appender = getattr(data, via)
elif hasattr(data, "append"):
self._data_appender = data.append
elif hasattr(data, "add"):
self._data_appender = data.add
def append(self, item):
id_ = id(item)
if id_ not in self._unique:
self._data_appender(item)
self._unique[id_] = True
def __iter__(self):
return iter(self.data)
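# A minimal usage sketch (hypothetical values): repeat appends of the same
# object are ignored, while equal-but-distinct objects are both kept:
#
#     target = []
#     appender = UniqueAppender(target)
#     item = [1]
#     appender.append(item)
#     appender.append(item)   # ignored: same identity
#     appender.append([1])    # kept: equal value, different object
#     target                  # -> [[1], [1]]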
def coerce_generator_arg(arg):
if len(arg) == 1 and isinstance(arg[0], types.GeneratorType):
return list(arg[0])
else:
return arg
def to_list(x, default=None):
if x is None:
return default
if not isinstance(x, collections_abc.Iterable) or isinstance(
x, string_types + binary_types
):
return [x]
elif isinstance(x, list):
return x
else:
return list(x)
def has_intersection(set_, iterable):
r"""return True if any items of set\_ are present in iterable.
Goes through special effort to ensure __hash__ is not called
on items in iterable that don't support it.
"""
# TODO: optimize, write in C, etc.
return bool(set_.intersection([i for i in iterable if i.__hash__]))
def to_set(x):
if x is None:
return set()
if not isinstance(x, set):
return set(to_list(x))
else:
return x
def to_column_set(x):
if x is None:
return column_set()
if not isinstance(x, column_set):
return column_set(to_list(x))
else:
return x
def update_copy(d, _new=None, **kw):
"""Copy the given dict and update with the given values."""
d = d.copy()
if _new:
d.update(_new)
d.update(**kw)
return d
def flatten_iterator(x):
"""Given an iterator of which further sub-elements may also be
iterators, flatten the sub-elements into a single iterator.
"""
for elem in x:
if not isinstance(elem, str) and hasattr(elem, "__iter__"):
for y in flatten_iterator(elem):
yield y
else:
yield elem
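# A minimal usage sketch (hypothetical values): nested iterables flatten,
# while strings are treated as atoms rather than iterated character-wise:
#
#     list(flatten_iterator([1, [2, [3, 4]], "ab"]))   # -> [1, 2, 3, 4, "ab"]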
class LRUCache(dict):
"""Dictionary with 'squishy' removal of least
recently used items.
Note that either get() or [] should be used here, but
    generally it's not safe to do an "in" check first as the dictionary
can change subsequent to that call.
"""
__slots__ = "capacity", "threshold", "size_alert", "_counter", "_mutex"
def __init__(self, capacity=100, threshold=0.5, size_alert=None):
self.capacity = capacity
self.threshold = threshold
self.size_alert = size_alert
self._counter = 0
self._mutex = threading.Lock()
def _inc_counter(self):
self._counter += 1
return self._counter
def get(self, key, default=None):
item = dict.get(self, key, default)
if item is not default:
item[2] = self._inc_counter()
return item[1]
else:
return default
def __getitem__(self, key):
item = dict.__getitem__(self, key)
item[2] = self._inc_counter()
return item[1]
def values(self):
return [i[1] for i in dict.values(self)]
def setdefault(self, key, value):
if key in self:
return self[key]
else:
self[key] = value
return value
def __setitem__(self, key, value):
item = dict.get(self, key)
if item is None:
item = [key, value, self._inc_counter()]
dict.__setitem__(self, key, item)
else:
item[1] = value
self._manage_size()
@property
def size_threshold(self):
return self.capacity + self.capacity * self.threshold
def _manage_size(self):
if not self._mutex.acquire(False):
return
try:
size_alert = bool(self.size_alert)
while len(self) > self.capacity + self.capacity * self.threshold:
if size_alert:
size_alert = False
self.size_alert(self)
by_counter = sorted(
dict.values(self), key=operator.itemgetter(2), reverse=True
)
for item in by_counter[self.capacity :]:
try:
del self[item[0]]
except KeyError:
# deleted elsewhere; skip
continue
finally:
self._mutex.release()
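# A minimal usage sketch (hypothetical values): once the size exceeds
# capacity * (1 + threshold), the least recently used items are pruned
# back down to `capacity`:
#
#     cache = LRUCache(capacity=2, threshold=0.5)
#     for key in "abcd":
#         cache[key] = key    # at "d", size 4 > 3, so oldest entries drop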
_lw_tuples = LRUCache(100)
def lightweight_named_tuple(name, fields):
hash_ = (name,) + tuple(fields)
tp_cls = _lw_tuples.get(hash_)
if tp_cls:
return tp_cls
tp_cls = type(
name,
(_LW,),
dict(
[
(field, _property_getters[idx])
for idx, field in enumerate(fields)
if field is not None
]
+ [("__slots__", ())]
),
)
tp_cls._real_fields = fields
tp_cls._fields = tuple([f for f in fields if f is not None])
_lw_tuples[hash_] = tp_cls
return tp_cls
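# A minimal usage sketch (hypothetical fields), assuming the _LW tuple base
# (defined elsewhere in this module) constructs from a single iterable:
#
#     Point = lightweight_named_tuple("Point", ["x", "y"])
#     p = Point([3, 4])
#     p.x, p.y    # -> (3, 4), via the generated itemgetter properties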
class ScopedRegistry(object):
"""A Registry that can store one or multiple instances of a single
class on the basis of a "scope" function.
The object implements ``__call__`` as the "getter", so by
calling ``myregistry()`` the contained object is returned
for the current scope.
:param createfunc:
a callable that returns a new object to be placed in the registry
:param scopefunc:
a callable that will return a key to store/retrieve an object.
"""
def __init__(self, createfunc, scopefunc):
"""Construct a new :class:`.ScopedRegistry`.
:param createfunc: A creation function that will generate
a new value for the current scope, if none is present.
:param scopefunc: A function that returns a hashable
token representing the current scope (such as, current
thread identifier).
"""
self.createfunc = createfunc
self.scopefunc = scopefunc
self.registry = {}
def __call__(self):
key = self.scopefunc()
try:
return self.registry[key]
except KeyError:
return self.registry.setdefault(key, self.createfunc())
def has(self):
"""Return True if an object is present in the current scope."""
return self.scopefunc() in self.registry
def set(self, obj):
"""Set the value for the current scope."""
self.registry[self.scopefunc()] = obj
def clear(self):
"""Clear the current scope, if any."""
try:
del self.registry[self.scopefunc()]
except KeyError:
pass
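# A minimal usage sketch (hypothetical scope function, using Python 3's
# threading.get_ident): one object per scope, created lazily on first call:
#
#     import threading
#     registry = ScopedRegistry(createfunc=dict,
#                               scopefunc=threading.get_ident)
#     registry() is registry()   # -> True within the same thread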
class ThreadLocalRegistry(ScopedRegistry):
"""A :class:`.ScopedRegistry` that uses a ``threading.local()``
variable for storage.
"""
def __init__(self, createfunc):
self.createfunc = createfunc
self.registry = threading.local()
def __call__(self):
try:
return self.registry.value
except AttributeError:
val = self.registry.value = self.createfunc()
return val
def has(self):
return hasattr(self.registry, "value")
def set(self, obj):
self.registry.value = obj
def clear(self):
try:
del self.registry.value
except AttributeError:
pass
def has_dupes(sequence, target):
"""Given a sequence and search object, return True if there's more
than one, False if zero or one of them.
"""
# compare to .index version below, this version introduces less function
# overhead and is usually the same speed. At 15000 items (way bigger than
# a relationship-bound collection in memory usually is) it begins to
# fall behind the other version only by microseconds.
c = 0
for item in sequence:
if item is target:
c += 1
if c > 1:
return True
return False
# .index version. the two __contains__ calls as well
# as .index() and isinstance() slow this down.
# def has_dupes(sequence, target):
# if target not in sequence:
# return False
# elif not isinstance(sequence, collections_abc.Sequence):
# return False
#
# idx = sequence.index(target)
# return target in sequence[idx + 1:]
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/util/langhelpers.py | # util/langhelpers.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to help with the creation, loading and introspection of
modules, classes, hierarchies, attributes, functions, and methods.
"""
from functools import update_wrapper
import hashlib
import inspect
import itertools
import operator
import re
import sys
import textwrap
import types
import warnings
from . import _collections
from . import compat
from .. import exc
def md5_hex(x):
if compat.py3k:
x = x.encode("utf-8")
m = hashlib.md5()
m.update(x)
return m.hexdigest()
class safe_reraise(object):
"""Reraise an exception after invoking some
handler code.
Stores the existing exception info before
invoking so that it is maintained across a potential
coroutine context switch.
e.g.::
try:
sess.commit()
except:
with safe_reraise():
sess.rollback()
"""
__slots__ = ("warn_only", "_exc_info")
def __init__(self, warn_only=False):
self.warn_only = warn_only
def __enter__(self):
self._exc_info = sys.exc_info()
def __exit__(self, type_, value, traceback):
# see #2703 for notes
if type_ is None:
exc_type, exc_value, exc_tb = self._exc_info
self._exc_info = None # remove potential circular references
if not self.warn_only:
compat.raise_(
exc_value, with_traceback=exc_tb,
)
else:
if not compat.py3k and self._exc_info and self._exc_info[1]:
# emulate Py3K's behavior of telling us when an exception
# occurs in an exception handler.
warn(
"An exception has occurred during handling of a "
"previous exception. The previous exception "
"is:\n %s %s\n" % (self._exc_info[0], self._exc_info[1])
)
self._exc_info = None # remove potential circular references
compat.raise_(value, with_traceback=traceback)
def clsname_as_plain_name(cls):
return " ".join(
n.lower() for n in re.findall(r"([A-Z][a-z]+)", cls.__name__)
)
def decode_slice(slc):
"""decode a slice object as sent to __getitem__.
takes into account the 2.5 __index__() method, basically.
"""
ret = []
for x in slc.start, slc.stop, slc.step:
if hasattr(x, "__index__"):
x = x.__index__()
ret.append(x)
return tuple(ret)
def _unique_symbols(used, *bases):
used = set(used)
for base in bases:
pool = itertools.chain(
(base,),
compat.itertools_imap(lambda i: base + str(i), range(1000)),
)
for sym in pool:
if sym not in used:
used.add(sym)
yield sym
break
else:
raise NameError("exhausted namespace for symbol base %s" % base)
def map_bits(fn, n):
"""Call the given function given each nonzero bit from n."""
while n:
b = n & (~n + 1)
yield fn(b)
n ^= b
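# A minimal usage sketch (hypothetical values): n & (~n + 1) isolates the
# lowest set bit, so each nonzero bit of n is visited exactly once:
#
#     list(map_bits(lambda bit: bit, 0b1011))   # -> [1, 2, 8]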
def decorator(target):
"""A signature-matching decorator factory."""
def decorate(fn):
if not inspect.isfunction(fn) and not inspect.ismethod(fn):
raise Exception("not a decoratable function")
spec = compat.inspect_getfullargspec(fn)
names = tuple(spec[0]) + spec[1:3] + (fn.__name__,)
targ_name, fn_name = _unique_symbols(names, "target", "fn")
metadata = dict(target=targ_name, fn=fn_name)
metadata.update(format_argspec_plus(spec, grouped=False))
metadata["name"] = fn.__name__
code = (
"""\
def %(name)s(%(args)s):
return %(target)s(%(fn)s, %(apply_kw)s)
"""
% metadata
)
decorated = _exec_code_in_env(
code, {targ_name: target, fn_name: fn}, fn.__name__
)
decorated.__defaults__ = getattr(fn, "im_func", fn).__defaults__
decorated.__wrapped__ = fn
return update_wrapper(decorated, fn)
return update_wrapper(decorate, target)
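# A minimal usage sketch (hypothetical functions): the generated wrapper
# preserves the decorated function's exact signature, unlike a plain
# *args/**kw wrapper:
#
#     @decorator
#     def log_calls(fn, *args, **kw):
#         print("calling %s" % fn.__name__)
#         return fn(*args, **kw)
#
#     @log_calls
#     def add(x, y=0):
#         return x + y
#
#     add(1, y=2)   # prints "calling add", returns 3; add keeps (x, y=0)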
def _exec_code_in_env(code, env, fn_name):
exec(code, env)
return env[fn_name]
def public_factory(target, location, class_location=None):
"""Produce a wrapping function for the given cls or classmethod.
Rationale here is so that the __init__ method of the
class can serve as documentation for the function.
"""
if isinstance(target, type):
fn = target.__init__
callable_ = target
doc = (
"Construct a new :class:`.%s` object. \n\n"
"This constructor is mirrored as a public API function; "
"see :func:`sqlalchemy%s` "
"for a full usage and argument description."
% (target.__name__, location)
)
else:
fn = callable_ = target
doc = (
"This function is mirrored; see :func:`sqlalchemy%s` "
"for a description of arguments." % location
)
location_name = location.split(".")[-1]
spec = compat.inspect_getfullargspec(fn)
del spec[0][0]
metadata = format_argspec_plus(spec, grouped=False)
metadata["name"] = location_name
code = (
"""\
def %(name)s(%(args)s):
return cls(%(apply_kw)s)
"""
% metadata
)
env = {"cls": callable_, "symbol": symbol}
exec(code, env)
decorated = env[location_name]
if hasattr(fn, "_linked_to"):
linked_to, linked_to_location = fn._linked_to
linked_to_doc = linked_to.__doc__
if class_location is None:
class_location = "%s.%s" % (target.__module__, target.__name__)
linked_to_doc = inject_docstring_text(
linked_to_doc,
".. container:: inherited_member\n\n "
"Inherited from :func:`sqlalchemy%s`; this constructor "
"creates a :class:`%s` object"
% (linked_to_location, class_location),
1,
)
decorated.__doc__ = linked_to_doc
else:
decorated.__doc__ = fn.__doc__
decorated.__module__ = "sqlalchemy" + location.rsplit(".", 1)[0]
if decorated.__module__ not in sys.modules:
raise ImportError(
"public_factory location %s is not in sys.modules"
% (decorated.__module__,)
)
if compat.py2k or hasattr(fn, "__func__"):
fn.__func__.__doc__ = doc
if not hasattr(fn.__func__, "_linked_to"):
fn.__func__._linked_to = (decorated, location)
else:
fn.__doc__ = doc
if not hasattr(fn, "_linked_to"):
fn._linked_to = (decorated, location)
return decorated
class PluginLoader(object):
def __init__(self, group, auto_fn=None):
self.group = group
self.impls = {}
self.auto_fn = auto_fn
def clear(self):
self.impls.clear()
def load(self, name):
if name in self.impls:
return self.impls[name]()
if self.auto_fn:
loader = self.auto_fn(name)
if loader:
self.impls[name] = loader
return loader()
try:
import pkg_resources
except ImportError:
pass
else:
for impl in pkg_resources.iter_entry_points(self.group, name):
self.impls[name] = impl.load
return impl.load()
raise exc.NoSuchModuleError(
"Can't load plugin: %s:%s" % (self.group, name)
)
def register(self, name, modulepath, objname):
def load():
mod = compat.import_(modulepath)
for token in modulepath.split(".")[1:]:
mod = getattr(mod, token)
return getattr(mod, objname)
self.impls[name] = load
def _inspect_func_args(fn):
try:
co_varkeywords = inspect.CO_VARKEYWORDS
except AttributeError:
# https://docs.python.org/3/library/inspect.html
# The flags are specific to CPython, and may not be defined in other
# Python implementations. Furthermore, the flags are an implementation
# detail, and can be removed or deprecated in future Python releases.
spec = compat.inspect_getfullargspec(fn)
return spec[0], bool(spec[2])
else:
# use fn.__code__ plus flags to reduce method call overhead
co = fn.__code__
nargs = co.co_argcount
return (
list(co.co_varnames[:nargs]),
bool(co.co_flags & co_varkeywords),
)
def get_cls_kwargs(cls, _set=None):
r"""Return the full set of inherited kwargs for the given `cls`.
Probes a class's __init__ method, collecting all named arguments. If the
__init__ defines a \**kwargs catch-all, then the constructor is presumed
to pass along unrecognized keywords to its base classes, and the
collection process is repeated recursively on each of the bases.
Uses a subset of inspect.getfullargspec() to cut down on method overhead,
as this is used within the Core typing system to create copies of type
objects which is a performance-sensitive operation.
    No anonymous tuple arguments, please!
"""
toplevel = _set is None
if toplevel:
_set = set()
ctr = cls.__dict__.get("__init__", False)
has_init = (
ctr
and isinstance(ctr, types.FunctionType)
and isinstance(ctr.__code__, types.CodeType)
)
if has_init:
names, has_kw = _inspect_func_args(ctr)
_set.update(names)
if not has_kw and not toplevel:
return None
if not has_init or has_kw:
for c in cls.__bases__:
if get_cls_kwargs(c, _set) is None:
break
_set.discard("self")
return _set
def get_func_kwargs(func):
"""Return the set of legal kwargs for the given `func`.
Uses getargspec so is safe to call for methods, functions,
etc.
"""
return compat.inspect_getfullargspec(func)[0]
def get_callable_argspec(fn, no_self=False, _is_init=False):
"""Return the argument signature for any callable.
All pure-Python callables are accepted, including
functions, methods, classes, objects with __call__;
builtins and other edge cases like functools.partial() objects
raise a TypeError.
"""
if inspect.isbuiltin(fn):
raise TypeError("Can't inspect builtin: %s" % fn)
elif inspect.isfunction(fn):
if _is_init and no_self:
spec = compat.inspect_getfullargspec(fn)
return compat.FullArgSpec(
spec.args[1:],
spec.varargs,
spec.varkw,
spec.defaults,
spec.kwonlyargs,
spec.kwonlydefaults,
spec.annotations,
)
else:
return compat.inspect_getfullargspec(fn)
elif inspect.ismethod(fn):
if no_self and (_is_init or fn.__self__):
spec = compat.inspect_getfullargspec(fn.__func__)
return compat.FullArgSpec(
spec.args[1:],
spec.varargs,
spec.varkw,
spec.defaults,
spec.kwonlyargs,
spec.kwonlydefaults,
spec.annotations,
)
else:
return compat.inspect_getfullargspec(fn.__func__)
elif inspect.isclass(fn):
return get_callable_argspec(
fn.__init__, no_self=no_self, _is_init=True
)
elif hasattr(fn, "__func__"):
return compat.inspect_getfullargspec(fn.__func__)
elif hasattr(fn, "__call__"):
if inspect.ismethod(fn.__call__):
return get_callable_argspec(fn.__call__, no_self=no_self)
else:
raise TypeError("Can't inspect callable: %s" % fn)
else:
raise TypeError("Can't inspect callable: %s" % fn)
def format_argspec_plus(fn, grouped=True):
"""Returns a dictionary of formatted, introspected function arguments.
    An enhanced variant of inspect.formatargspec to support code generation.
fn
An inspectable callable or tuple of inspect getargspec() results.
grouped
Defaults to True; include (parens, around, argument) lists
Returns:
args
Full inspect.formatargspec for fn
self_arg
The name of the first positional argument, varargs[0], or None
if the function defines no positional arguments.
apply_pos
args, re-written in calling rather than receiving syntax. Arguments are
passed positionally.
apply_kw
Like apply_pos, except keyword-ish args are passed as keywords.
Example::
>>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
{'args': '(self, a, b, c=3, **d)',
'self_arg': 'self',
'apply_kw': '(self, a, b, c=c, **d)',
'apply_pos': '(self, a, b, c, **d)'}
"""
if compat.callable(fn):
spec = compat.inspect_getfullargspec(fn)
else:
spec = fn
args = compat.inspect_formatargspec(*spec)
if spec[0]:
self_arg = spec[0][0]
elif spec[1]:
self_arg = "%s[0]" % spec[1]
else:
self_arg = None
apply_pos = compat.inspect_formatargspec(
spec[0], spec[1], spec[2], None, spec[4]
)
num_defaults = 0
if spec[3]:
num_defaults += len(spec[3])
if spec[4]:
num_defaults += len(spec[4])
name_args = spec[0] + spec[4]
if num_defaults:
defaulted_vals = name_args[0 - num_defaults :]
else:
defaulted_vals = ()
apply_kw = compat.inspect_formatargspec(
name_args,
spec[1],
spec[2],
defaulted_vals,
formatvalue=lambda x: "=" + x,
)
if grouped:
return dict(
args=args,
self_arg=self_arg,
apply_pos=apply_pos,
apply_kw=apply_kw,
)
else:
return dict(
args=args[1:-1],
self_arg=self_arg,
apply_pos=apply_pos[1:-1],
apply_kw=apply_kw[1:-1],
)
def format_argspec_init(method, grouped=True):
"""format_argspec_plus with considerations for typical __init__ methods
Wraps format_argspec_plus with error handling strategies for typical
__init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
if method is object.__init__:
args = grouped and "(self)" or "self"
else:
try:
return format_argspec_plus(method, grouped=grouped)
except TypeError:
args = (
grouped
and "(self, *args, **kwargs)"
or "self, *args, **kwargs"
)
return dict(self_arg="self", args=args, apply_pos=args, apply_kw=args)
def getargspec_init(method):
"""inspect.getargspec with considerations for typical __init__ methods
Wraps inspect.getargspec with error handling for typical __init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
try:
return compat.inspect_getfullargspec(method)
except TypeError:
if method is object.__init__:
return (["self"], None, None, None)
else:
return (["self"], "args", "kwargs", None)
def unbound_method_to_callable(func_or_cls):
"""Adjust the incoming callable such that a 'self' argument is not
required.
"""
if isinstance(func_or_cls, types.MethodType) and not func_or_cls.__self__:
return func_or_cls.__func__
else:
return func_or_cls
def generic_repr(obj, additional_kw=(), to_inspect=None, omit_kwarg=()):
"""Produce a __repr__() based on direct association of the __init__()
specification vs. same-named attributes present.
"""
if to_inspect is None:
to_inspect = [obj]
else:
to_inspect = _collections.to_list(to_inspect)
missing = object()
pos_args = []
kw_args = _collections.OrderedDict()
vargs = None
for i, insp in enumerate(to_inspect):
try:
spec = compat.inspect_getfullargspec(insp.__init__)
except TypeError:
continue
else:
default_len = spec.defaults and len(spec.defaults) or 0
if i == 0:
if spec.varargs:
vargs = spec.varargs
if default_len:
pos_args.extend(spec.args[1:-default_len])
else:
pos_args.extend(spec.args[1:])
else:
kw_args.update(
[(arg, missing) for arg in spec.args[1:-default_len]]
)
if default_len:
kw_args.update(
[
(arg, default)
for arg, default in zip(
spec.args[-default_len:], spec.defaults
)
]
)
output = []
output.extend(repr(getattr(obj, arg, None)) for arg in pos_args)
if vargs is not None and hasattr(obj, vargs):
output.extend([repr(val) for val in getattr(obj, vargs)])
for arg, defval in kw_args.items():
if arg in omit_kwarg:
continue
try:
val = getattr(obj, arg, missing)
if val is not missing and val != defval:
output.append("%s=%r" % (arg, val))
except Exception:
pass
if additional_kw:
for arg, defval in additional_kw:
try:
val = getattr(obj, arg, missing)
if val is not missing and val != defval:
output.append("%s=%r" % (arg, val))
except Exception:
pass
return "%s(%s)" % (obj.__class__.__name__, ", ".join(output))
class portable_instancemethod(object):
"""Turn an instancemethod into a (parent, name) pair
to produce a serializable callable.
"""
__slots__ = "target", "name", "kwargs", "__weakref__"
def __getstate__(self):
return {
"target": self.target,
"name": self.name,
"kwargs": self.kwargs,
}
def __setstate__(self, state):
self.target = state["target"]
self.name = state["name"]
self.kwargs = state.get("kwargs", ())
def __init__(self, meth, kwargs=()):
self.target = meth.__self__
self.name = meth.__name__
self.kwargs = kwargs
def __call__(self, *arg, **kw):
kw.update(self.kwargs)
return getattr(self.target, self.name)(*arg, **kw)
def class_hierarchy(cls):
"""Return an unordered sequence of all classes related to cls.
Traverses diamond hierarchies.
Fibs slightly: subclasses of builtin types are not returned. Thus
class_hierarchy(class A(object)) returns (A, object), not A plus every
class systemwide that derives from object.
Old-style classes are discarded and hierarchies rooted on them
will not be descended.
"""
if compat.py2k:
if isinstance(cls, types.ClassType):
return list()
hier = {cls}
process = list(cls.__mro__)
while process:
c = process.pop()
if compat.py2k:
if isinstance(c, types.ClassType):
continue
bases = (
_
for _ in c.__bases__
if _ not in hier and not isinstance(_, types.ClassType)
)
else:
bases = (_ for _ in c.__bases__ if _ not in hier)
for b in bases:
process.append(b)
hier.add(b)
if compat.py3k:
if c.__module__ == "builtins" or not hasattr(c, "__subclasses__"):
continue
else:
if c.__module__ == "__builtin__" or not hasattr(
c, "__subclasses__"
):
continue
for s in [_ for _ in c.__subclasses__() if _ not in hier]:
process.append(s)
hier.add(s)
return list(hier)
def iterate_attributes(cls):
"""iterate all the keys and attributes associated
with a class, without using getattr().
Does not use getattr() so that class-sensitive
descriptors (i.e. property.__get__()) are not called.
"""
keys = dir(cls)
for key in keys:
for c in cls.__mro__:
if key in c.__dict__:
yield (key, c.__dict__[key])
break
def monkeypatch_proxied_specials(
into_cls,
from_cls,
skip=None,
only=None,
name="self.proxy",
from_instance=None,
):
"""Automates delegation of __specials__ for a proxying type."""
if only:
dunders = only
else:
if skip is None:
skip = (
"__slots__",
"__del__",
"__getattribute__",
"__metaclass__",
"__getstate__",
"__setstate__",
)
dunders = [
m
for m in dir(from_cls)
if (
m.startswith("__")
and m.endswith("__")
and not hasattr(into_cls, m)
and m not in skip
)
]
for method in dunders:
try:
fn = getattr(from_cls, method)
if not hasattr(fn, "__call__"):
continue
fn = getattr(fn, "im_func", fn)
except AttributeError:
continue
try:
spec = compat.inspect_getfullargspec(fn)
fn_args = compat.inspect_formatargspec(spec[0])
d_args = compat.inspect_formatargspec(spec[0][1:])
except TypeError:
fn_args = "(self, *args, **kw)"
d_args = "(*args, **kw)"
py = (
"def %(method)s%(fn_args)s: "
"return %(name)s.%(method)s%(d_args)s" % locals()
)
env = from_instance is not None and {name: from_instance} or {}
compat.exec_(py, env)
try:
env[method].__defaults__ = fn.__defaults__
except AttributeError:
pass
setattr(into_cls, method, env[method])
def methods_equivalent(meth1, meth2):
"""Return True if the two methods are the same implementation."""
return getattr(meth1, "__func__", meth1) is getattr(
meth2, "__func__", meth2
)
def as_interface(obj, cls=None, methods=None, required=None):
"""Ensure basic interface compliance for an instance or dict of callables.
Checks that ``obj`` implements public methods of ``cls`` or has members
listed in ``methods``. If ``required`` is not supplied, implementing at
least one interface method is sufficient. Methods present on ``obj`` that
are not in the interface are ignored.
    If ``obj`` is a dict and the dict does not meet the interface
requirements, the keys of the dictionary are inspected. Keys present in
``obj`` that are not in the interface will raise TypeErrors.
Raises TypeError if ``obj`` does not meet the interface criteria.
In all passing cases, an object with callable members is returned. In the
simple case, ``obj`` is returned as-is; if dict processing kicks in then
an anonymous class is returned.
obj
A type, instance, or dictionary of callables.
cls
Optional, a type. All public methods of cls are considered the
interface. An ``obj`` instance of cls will always pass, ignoring
      ``required``.
methods
Optional, a sequence of method names to consider as the interface.
required
Optional, a sequence of mandatory implementations. If omitted, an
``obj`` that provides at least one interface method is considered
sufficient. As a convenience, required may be a type, in which case
all public methods of the type are required.
"""
if not cls and not methods:
raise TypeError("a class or collection of method names are required")
if isinstance(cls, type) and isinstance(obj, cls):
return obj
interface = set(methods or [m for m in dir(cls) if not m.startswith("_")])
implemented = set(dir(obj))
complies = operator.ge
if isinstance(required, type):
required = interface
elif not required:
required = set()
complies = operator.gt
else:
required = set(required)
if complies(implemented.intersection(interface), required):
return obj
# No dict duck typing here.
if not isinstance(obj, dict):
qualifier = complies is operator.gt and "any of" or "all of"
raise TypeError(
"%r does not implement %s: %s"
% (obj, qualifier, ", ".join(interface))
)
class AnonymousInterface(object):
"""A callable-holding shell."""
if cls:
AnonymousInterface.__name__ = "Anonymous" + cls.__name__
found = set()
for method, impl in dictlike_iteritems(obj):
if method not in interface:
raise TypeError("%r: unknown in this interface" % method)
if not compat.callable(impl):
raise TypeError("%r=%r is not callable" % (method, impl))
setattr(AnonymousInterface, method, staticmethod(impl))
found.add(method)
if complies(found, required):
return AnonymousInterface
raise TypeError(
"dictionary does not contain required keys %s"
% ", ".join(required - found)
)
class memoized_property(object):
"""A read-only @property that is only evaluated once."""
def __init__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
obj.__dict__[self.__name__] = result = self.fget(obj)
return result
def _reset(self, obj):
memoized_property.reset(obj, self.__name__)
@classmethod
def reset(cls, obj, name):
obj.__dict__.pop(name, None)
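# A minimal usage sketch (hypothetical class): the getter runs once per
# instance; the computed value then shadows this non-data descriptor via
# the instance __dict__:
#
#     class Widget(object):
#         @memoized_property
#         def expensive(self):
#             return compute()   # hypothetical costly call, runs only once
#
#     w = Widget()
#     w.expensive   # computed and cached
#     w.expensive   # returned from w.__dict__ without recomputing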
def memoized_instancemethod(fn):
"""Decorate a method memoize its return value.
Best applied to no-arg methods: memoization is not sensitive to
argument values, and will always return the same value even when
called with different arguments.
"""
def oneshot(self, *args, **kw):
result = fn(self, *args, **kw)
def memo(*a, **kw):
return result
memo.__name__ = fn.__name__
memo.__doc__ = fn.__doc__
self.__dict__[fn.__name__] = memo
return result
return update_wrapper(oneshot, fn)
class group_expirable_memoized_property(object):
"""A family of @memoized_properties that can be expired in tandem."""
def __init__(self, attributes=()):
self.attributes = []
if attributes:
self.attributes.extend(attributes)
def expire_instance(self, instance):
"""Expire all memoized properties for *instance*."""
stash = instance.__dict__
for attribute in self.attributes:
stash.pop(attribute, None)
def __call__(self, fn):
self.attributes.append(fn.__name__)
return memoized_property(fn)
def method(self, fn):
self.attributes.append(fn.__name__)
return memoized_instancemethod(fn)
class MemoizedSlots(object):
"""Apply memoized items to an object using a __getattr__ scheme.
This allows the functionality of memoized_property and
memoized_instancemethod to be available to a class using __slots__.
"""
__slots__ = ()
def _fallback_getattr(self, key):
raise AttributeError(key)
def __getattr__(self, key):
if key.startswith("_memoized"):
raise AttributeError(key)
elif hasattr(self, "_memoized_attr_%s" % key):
value = getattr(self, "_memoized_attr_%s" % key)()
setattr(self, key, value)
return value
elif hasattr(self, "_memoized_method_%s" % key):
fn = getattr(self, "_memoized_method_%s" % key)
def oneshot(*args, **kw):
result = fn(*args, **kw)
def memo(*a, **kw):
return result
memo.__name__ = fn.__name__
memo.__doc__ = fn.__doc__
setattr(self, key, memo)
return result
oneshot.__doc__ = fn.__doc__
return oneshot
else:
return self._fallback_getattr(key)
def dependency_for(modulename, add_to_all=False):
def decorate(obj):
tokens = modulename.split(".")
mod = compat.import_(
".".join(tokens[0:-1]), globals(), locals(), [tokens[-1]]
)
mod = getattr(mod, tokens[-1])
setattr(mod, obj.__name__, obj)
if add_to_all and hasattr(mod, "__all__"):
mod.__all__.append(obj.__name__)
return obj
return decorate
class dependencies(object):
"""Apply imported dependencies as arguments to a function.
E.g.::
@util.dependencies(
"sqlalchemy.sql.widget",
"sqlalchemy.engine.default"
        )
def some_func(self, widget, default, arg1, arg2, **kw):
# ...
Rationale is so that the impact of a dependency cycle can be
associated directly with the few functions that cause the cycle,
and not pollute the module-level namespace.
"""
def __init__(self, *deps):
self.import_deps = []
for dep in deps:
tokens = dep.split(".")
self.import_deps.append(
dependencies._importlater(".".join(tokens[0:-1]), tokens[-1])
)
def __call__(self, fn):
import_deps = self.import_deps
spec = compat.inspect_getfullargspec(fn)
spec_zero = list(spec[0])
hasself = spec_zero[0] in ("self", "cls")
for i in range(len(import_deps)):
spec[0][i + (1 if hasself else 0)] = "import_deps[%r]" % i
inner_spec = format_argspec_plus(spec, grouped=False)
for impname in import_deps:
del spec_zero[1 if hasself else 0]
spec[0][:] = spec_zero
outer_spec = format_argspec_plus(spec, grouped=False)
code = "lambda %(args)s: fn(%(apply_kw)s)" % {
"args": outer_spec["args"],
"apply_kw": inner_spec["apply_kw"],
}
decorated = eval(code, locals())
decorated.__defaults__ = getattr(fn, "im_func", fn).__defaults__
return update_wrapper(decorated, fn)
@classmethod
def resolve_all(cls, path):
for m in list(dependencies._unresolved):
if m._full_path.startswith(path):
m._resolve()
_unresolved = set()
_by_key = {}
class _importlater(object):
_unresolved = set()
_by_key = {}
def __new__(cls, path, addtl):
key = path + "." + addtl
if key in dependencies._by_key:
return dependencies._by_key[key]
else:
dependencies._by_key[key] = imp = object.__new__(cls)
return imp
def __init__(self, path, addtl):
self._il_path = path
self._il_addtl = addtl
dependencies._unresolved.add(self)
@property
def _full_path(self):
return self._il_path + "." + self._il_addtl
@memoized_property
def module(self):
if self in dependencies._unresolved:
raise ImportError(
"importlater.resolve_all() hasn't "
"been called (this is %s %s)"
% (self._il_path, self._il_addtl)
)
return getattr(self._initial_import, self._il_addtl)
def _resolve(self):
dependencies._unresolved.discard(self)
self._initial_import = compat.import_(
self._il_path, globals(), locals(), [self._il_addtl]
)
def __getattr__(self, key):
if key == "module":
raise ImportError(
"Could not resolve module %s" % self._full_path
)
try:
attr = getattr(self.module, key)
except AttributeError:
raise AttributeError(
"Module %s has no attribute '%s'" % (self._full_path, key)
)
self.__dict__[key] = attr
return attr
# from paste.deploy.converters
def asbool(obj):
if isinstance(obj, compat.string_types):
obj = obj.strip().lower()
if obj in ["true", "yes", "on", "y", "t", "1"]:
return True
elif obj in ["false", "no", "off", "n", "f", "0"]:
return False
else:
raise ValueError("String is not true/false: %r" % obj)
return bool(obj)
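# A minimal usage sketch (hypothetical inputs):
#
#     asbool("YES")     # -> True
#     asbool(" off ")   # -> False
#     asbool(0)         # -> False; non-strings are coerced with bool()
#     asbool("maybe")   # raises ValueError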
def bool_or_str(*text):
"""Return a callable that will evaluate a string as
boolean, or one of a set of "alternate" string values.
"""
def bool_or_value(obj):
if obj in text:
return obj
else:
return asbool(obj)
return bool_or_value
def asint(value):
"""Coerce to integer."""
if value is None:
return value
return int(value)
def coerce_kw_type(kw, key, type_, flexi_bool=True, dest=None):
r"""If 'key' is present in dict 'kw', coerce its value to type 'type\_' if
necessary. If 'flexi_bool' is True, the string '0' is considered false
when coercing to boolean.
"""
if dest is None:
dest = kw
if (
key in kw
and (not isinstance(type_, type) or not isinstance(kw[key], type_))
and kw[key] is not None
):
if type_ is bool and flexi_bool:
dest[key] = asbool(kw[key])
else:
dest[key] = type_(kw[key])
def constructor_copy(obj, cls, *args, **kw):
"""Instantiate cls using the __dict__ of obj as constructor arguments.
Uses inspect to match the named arguments of ``cls``.
"""
names = get_cls_kwargs(cls)
kw.update(
(k, obj.__dict__[k]) for k in names.difference(kw) if k in obj.__dict__
)
return cls(*args, **kw)
def counter():
"""Return a threadsafe counter function."""
lock = compat.threading.Lock()
counter = itertools.count(1)
# avoid the 2to3 "next" transformation...
def _next():
lock.acquire()
try:
return next(counter)
finally:
lock.release()
return _next
def duck_type_collection(specimen, default=None):
"""Given an instance or class, guess if it is or is acting as one of
the basic collection types: list, set and dict. If the __emulates__
property is present, return that preferentially.
"""
if hasattr(specimen, "__emulates__"):
# canonicalize set vs sets.Set to a standard: the builtin set
if specimen.__emulates__ is not None and issubclass(
specimen.__emulates__, set
):
return set
else:
return specimen.__emulates__
isa = isinstance(specimen, type) and issubclass or isinstance
if isa(specimen, list):
return list
elif isa(specimen, set):
return set
elif isa(specimen, dict):
return dict
if hasattr(specimen, "append"):
return list
elif hasattr(specimen, "add"):
return set
elif hasattr(specimen, "set"):
return dict
else:
return default
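# A minimal usage sketch (hypothetical specimens): classes and instances
# both work, with a duck-typing fallback on append()/add()/set():
#
#     duck_type_collection([])    # -> list
#     duck_type_collection(set)   # -> set
#
#     class Bag(object):          # hypothetical appendable collection
#         def append(self, item):
#             pass
#
#     duck_type_collection(Bag())   # -> list, via the append() duck test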
def assert_arg_type(arg, argtype, name):
if isinstance(arg, argtype):
return arg
else:
if isinstance(argtype, tuple):
raise exc.ArgumentError(
"Argument '%s' is expected to be one of type %s, got '%s'"
% (name, " or ".join("'%s'" % a for a in argtype), type(arg))
)
else:
raise exc.ArgumentError(
"Argument '%s' is expected to be of type '%s', got '%s'"
% (name, argtype, type(arg))
)
def dictlike_iteritems(dictlike):
"""Return a (key, value) iterator for almost any dict-like object."""
if compat.py3k:
if hasattr(dictlike, "items"):
return list(dictlike.items())
else:
if hasattr(dictlike, "iteritems"):
return dictlike.iteritems()
elif hasattr(dictlike, "items"):
return iter(dictlike.items())
getter = getattr(dictlike, "__getitem__", getattr(dictlike, "get", None))
if getter is None:
raise TypeError("Object '%r' is not dict-like" % dictlike)
if hasattr(dictlike, "iterkeys"):
def iterator():
for key in dictlike.iterkeys():
yield key, getter(key)
return iterator()
elif hasattr(dictlike, "keys"):
return iter((key, getter(key)) for key in dictlike.keys())
else:
raise TypeError("Object '%r' is not dict-like" % dictlike)
class classproperty(property):
"""A decorator that behaves like @property except that operates
on classes rather than instances.
The decorator is currently special when using the declarative
module, but note that the
:class:`~.sqlalchemy.ext.declarative.declared_attr`
decorator should be used for this purpose with declarative.
"""
def __init__(self, fget, *arg, **kw):
super(classproperty, self).__init__(fget, *arg, **kw)
self.__doc__ = fget.__doc__
def __get__(desc, self, cls):
return desc.fget(cls)
class hybridproperty(object):
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
clsval = self.func(owner)
clsval.__doc__ = self.func.__doc__
return clsval
else:
return self.func(instance)
class hybridmethod(object):
"""Decorate a function as cls- or instance- level."""
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self.func.__get__(owner, owner.__class__)
else:
return self.func.__get__(instance, owner)
class _symbol(int):
def __new__(self, name, doc=None, canonical=None):
"""Construct a new named symbol."""
assert isinstance(name, compat.string_types)
if canonical is None:
canonical = hash(name)
v = int.__new__(_symbol, canonical)
v.name = name
if doc:
v.__doc__ = doc
return v
def __reduce__(self):
return symbol, (self.name, "x", int(self))
def __str__(self):
return repr(self)
def __repr__(self):
return "symbol(%r)" % self.name
_symbol.__name__ = "symbol"
class symbol(object):
"""A constant symbol.
>>> symbol('foo') is symbol('foo')
True
>>> symbol('foo')
    symbol('foo')
A slight refinement of the MAGICCOOKIE=object() pattern. The primary
advantage of symbol() is its repr(). They are also singletons.
Repeated calls of symbol('name') will all return the same instance.
The optional ``doc`` argument assigns to ``__doc__``. This
is strictly so that Sphinx autoattr picks up the docstring we want
(it doesn't appear to pick up the in-module docstring if the datamember
is in a different module - autoattribute also blows up completely).
If Sphinx fixes/improves this then we would no longer need
``doc`` here.
"""
symbols = {}
_lock = compat.threading.Lock()
def __new__(cls, name, doc=None, canonical=None):
cls._lock.acquire()
try:
sym = cls.symbols.get(name)
if sym is None:
cls.symbols[name] = sym = _symbol(name, doc, canonical)
return sym
finally:
symbol._lock.release()
@classmethod
def parse_user_argument(
cls, arg, choices, name, resolve_symbol_names=False
):
"""Given a user parameter, parse the parameter into a chosen symbol.
The user argument can be a string name that matches the name of a
symbol, or the symbol object itself, or any number of alternate choices
        such as True/False/None, etc.
:param arg: the user argument.
:param choices: dictionary of symbol object to list of possible
entries.
:param name: name of the argument. Used in an :class:`.ArgumentError`
that is raised if the parameter doesn't match any available argument.
:param resolve_symbol_names: include the name of each symbol as a valid
entry.
"""
# note using hash lookup is tricky here because symbol's `__hash__`
# is its int value which we don't want included in the lookup
# explicitly, so we iterate and compare each.
for sym, choice in choices.items():
if arg is sym:
return sym
elif resolve_symbol_names and arg == sym.name:
return sym
elif arg in choice:
return sym
if arg is None:
return None
raise exc.ArgumentError("Invalid value for '%s': %r" % (name, arg))
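# A minimal usage sketch (hypothetical symbols): symbols are interned
# singletons, and parse_user_argument maps user input onto them:
#
#     RED = symbol("RED")
#     symbol("RED") is RED   # -> True
#     NONE = symbol("NONE")
#     symbol.parse_user_argument(
#         "none", {RED: ["red"], NONE: ["none", None]}, "color"
#     )                      # -> NONE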
_creation_order = 1
def set_creation_order(instance):
"""Assign a '_creation_order' sequence to the given instance.
This allows multiple instances to be sorted in order of creation
(typically within a single thread; the counter is not particularly
threadsafe).
"""
global _creation_order
instance._creation_order = _creation_order
_creation_order += 1
def warn_exception(func, *args, **kwargs):
"""executes the given function, catches all exceptions and converts to
a warning.
"""
try:
return func(*args, **kwargs)
except Exception:
warn("%s('%s') ignored" % sys.exc_info()[0:2])
def ellipses_string(value, len_=25):
try:
if len(value) > len_:
return "%s..." % value[0:len_]
else:
return value
except TypeError:
return value
class _hash_limit_string(compat.text_type):
"""A string subclass that can only be hashed on a maximum amount
of unique values.
This is used for warnings so that we can send out parameterized warnings
without the __warningregistry__ of the module, or the non-overridable
"once" registry within warnings.py, overloading memory,
"""
def __new__(cls, value, num, args):
interpolated = (value % args) + (
" (this warning may be suppressed after %d occurrences)" % num
)
self = super(_hash_limit_string, cls).__new__(cls, interpolated)
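        # collapse the hash to one of `num` buckets per message template, so
        # at most `num` distinct instances register in warning storage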
self._hash = hash("%s_%d" % (value, hash(interpolated) % num))
return self
def __hash__(self):
return self._hash
def __eq__(self, other):
return hash(self) == hash(other)
def warn(msg):
"""Issue a warning.
If msg is a string, :class:`.exc.SAWarning` is used as
the category.
"""
warnings.warn(msg, exc.SAWarning, stacklevel=2)
def warn_limited(msg, args):
"""Issue a warning with a parameterized string, limiting the number
of registrations.
"""
if args:
msg = _hash_limit_string(msg, 10, args)
warnings.warn(msg, exc.SAWarning, stacklevel=2)
def only_once(fn, retry_on_exception):
"""Decorate the given function to be a no-op after it is called exactly
once."""
once = [fn]
def go(*arg, **kw):
# strong reference fn so that it isn't garbage collected,
# which interferes with the event system's expectations
strong_fn = fn # noqa
if once:
once_fn = once.pop()
try:
return once_fn(*arg, **kw)
except:
if retry_on_exception:
once.insert(0, once_fn)
raise
return go
_SQLA_RE = re.compile(r"sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py")
_UNITTEST_RE = re.compile(r"unit(?:2|test2?/)")
def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE):
"""Chop extraneous lines off beginning and end of a traceback.
:param tb:
a list of traceback lines as returned by ``traceback.format_stack()``
:param exclude_prefix:
a regular expression object matching lines to skip at beginning of
``tb``
:param exclude_suffix:
a regular expression object matching lines to skip at end of ``tb``
"""
start = 0
end = len(tb) - 1
while start <= end and exclude_prefix.search(tb[start]):
start += 1
while start <= end and exclude_suffix.search(tb[end]):
end -= 1
return tb[start : end + 1]
NoneType = type(None)
def attrsetter(attrname):
code = "def set(obj, value):" " obj.%s = value" % attrname
env = locals().copy()
exec(code, env)
return env["set"]
class EnsureKWArgType(type):
r"""Apply translation of functions to accept \**kw arguments if they
don't already.
"""
def __init__(cls, clsname, bases, clsdict):
fn_reg = cls.ensure_kwarg
if fn_reg:
for key in clsdict:
m = re.match(fn_reg, key)
if m:
fn = clsdict[key]
spec = compat.inspect_getfullargspec(fn)
if not spec.varkw:
clsdict[key] = wrapped = cls._wrap_w_kw(fn)
setattr(cls, key, wrapped)
super(EnsureKWArgType, cls).__init__(clsname, bases, clsdict)
def _wrap_w_kw(self, fn):
def wrap(*arg, **kw):
return fn(*arg)
return update_wrapper(wrap, fn)
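# A minimal usage sketch (hypothetical class, Python 3 metaclass syntax):
# methods whose names match `ensure_kwarg` gain a **kw-tolerant wrapper if
# they don't already declare one:
#
#     class Visitor(metaclass=EnsureKWArgType):
#         ensure_kwarg = r"visit_.*"
#
#         def visit_table(self, table):
#             return table
#
#     Visitor().visit_table("t", extra=1)   # extra **kw is silently dropped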
def wrap_callable(wrapper, fn):
"""Augment functools.update_wrapper() to work with objects with
a ``__call__()`` method.
:param fn:
object with __call__ method
"""
if hasattr(fn, "__name__"):
return update_wrapper(wrapper, fn)
else:
_f = wrapper
_f.__name__ = fn.__class__.__name__
if hasattr(fn, "__module__"):
_f.__module__ = fn.__module__
if hasattr(fn.__call__, "__doc__") and fn.__call__.__doc__:
_f.__doc__ = fn.__call__.__doc__
elif fn.__doc__:
_f.__doc__ = fn.__doc__
return _f
def quoted_token_parser(value):
"""Parse a dotted identifier with accommodation for quoted names.
Includes support for SQL-style double quotes as a literal character.
E.g.::
>>> quoted_token_parser("name")
["name"]
>>> quoted_token_parser("schema.name")
["schema", "name"]
>>> quoted_token_parser('"Schema"."Name"')
['Schema', 'Name']
>>> quoted_token_parser('"Schema"."Name""Foo"')
['Schema', 'Name""Foo']
"""
if '"' not in value:
return value.split(".")
# 0 = outside of quotes
# 1 = inside of quotes
state = 0
result = [[]]
idx = 0
lv = len(value)
while idx < lv:
char = value[idx]
if char == '"':
if state == 1 and idx < lv - 1 and value[idx + 1] == '"':
result[-1].append('"')
idx += 1
else:
state ^= 1
elif char == "." and state == 0:
result.append([])
else:
result[-1].append(char)
idx += 1
return ["".join(token) for token in result]
def add_parameter_text(params, text):
params = _collections.to_list(params)
def decorate(fn):
doc = fn.__doc__ is not None and fn.__doc__ or ""
if doc:
doc = inject_param_text(doc, {param: text for param in params})
fn.__doc__ = doc
return fn
return decorate
def _dedent_docstring(text):
split_text = text.split("\n", 1)
if len(split_text) == 1:
return text
else:
firstline, remaining = split_text
if not firstline.startswith(" "):
return firstline + "\n" + textwrap.dedent(remaining)
else:
return textwrap.dedent(text)
def inject_docstring_text(doctext, injecttext, pos):
doctext = _dedent_docstring(doctext or "")
lines = doctext.split("\n")
if len(lines) == 1:
lines.append("")
injectlines = textwrap.dedent(injecttext).split("\n")
if injectlines[0]:
injectlines.insert(0, "")
blanks = [num for num, line in enumerate(lines) if not line.strip()]
blanks.insert(0, 0)
inject_pos = blanks[min(pos, len(blanks) - 1)]
lines = lines[0:inject_pos] + injectlines + lines[inject_pos:]
return "\n".join(lines)
def inject_param_text(doctext, inject_params):
doclines = doctext.splitlines()
lines = []
to_inject = None
while doclines:
line = doclines.pop(0)
if to_inject is None:
m = re.match(r"(\s+):param (?:\\\*\*?)?(.+?):", line)
if m:
param = m.group(2)
if param in inject_params:
# default indent to that of :param: plus one
indent = " " * len(m.group(1)) + " "
# but if the next line has text, use that line's
                    # indentation
if doclines:
m2 = re.match(r"(\s+)\S", doclines[0])
if m2:
indent = " " * len(m2.group(1))
to_inject = indent + inject_params[param]
elif line.lstrip().startswith(":param "):
lines.append("\n")
lines.append(to_inject)
lines.append("\n")
to_inject = None
elif not line.rstrip():
lines.append(line)
lines.append(to_inject)
lines.append("\n")
to_inject = None
elif line.endswith("::"):
            # TODO: this still won't cover if the code example itself has blank
# lines in it, need to detect those via indentation.
lines.append(line)
lines.append(
doclines.pop(0)
) # the blank line following a code example
continue
lines.append(line)
return "\n".join(lines)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext/instrumentation.py | """Extensible class instrumentation.
The :mod:`sqlalchemy.ext.instrumentation` package provides for alternate
systems of class instrumentation within the ORM. Class instrumentation
refers to how the ORM places attributes on the class which maintain
data and track changes to that data, as well as event hooks installed
on the class.
.. note::
The extension package is provided for the benefit of integration
with other object management packages, which already perform
their own instrumentation. It is not intended for general use.
For examples of how the instrumentation extension is used,
see the example :ref:`examples_instrumentation`.
"""
import weakref
from .. import util
from ..orm import attributes
from ..orm import base as orm_base
from ..orm import collections
from ..orm import exc as orm_exc
from ..orm import instrumentation as orm_instrumentation
from ..orm.instrumentation import _default_dict_getter
from ..orm.instrumentation import _default_manager_getter
from ..orm.instrumentation import _default_state_getter
from ..orm.instrumentation import ClassManager
from ..orm.instrumentation import InstrumentationFactory
INSTRUMENTATION_MANAGER = "__sa_instrumentation_manager__"
"""Attribute, elects custom instrumentation when present on a mapped class.
Allows a class to specify a slightly or wildly different technique for
tracking changes made to mapped attributes and collections.
Only one instrumentation implementation is allowed in a given object
inheritance hierarchy.
The value of this attribute must be a callable and will be passed a class
object. The callable must return one of:
- An instance of an InstrumentationManager or subclass
- An object implementing all or some of InstrumentationManager (TODO)
- A dictionary of callables, implementing all or some of the above (TODO)
- An instance of a ClassManager or subclass
This attribute is consulted by SQLAlchemy instrumentation
resolution, once the :mod:`sqlalchemy.ext.instrumentation` module
has been imported. If custom finders are installed in the global
instrumentation_finders list, they may or may not choose to honor this
attribute.
"""
def find_native_user_instrumentation_hook(cls):
"""Find user-specified instrumentation management for a class."""
return getattr(cls, INSTRUMENTATION_MANAGER, None)
instrumentation_finders = [find_native_user_instrumentation_hook]
"""An extensible sequence of callables which return instrumentation
implementations.
When a class is registered, each callable will be passed a class object.
If None is returned, the
next finder in the sequence is consulted. Otherwise the return must be an
instrumentation factory that follows the same guidelines as
sqlalchemy.ext.instrumentation.INSTRUMENTATION_MANAGER.
By default, the only finder is find_native_user_instrumentation_hook, which
searches for INSTRUMENTATION_MANAGER. If all finders return None, standard
ClassManager instrumentation is used.
"""
class ExtendedInstrumentationRegistry(InstrumentationFactory):
"""Extends :class:`.InstrumentationFactory` with additional
bookkeeping, to accommodate multiple types of
class managers.
"""
_manager_finders = weakref.WeakKeyDictionary()
_state_finders = weakref.WeakKeyDictionary()
_dict_finders = weakref.WeakKeyDictionary()
_extended = False
def _locate_extended_factory(self, class_):
for finder in instrumentation_finders:
factory = finder(class_)
if factory is not None:
manager = self._extended_class_manager(class_, factory)
return manager, factory
else:
return None, None
def _check_conflicts(self, class_, factory):
existing_factories = self._collect_management_factories_for(
class_
).difference([factory])
if existing_factories:
raise TypeError(
"multiple instrumentation implementations specified "
"in %s inheritance hierarchy: %r"
% (class_.__name__, list(existing_factories))
)
def _extended_class_manager(self, class_, factory):
manager = factory(class_)
if not isinstance(manager, ClassManager):
manager = _ClassInstrumentationAdapter(class_, manager)
if factory != ClassManager and not self._extended:
# somebody invoked a custom ClassManager.
# reinstall global "getter" functions with the more
# expensive ones.
self._extended = True
_install_instrumented_lookups()
self._manager_finders[class_] = manager.manager_getter()
self._state_finders[class_] = manager.state_getter()
self._dict_finders[class_] = manager.dict_getter()
return manager
def _collect_management_factories_for(self, cls):
"""Return a collection of factories in play or specified for a
hierarchy.
Traverses the entire inheritance graph of a cls and returns a
collection of instrumentation factories for those classes. Factories
are extracted from active ClassManagers, if available, otherwise
instrumentation_finders is consulted.
"""
hierarchy = util.class_hierarchy(cls)
factories = set()
for member in hierarchy:
manager = self.manager_of_class(member)
if manager is not None:
factories.add(manager.factory)
else:
for finder in instrumentation_finders:
factory = finder(member)
if factory is not None:
break
else:
factory = None
factories.add(factory)
factories.discard(None)
return factories
def unregister(self, class_):
if class_ in self._manager_finders:
del self._manager_finders[class_]
del self._state_finders[class_]
del self._dict_finders[class_]
super(ExtendedInstrumentationRegistry, self).unregister(class_)
def manager_of_class(self, cls):
if cls is None:
return None
try:
finder = self._manager_finders.get(cls, _default_manager_getter)
except TypeError:
# due to weakref lookup on invalid object
return None
else:
return finder(cls)
def state_of(self, instance):
if instance is None:
raise AttributeError("None has no persistent state.")
return self._state_finders.get(
instance.__class__, _default_state_getter
)(instance)
def dict_of(self, instance):
if instance is None:
raise AttributeError("None has no persistent state.")
return self._dict_finders.get(
instance.__class__, _default_dict_getter
)(instance)
orm_instrumentation._instrumentation_factory = (
_instrumentation_factory
) = ExtendedInstrumentationRegistry()
orm_instrumentation.instrumentation_finders = instrumentation_finders
class InstrumentationManager(object):
"""User-defined class instrumentation extension.
:class:`.InstrumentationManager` can be subclassed in order
to change
how class instrumentation proceeds. This class exists for
the purposes of integration with other object management
frameworks which would like to entirely modify the
instrumentation methodology of the ORM, and is not intended
for regular usage. For interception of class instrumentation
events, see :class:`.InstrumentationEvents`.
The API for this class should be considered as semi-stable,
and may change slightly with new releases.
"""
# r4361 added a mandatory (cls) constructor to this interface.
# given that, perhaps class_ should be dropped from all of these
# signatures.
def __init__(self, class_):
pass
def manage(self, class_, manager):
setattr(class_, "_default_class_manager", manager)
def dispose(self, class_, manager):
delattr(class_, "_default_class_manager")
def manager_getter(self, class_):
def get(cls):
return cls._default_class_manager
return get
def instrument_attribute(self, class_, key, inst):
pass
def post_configure_attribute(self, class_, key, inst):
pass
def install_descriptor(self, class_, key, inst):
setattr(class_, key, inst)
def uninstall_descriptor(self, class_, key):
delattr(class_, key)
def install_member(self, class_, key, implementation):
setattr(class_, key, implementation)
def uninstall_member(self, class_, key):
delattr(class_, key)
def instrument_collection_class(self, class_, key, collection_class):
return collections.prepare_instrumentation(collection_class)
def get_instance_dict(self, class_, instance):
return instance.__dict__
def initialize_instance_dict(self, class_, instance):
pass
def install_state(self, class_, instance, state):
setattr(instance, "_default_state", state)
def remove_state(self, class_, instance):
delattr(instance, "_default_state")
def state_getter(self, class_):
return lambda instance: getattr(instance, "_default_state")
def dict_getter(self, class_):
return lambda inst: self.get_instance_dict(class_, inst)
class _ClassInstrumentationAdapter(ClassManager):
"""Adapts a user-defined InstrumentationManager to a ClassManager."""
def __init__(self, class_, override):
self._adapted = override
self._get_state = self._adapted.state_getter(class_)
self._get_dict = self._adapted.dict_getter(class_)
ClassManager.__init__(self, class_)
def manage(self):
self._adapted.manage(self.class_, self)
def dispose(self):
        # pass the manager along, matching the
        # InstrumentationManager.dispose(class_, manager) signature
        self._adapted.dispose(self.class_, self)
def manager_getter(self):
return self._adapted.manager_getter(self.class_)
def instrument_attribute(self, key, inst, propagated=False):
ClassManager.instrument_attribute(self, key, inst, propagated)
if not propagated:
self._adapted.instrument_attribute(self.class_, key, inst)
def post_configure_attribute(self, key):
super(_ClassInstrumentationAdapter, self).post_configure_attribute(key)
self._adapted.post_configure_attribute(self.class_, key, self[key])
def install_descriptor(self, key, inst):
self._adapted.install_descriptor(self.class_, key, inst)
def uninstall_descriptor(self, key):
self._adapted.uninstall_descriptor(self.class_, key)
def install_member(self, key, implementation):
self._adapted.install_member(self.class_, key, implementation)
def uninstall_member(self, key):
self._adapted.uninstall_member(self.class_, key)
def instrument_collection_class(self, key, collection_class):
return self._adapted.instrument_collection_class(
self.class_, key, collection_class
)
def initialize_collection(self, key, state, factory):
delegate = getattr(self._adapted, "initialize_collection", None)
if delegate:
return delegate(key, state, factory)
else:
return ClassManager.initialize_collection(
self, key, state, factory
)
def new_instance(self, state=None):
instance = self.class_.__new__(self.class_)
self.setup_instance(instance, state)
return instance
def _new_state_if_none(self, instance):
"""Install a default InstanceState if none is present.
A private convenience method used by the __init__ decorator.
"""
if self.has_state(instance):
return False
else:
return self.setup_instance(instance)
def setup_instance(self, instance, state=None):
self._adapted.initialize_instance_dict(self.class_, instance)
if state is None:
state = self._state_constructor(instance, self)
# the given instance is assumed to have no state
self._adapted.install_state(self.class_, instance, state)
return state
def teardown_instance(self, instance):
self._adapted.remove_state(self.class_, instance)
def has_state(self, instance):
try:
self._get_state(instance)
except orm_exc.NO_STATE:
return False
else:
return True
def state_getter(self):
return self._get_state
def dict_getter(self):
return self._get_dict
def _install_instrumented_lookups():
"""Replace global class/object management functions
with ExtendedInstrumentationRegistry implementations, which
allow multiple types of class managers to be present,
at the cost of performance.
This function is called only by ExtendedInstrumentationRegistry
and unit tests specific to this behavior.
The _reinstall_default_lookups() function can be called
after this one to re-establish the default functions.
"""
_install_lookups(
dict(
instance_state=_instrumentation_factory.state_of,
instance_dict=_instrumentation_factory.dict_of,
manager_of_class=_instrumentation_factory.manager_of_class,
)
)
def _reinstall_default_lookups():
"""Restore simplified lookups."""
_install_lookups(
dict(
instance_state=_default_state_getter,
instance_dict=_default_dict_getter,
manager_of_class=_default_manager_getter,
)
)
_instrumentation_factory._extended = False
def _install_lookups(lookups):
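    # rebind the lookup callables both at this module's level and on the
    # orm.base, orm.attributes and orm.instrumentation modules, each of
    # which imported these names directly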
global instance_state, instance_dict, manager_of_class
instance_state = lookups["instance_state"]
instance_dict = lookups["instance_dict"]
manager_of_class = lookups["manager_of_class"]
orm_base.instance_state = (
attributes.instance_state
) = orm_instrumentation.instance_state = instance_state
orm_base.instance_dict = (
attributes.instance_dict
) = orm_instrumentation.instance_dict = instance_dict
orm_base.manager_of_class = (
attributes.manager_of_class
) = orm_instrumentation.manager_of_class = manager_of_class
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext/compiler.py | # ext/compiler.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""Provides an API for creation of custom ClauseElements and compilers.
Synopsis
========
Usage involves the creation of one or more
:class:`~sqlalchemy.sql.expression.ClauseElement` subclasses and one or
more callables defining its compilation::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import ColumnClause
class MyColumn(ColumnClause):
pass
@compiles(MyColumn)
def compile_mycolumn(element, compiler, **kw):
return "[%s]" % element.name
Above, ``MyColumn`` extends :class:`~sqlalchemy.sql.expression.ColumnClause`,
the base expression element for named column objects. The ``compiles``
decorator registers itself with the ``MyColumn`` class so that it is invoked
when the object is compiled to a string::
from sqlalchemy import select
s = select([MyColumn('x'), MyColumn('y')])
print(str(s))
Produces::
SELECT [x], [y]
Dialect-specific compilation rules
==================================
Compilers can also be made dialect-specific. The appropriate compiler will be
invoked for the dialect in use::
from sqlalchemy.schema import DDLElement
class AlterColumn(DDLElement):
def __init__(self, column, cmd):
self.column = column
self.cmd = cmd
@compiles(AlterColumn)
def visit_alter_column(element, compiler, **kw):
return "ALTER COLUMN %s ..." % element.column.name
@compiles(AlterColumn, 'postgresql')
def visit_alter_column(element, compiler, **kw):
return "ALTER TABLE %s ALTER COLUMN %s ..." % (element.table.name,
element.column.name)
The second ``visit_alter_column`` will be invoked when any ``postgresql``
dialect is used.
Compiling sub-elements of a custom expression construct
=======================================================
The ``compiler`` argument is the
:class:`~sqlalchemy.engine.interfaces.Compiled` object in use. This object
can be inspected for any information about the in-progress compilation,
including ``compiler.dialect``, ``compiler.statement`` etc. The
:class:`~sqlalchemy.sql.compiler.SQLCompiler` and
:class:`~sqlalchemy.sql.compiler.DDLCompiler` both include a ``process()``
method which can be used for compilation of embedded attributes::
from sqlalchemy.sql.expression import Executable, ClauseElement
class InsertFromSelect(Executable, ClauseElement):
def __init__(self, table, select):
self.table = table
self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
return "INSERT INTO %s (%s)" % (
compiler.process(element.table, asfrom=True, **kw),
compiler.process(element.select, **kw)
)
insert = InsertFromSelect(t1, select([t1]).where(t1.c.x>5))
print(insert)
Produces::
"INSERT INTO mytable (SELECT mytable.x, mytable.y, mytable.z
FROM mytable WHERE mytable.x > :x_1)"
.. note::
The above ``InsertFromSelect`` construct is only an example; this actual
functionality is already available using the
:meth:`_expression.Insert.from_select` method.
.. note::
The above ``InsertFromSelect`` construct probably wants to have "autocommit"
enabled. See :ref:`enabling_compiled_autocommit` for this step.
Cross Compiling between SQL and DDL compilers
---------------------------------------------
SQL and DDL constructs are each compiled using different base compilers -
``SQLCompiler`` and ``DDLCompiler``. A common need is to access the
compilation rules of SQL expressions from within a DDL expression. The
``DDLCompiler`` includes an accessor ``sql_compiler`` for this reason, such as
below where we generate a CHECK constraint that embeds a SQL expression::
@compiles(MyConstraint)
def compile_my_constraint(constraint, ddlcompiler, **kw):
kw['literal_binds'] = True
return "CONSTRAINT %s CHECK (%s)" % (
constraint.name,
ddlcompiler.sql_compiler.process(
constraint.expression, **kw)
)
Above, we add an additional flag to the process step as called by
:meth:`.SQLCompiler.process`, which is the ``literal_binds`` flag. This
indicates that any SQL expression which refers to a :class:`.BindParameter`
object or other "literal" object such as those which refer to strings or
integers should be rendered **in-place**, rather than being referred to as
a bound parameter; when emitting DDL, bound parameters are typically not
supported.
.. _enabling_compiled_autocommit:
Enabling Autocommit on a Construct
==================================
Recall from the section :ref:`autocommit` that the :class:`_engine.Engine`,
when
asked to execute a construct in the absence of a user-defined transaction,
detects if the given construct represents DML or DDL, that is, a data
modification or data definition statement, which requires (or may require,
in the case of DDL) that the transaction generated by the DBAPI be committed
(recall that DBAPI always has a transaction going on regardless of what
SQLAlchemy does). Checking for this is actually accomplished by checking for
the "autocommit" execution option on the construct. When building a
construct like an INSERT derivation, a new DDL type, or perhaps a stored
procedure that alters data, the "autocommit" option needs to be set in order
for the statement to function with "connectionless" execution
(as described in :ref:`dbengine_implicit`).
Currently a quick way to do this is to subclass :class:`.Executable`, then
add the "autocommit" flag to the ``_execution_options`` dictionary (note this
is a "frozen" dictionary which supplies a generative ``union()`` method)::
from sqlalchemy.sql.expression import Executable, ClauseElement
class MyInsertThing(Executable, ClauseElement):
_execution_options = \
Executable._execution_options.union({'autocommit': True})
More succinctly, if the construct is truly similar to an INSERT, UPDATE, or
DELETE, :class:`.UpdateBase` can be used, which already is a subclass
of :class:`.Executable`, :class:`_expression.ClauseElement` and includes the
``autocommit`` flag::
from sqlalchemy.sql.expression import UpdateBase
class MyInsertThing(UpdateBase):
def __init__(self, ...):
...
DDL elements that subclass :class:`.DDLElement` already have the
"autocommit" flag turned on.
Changing the default compilation of existing constructs
=======================================================
The compiler extension applies just as well to the existing constructs. When
overriding the compilation of a built in SQL construct, the @compiles
decorator is invoked upon the appropriate class (be sure to use the class,
i.e. ``Insert`` or ``Select``, instead of the creation function such
as ``insert()`` or ``select()``).
Within the new compilation function, to get at the "original" compilation
routine, use the appropriate visit_XXX method - this is
because compiler.process() will call upon the overriding routine and cause
an endless loop. For example, to add a "prefix" to all insert statements::
from sqlalchemy.sql.expression import Insert
@compiles(Insert)
def prefix_inserts(insert, compiler, **kw):
return compiler.visit_insert(insert.prefix_with("some prefix"), **kw)
The above compiler will prefix all INSERT statements with "some prefix" when
compiled.
.. _type_compilation_extension:
Changing Compilation of Types
=============================
The compiler extension works for types, too. Below we implement the
MS-SQL specific 'max' keyword for ``String``/``VARCHAR``::
@compiles(String, 'mssql')
@compiles(VARCHAR, 'mssql')
def compile_varchar(element, compiler, **kw):
if element.length == 'max':
return "VARCHAR('max')"
else:
return compiler.visit_VARCHAR(element, **kw)
foo = Table('foo', metadata,
Column('data', VARCHAR('max'))
)
Subclassing Guidelines
======================
A big part of using the compiler extension is subclassing SQLAlchemy
expression constructs. To make this easier, the expression and
schema packages feature a set of "bases" intended for common tasks.
A synopsis is as follows:
* :class:`~sqlalchemy.sql.expression.ClauseElement` - This is the root
expression class. Any SQL expression can be derived from this base, and is
probably the best choice for longer constructs such as specialized INSERT
statements.
* :class:`~sqlalchemy.sql.expression.ColumnElement` - The root of all
"column-like" elements. Anything that you'd place in the "columns" clause of
a SELECT statement (as well as order by and group by) can derive from this -
the object will automatically have Python "comparison" behavior.
:class:`~sqlalchemy.sql.expression.ColumnElement` classes want to have a
``type`` member which is the expression's return type. This can be
established at the instance level in the constructor, or at the class level
if it's generally constant::
class timestamp(ColumnElement):
type = TIMESTAMP()
* :class:`~sqlalchemy.sql.functions.FunctionElement` - This is a hybrid of a
``ColumnElement`` and a "from clause" like object, and represents a SQL
function or stored procedure type of call. Since most databases support
statements along the line of "SELECT FROM <some function>"
``FunctionElement`` adds in the ability to be used in the FROM clause of a
``select()`` construct::
from sqlalchemy.sql.expression import FunctionElement
class coalesce(FunctionElement):
name = 'coalesce'
@compiles(coalesce)
def compile(element, compiler, **kw):
return "coalesce(%s)" % compiler.process(element.clauses, **kw)
@compiles(coalesce, 'oracle')
def compile(element, compiler, **kw):
if len(element.clauses) > 2:
raise TypeError("coalesce only supports two arguments on Oracle")
return "nvl(%s)" % compiler.process(element.clauses, **kw)
* :class:`~sqlalchemy.schema.DDLElement` - The root of all DDL expressions,
like CREATE TABLE, ALTER TABLE, etc. Compilation of ``DDLElement``
subclasses is issued by a ``DDLCompiler`` instead of a ``SQLCompiler``.
``DDLElement`` also features ``Table`` and ``MetaData`` event hooks via the
``execute_at()`` method, allowing the construct to be invoked during CREATE
TABLE and DROP TABLE sequences.
* :class:`~sqlalchemy.sql.expression.Executable` - This is a mixin which
should be used with any expression class that represents a "standalone"
SQL statement that can be passed directly to an ``execute()`` method. It
is already implicit within ``DDLElement`` and ``FunctionElement``.
Further Examples
================
"UTC timestamp" function
-------------------------
A function that works like "CURRENT_TIMESTAMP" except that it applies the
appropriate conversions so that the time is in UTC. Timestamps are best
stored in relational databases as UTC, without time zones: UTC so that your
database doesn't think time has gone backwards in the hour when daylight
saving time ends, and without time zones because time zones are like
character encodings - they're best applied only at the endpoints of an
application (i.e. convert to UTC upon user input, re-apply the desired
time zone upon display).
For PostgreSQL and Microsoft SQL Server::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import DateTime
class utcnow(expression.FunctionElement):
type = DateTime()
@compiles(utcnow, 'postgresql')
def pg_utcnow(element, compiler, **kw):
return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
@compiles(utcnow, 'mssql')
def ms_utcnow(element, compiler, **kw):
return "GETUTCDATE()"
Example usage::
from sqlalchemy import (
Table, Column, Integer, String, DateTime, MetaData
)
metadata = MetaData()
event = Table("event", metadata,
Column("id", Integer, primary_key=True),
Column("description", String(50), nullable=False),
Column("timestamp", DateTime, server_default=utcnow())
)
"GREATEST" function
-------------------
The "GREATEST" function is given any number of arguments and returns the one
that is of the highest value - it's equivalent to Python's ``max``
function. Below, a SQL standard version is contrasted with a CASE-based
version which only accommodates two arguments::
from sqlalchemy.sql import expression, case
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import Numeric
class greatest(expression.FunctionElement):
type = Numeric()
name = 'greatest'
@compiles(greatest)
def default_greatest(element, compiler, **kw):
return compiler.visit_function(element)
@compiles(greatest, 'sqlite')
@compiles(greatest, 'mssql')
@compiles(greatest, 'oracle')
def case_greatest(element, compiler, **kw):
arg1, arg2 = list(element.clauses)
return compiler.process(case([(arg1 > arg2, arg1)], else_=arg2), **kw)
Example usage::
Session.query(Account).\
filter(
greatest(
Account.checking_balance,
Account.savings_balance) > 10000
)
"false" expression
------------------
Render a "false" constant expression, rendering as "0" on platforms that
don't have a "false" constant::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
class sql_false(expression.ColumnElement):
pass
@compiles(sql_false)
def default_false(element, compiler, **kw):
return "false"
@compiles(sql_false, 'mssql')
@compiles(sql_false, 'mysql')
@compiles(sql_false, 'oracle')
def int_false(element, compiler, **kw):
return "0"
Example usage::
from sqlalchemy import select, union_all
exp = union_all(
select([users.c.name, sql_false().label("enrolled")]),
select([customers.c.name, customers.c.enrolled])
)
"""
from .. import exc
from .. import util
from ..sql import visitors
def compiles(class_, *specs):
"""Register a function as a compiler for a
given :class:`_expression.ClauseElement` type."""
def decorate(fn):
# get an existing @compiles handler
existing = class_.__dict__.get("_compiler_dispatcher", None)
# get the original handler. All ClauseElement classes have one
# of these, but some TypeEngine classes will not.
existing_dispatch = getattr(class_, "_compiler_dispatch", None)
if not existing:
existing = _dispatcher()
if existing_dispatch:
def _wrap_existing_dispatch(element, compiler, **kw):
try:
return existing_dispatch(element, compiler, **kw)
except exc.UnsupportedCompilationError as uce:
util.raise_(
exc.CompileError(
"%s construct has no default "
"compilation handler." % type(element)
),
from_=uce,
)
existing.specs["default"] = _wrap_existing_dispatch
            # a lambda (i.e. a plain function) is used so that descriptor
            # binding takes place: element._compiler_dispatch(compiler, **kw)
            # then passes the element itself as the first positional
            # argument, which a non-function callable stored on the class
            # would not receive
setattr(
class_,
"_compiler_dispatch",
lambda *arg, **kw: existing(*arg, **kw),
)
setattr(class_, "_compiler_dispatcher", existing)
if specs:
for s in specs:
existing.specs[s] = fn
else:
existing.specs["default"] = fn
return fn
return decorate
def deregister(class_):
"""Remove all custom compilers associated with a given
:class:`_expression.ClauseElement` type."""
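    # e.g., assuming compilation for ``MyColumn`` was customized as in the
    # synopsis above, ``deregister(MyColumn)`` would restore its default
    # compilation behavior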
if hasattr(class_, "_compiler_dispatcher"):
# regenerate default _compiler_dispatch
visitors._generate_dispatch(class_)
# remove custom directive
del class_._compiler_dispatcher
class _dispatcher(object):
def __init__(self):
self.specs = {}
def __call__(self, element, compiler, **kw):
# TODO: yes, this could also switch off of DBAPI in use.
fn = self.specs.get(compiler.dialect.name, None)
if not fn:
try:
fn = self.specs["default"]
except KeyError as ke:
util.raise_(
exc.CompileError(
"%s construct has no default "
"compilation handler." % type(element)
),
replace_context=ke,
)
return fn(element, compiler, **kw)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext/orderinglist.py | # ext/orderinglist.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""A custom list that manages index/position information for contained
elements.
:author: Jason Kirtland
``orderinglist`` is a helper for mutable ordered relationships. It will
intercept list operations performed on a :func:`_orm.relationship`-managed
collection and
automatically synchronize changes in list position onto a target scalar
attribute.
Example: A ``slide`` table, where each row refers to zero or more entries
in a related ``bullet`` table. The bullets within a slide are
displayed in order based on the value of the ``position`` column in the
``bullet`` table. As entries are reordered in memory, the value of the
``position`` attribute should be updated to reflect the new sort order::
Base = declarative_base()
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position")
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
The standard relationship mapping will produce a list-like attribute on each
``Slide`` containing all related ``Bullet`` objects,
but coping with changes in ordering is not handled automatically.
When appending a ``Bullet`` into ``Slide.bullets``, the ``Bullet.position``
attribute will remain unset until manually assigned. When the ``Bullet``
is inserted into the middle of the list, the following ``Bullet`` objects
will also need to be renumbered.
The :class:`.OrderingList` object automates this task, managing the
``position`` attribute on all ``Bullet`` objects in the collection. It is
constructed using the :func:`.ordering_list` factory::
from sqlalchemy.ext.orderinglist import ordering_list
Base = declarative_base()
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
With the above mapping the ``Bullet.position`` attribute is managed::
s = Slide()
s.bullets.append(Bullet())
s.bullets.append(Bullet())
s.bullets[1].position
>>> 1
s.bullets.insert(1, Bullet())
s.bullets[2].position
>>> 2
The :class:`.OrderingList` construct only works with **changes** to a
collection, and not the initial load from the database, and requires that the
list be sorted when loaded. Therefore, be sure to specify ``order_by`` on the
:func:`_orm.relationship` against the target ordering attribute, so that the
ordering is correct when first loaded.
.. warning::
:class:`.OrderingList` only provides limited functionality when a primary
key column or unique column is the target of the sort. Operations
that are unsupported or are problematic include:
* two entries must trade values. This is not supported directly in the
case of a primary key or unique constraint because it means at least
one row would need to be temporarily removed first, or changed to
a third, neutral value while the switch occurs.
* an entry must be deleted in order to make room for a new entry.
SQLAlchemy's unit of work performs all INSERTs before DELETEs within a
single flush. In the case of a primary key, it will trade
an INSERT/DELETE of the same primary key for an UPDATE statement in order
to lessen the impact of this limitation, however this does not take place
for a UNIQUE column.
A future feature will allow the "DELETE before INSERT" behavior to be
possible, alleviating this limitation, though this feature will require
explicit configuration at the mapper level for sets of columns that
are to be handled in this way.
:func:`.ordering_list` takes the name of the related object's ordering
attribute as an argument. By default, the zero-based integer index of the
object's position in the :func:`.ordering_list` is synchronized with the
ordering attribute: index 0 will get position 0, index 1 position 1, etc. To
start numbering at 1 or some other integer, provide ``count_from=1``.
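For example, to maintain a 1-based value in a ``position`` column::

    bullets = relationship("Bullet", order_by="Bullet.position",
                collection_class=ordering_list('position', count_from=1))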
"""
from .. import util
from ..orm.collections import collection
from ..orm.collections import collection_adapter
__all__ = ["ordering_list"]
def ordering_list(attr, count_from=None, **kw):
"""Prepares an :class:`OrderingList` factory for use in mapper definitions.
Returns an object suitable for use as an argument to a Mapper
relationship's ``collection_class`` option. e.g.::
from sqlalchemy.ext.orderinglist import ordering_list
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
:param attr:
Name of the mapped attribute to use for storage and retrieval of
ordering information
:param count_from:
Set up an integer-based ordering, starting at ``count_from``. For
example, ``ordering_list('pos', count_from=1)`` would create a 1-based
list in SQL, storing the value in the 'pos' column. Ignored if
``ordering_func`` is supplied.
Additional arguments are passed to the :class:`.OrderingList` constructor.
"""
kw = _unsugar_count_from(count_from=count_from, **kw)
return lambda: OrderingList(attr, **kw)
# Ordering utility functions
def count_from_0(index, collection):
"""Numbering function: consecutive integers starting at 0."""
return index
def count_from_1(index, collection):
"""Numbering function: consecutive integers starting at 1."""
return index + 1
def count_from_n_factory(start):
"""Numbering function: consecutive integers starting at arbitrary start."""
def f(index, collection):
return index + start
try:
f.__name__ = "count_from_%i" % start
except TypeError:
pass
return f
def _unsugar_count_from(**kw):
"""Builds counting functions from keyword arguments.
A keyword argument filter: prepares a simple ``ordering_func`` from a
``count_from`` argument, and otherwise passes ``ordering_func`` on unchanged.
"""
count_from = kw.pop("count_from", None)
if kw.get("ordering_func", None) is None and count_from is not None:
if count_from == 0:
kw["ordering_func"] = count_from_0
elif count_from == 1:
kw["ordering_func"] = count_from_1
else:
kw["ordering_func"] = count_from_n_factory(count_from)
return kw
class OrderingList(list):
"""A custom list that manages position information for its children.
The :class:`.OrderingList` object is normally set up using the
:func:`.ordering_list` factory function, used in conjunction with
the :func:`_orm.relationship` function.
"""
def __init__(
self, ordering_attr=None, ordering_func=None, reorder_on_append=False
):
"""A custom list that manages position information for its children.
``OrderingList`` is a ``collection_class`` list implementation that
syncs position in a Python list with a position attribute on the
mapped objects.
This implementation relies on the list starting in the proper order,
so be **sure** to put an ``order_by`` on your relationship.
:param ordering_attr:
Name of the attribute that stores the object's order in the
relationship.
:param ordering_func: Optional. A function that maps the position in
the Python list to a value to store in the
``ordering_attr``. Values returned are usually (but need not be!)
integers.
An ``ordering_func`` is called with two positional parameters: the
index of the element in the list, and the list itself.
If omitted, Python list indexes are used for the attribute values.
Two basic pre-built numbering functions are provided in this module:
``count_from_0`` and ``count_from_1``. For more exotic examples
like stepped numbering, alphabetical and Fibonacci numbering, see
the unit tests.
:param reorder_on_append:
Default False. When appending an object with an existing (non-None)
ordering value, that value will be left untouched unless
``reorder_on_append`` is true. This is an optimization to avoid a
variety of dangerous unexpected database writes.
SQLAlchemy will add instances to the list via append() when your
object loads. If for some reason the result set from the database
skips a step in the ordering (say, row '1' is missing but you get
'2', '3', and '4'), reorder_on_append=True would immediately
renumber the items to '1', '2', '3'. If you have multiple sessions
making changes, any of whom happen to load this collection even in
passing, all of the sessions would try to "clean up" the numbering
in their commits, possibly causing all but one to fail with a
concurrent modification error.
It is recommended to leave this set to the default of False, and to
call ``reorder()`` explicitly if you're doing ``append()`` operations
with previously ordered instances, or when doing housekeeping after
manual SQL operations.
"""
self.ordering_attr = ordering_attr
if ordering_func is None:
ordering_func = count_from_0
self.ordering_func = ordering_func
self.reorder_on_append = reorder_on_append
# More complex serialization schemes (multi column, e.g.) are possible by
# subclassing and reimplementing these two methods.
def _get_order_value(self, entity):
return getattr(entity, self.ordering_attr)
def _set_order_value(self, entity, value):
setattr(entity, self.ordering_attr, value)
def reorder(self):
"""Synchronize ordering for the entire collection.
Sweeps through the list and ensures that each object has accurate
ordering information set.
"""
for index, entity in enumerate(self):
self._order_entity(index, entity, True)
# As of 0.5, _reorder is no longer semi-private
_reorder = reorder
def _order_entity(self, index, entity, reorder=True):
have = self._get_order_value(entity)
# Don't disturb existing ordering if reorder is False
if have is not None and not reorder:
return
should_be = self.ordering_func(index, self)
if have != should_be:
self._set_order_value(entity, should_be)
def append(self, entity):
super(OrderingList, self).append(entity)
self._order_entity(len(self) - 1, entity, self.reorder_on_append)
def _raw_append(self, entity):
"""Append without any ordering behavior."""
super(OrderingList, self).append(entity)
_raw_append = collection.adds(1)(_raw_append)
def insert(self, index, entity):
super(OrderingList, self).insert(index, entity)
self._reorder()
def remove(self, entity):
super(OrderingList, self).remove(entity)
adapter = collection_adapter(self)
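        # renumber only if this list is still the collection referenced
        # by its owner; this avoids reordering a collection that is in
        # the process of being replaced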
if adapter and adapter._referenced_by_owner:
self._reorder()
def pop(self, index=-1):
entity = super(OrderingList, self).pop(index)
self._reorder()
return entity
def __setitem__(self, index, entity):
if isinstance(index, slice):
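            # slice assignment: normalize negative bounds, then assign
            # and re-order each incoming entity at its absolute position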
step = index.step or 1
start = index.start or 0
if start < 0:
start += len(self)
stop = index.stop or len(self)
if stop < 0:
stop += len(self)
for i in range(start, stop, step):
self.__setitem__(i, entity[i])
else:
self._order_entity(index, entity, True)
super(OrderingList, self).__setitem__(index, entity)
def __delitem__(self, index):
super(OrderingList, self).__delitem__(index)
self._reorder()
def __setslice__(self, start, end, values):
super(OrderingList, self).__setslice__(start, end, values)
self._reorder()
def __delslice__(self, start, end):
super(OrderingList, self).__delslice__(start, end)
self._reorder()
def __reduce__(self):
return _reconstitute, (self.__class__, self.__dict__, list(self))
for func_name, func in list(locals().items()):
if (
util.callable(func)
and func.__name__ == func_name
and not func.__doc__
and hasattr(list, func_name)
):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
def _reconstitute(cls, dict_, items):
""" Reconstitute an :class:`.OrderingList`.
This is the adjoint to :meth:`.OrderingList.__reduce__`. It is used for
unpickling :class:`.OrderingList` objects.
"""
obj = cls.__new__(cls)
obj.__dict__.update(dict_)
list.extend(obj, items)
return obj
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext/hybrid.py | # ext/hybrid.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""Define attributes on ORM-mapped classes that have "hybrid" behavior.
"hybrid" means the attribute has distinct behaviors defined at the
class level and at the instance level.
The :mod:`~sqlalchemy.ext.hybrid` extension provides a special form of
method decorator. It is around 50 lines of code and has almost no
dependencies on the rest of SQLAlchemy. It can, in theory, work with
any descriptor-based expression system.
Consider a mapping ``Interval``, representing integer ``start`` and ``end``
values. We can define higher level functions on mapped classes that produce SQL
expressions at the class level, and Python expression evaluation at the
instance level. Below, each function decorated with :class:`.hybrid_method` or
:class:`.hybrid_property` may receive ``self`` as an instance of the class, or
as the class itself::
from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, aliased
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
Base = declarative_base()
class Interval(Base):
__tablename__ = 'interval'
id = Column(Integer, primary_key=True)
start = Column(Integer, nullable=False)
end = Column(Integer, nullable=False)
def __init__(self, start, end):
self.start = start
self.end = end
@hybrid_property
def length(self):
return self.end - self.start
@hybrid_method
def contains(self, point):
return (self.start <= point) & (point <= self.end)
@hybrid_method
def intersects(self, other):
return self.contains(other.start) | self.contains(other.end)
Above, the ``length`` property returns the difference between the
``end`` and ``start`` attributes. With an instance of ``Interval``,
this subtraction occurs in Python, using normal Python descriptor
mechanics::
>>> i1 = Interval(5, 10)
>>> i1.length
5
When dealing with the ``Interval`` class itself, the :class:`.hybrid_property`
descriptor evaluates the function body given the ``Interval`` class as
the argument, which when evaluated with SQLAlchemy expression mechanics
returns a new SQL expression::
>>> print(Interval.length)
interval."end" - interval.start
>>> print(Session().query(Interval).filter(Interval.length > 10))
SELECT interval.id AS interval_id, interval.start AS interval_start,
interval."end" AS interval_end
FROM interval
WHERE interval."end" - interval.start > :param_1
ORM methods such as :meth:`_query.Query.filter_by`
generally use ``getattr()`` to
locate attributes, so they can also be used with hybrid attributes::
>>> print(Session().query(Interval).filter_by(length=5))
SELECT interval.id AS interval_id, interval.start AS interval_start,
interval."end" AS interval_end
FROM interval
WHERE interval."end" - interval.start = :param_1
The ``Interval`` class example also illustrates two methods,
``contains()`` and ``intersects()``, decorated with
:class:`.hybrid_method`. This decorator applies the same idea to
methods that :class:`.hybrid_property` applies to attributes. The
methods return boolean values, and take advantage of the Python ``|``
and ``&`` bitwise operators to produce equivalent instance-level and
SQL expression-level boolean behavior::
>>> i1.contains(6)
True
>>> i1.contains(15)
False
>>> i1.intersects(Interval(7, 18))
True
>>> i1.intersects(Interval(25, 29))
False
>>> print(Session().query(Interval).filter(Interval.contains(15)))
SELECT interval.id AS interval_id, interval.start AS interval_start,
interval."end" AS interval_end
FROM interval
    WHERE interval.start <= :start_1 AND interval."end" >= :end_1
>>> ia = aliased(Interval)
>>> print(Session().query(Interval, ia).filter(Interval.intersects(ia)))
SELECT interval.id AS interval_id, interval.start AS interval_start,
interval."end" AS interval_end, interval_1.id AS interval_1_id,
interval_1.start AS interval_1_start, interval_1."end" AS interval_1_end
FROM interval, interval AS interval_1
    WHERE interval.start <= interval_1.start
        AND interval."end" >= interval_1.start
        OR interval.start <= interval_1."end"
        AND interval."end" >= interval_1."end"
.. _hybrid_distinct_expression:
Defining Expression Behavior Distinct from Attribute Behavior
--------------------------------------------------------------
Our usage of the ``&`` and ``|`` bitwise operators above was
fortunate, considering our functions operated on two boolean values to
return a new one. In many cases, the construction of an in-Python
function and a SQLAlchemy SQL expression have enough differences that
two separate Python expressions should be defined. The
:mod:`~sqlalchemy.ext.hybrid` decorators define the
:meth:`.hybrid_property.expression` modifier for this purpose. As an
example we'll define the radius of the interval, which requires the
usage of the absolute value function::
from sqlalchemy import func
class Interval(object):
# ...
@hybrid_property
def radius(self):
return abs(self.length) / 2
@radius.expression
def radius(cls):
return func.abs(cls.length) / 2
Above the Python function ``abs()`` is used for instance-level
operations, the SQL function ``ABS()`` is used via the :data:`.func`
object for class-level expressions::
>>> i1.radius
2
>>> print(Session().query(Interval).filter(Interval.radius > 5))
SELECT interval.id AS interval_id, interval.start AS interval_start,
interval."end" AS interval_end
FROM interval
WHERE abs(interval."end" - interval.start) / :abs_1 > :param_1
.. note:: When defining an expression for a hybrid property or method, the
expression method **must** retain the name of the original hybrid, else
the new hybrid with the additional state will be attached to the class
with the non-matching name. To use the example above::
class Interval(object):
# ...
@hybrid_property
def radius(self):
return abs(self.length) / 2
# WRONG - the non-matching name will cause this function to be
# ignored
@radius.expression
def radius_expression(cls):
return func.abs(cls.length) / 2
This is also true for other mutator methods, such as
:meth:`.hybrid_property.update_expression`. This is the same behavior
as that of the ``@property`` construct that is part of standard Python.
Defining Setters
----------------
Hybrid properties can also define setter methods. If we wanted
``length`` above, when set, to modify the endpoint value::
class Interval(object):
# ...
@hybrid_property
def length(self):
return self.end - self.start
@length.setter
def length(self, value):
self.end = self.start + value
The ``length(self, value)`` method is now called upon set::
>>> i1 = Interval(5, 10)
>>> i1.length
5
>>> i1.length = 12
>>> i1.end
17
.. _hybrid_bulk_update:
Allowing Bulk ORM Update
------------------------
A hybrid can define a custom "UPDATE" handler for when using the
:meth:`_query.Query.update` method, allowing the hybrid to be used in the
SET clause of the update.
Normally, when using a hybrid with :meth:`_query.Query.update`, the SQL
expression is used as the column that's the target of the SET. If our
``Interval`` class had a hybrid ``start_point`` that linked to
``Interval.start``, this could be substituted directly::
session.query(Interval).update({Interval.start_point: 10})
However, when using a composite hybrid like ``Interval.length``, this
hybrid represents more than one column. We can set up a handler that will
accommodate a value passed to :meth:`_query.Query.update` which can affect
this, using the :meth:`.hybrid_property.update_expression` decorator.
A handler that works similarly to our setter would be::
class Interval(object):
# ...
@hybrid_property
def length(self):
return self.end - self.start
@length.setter
def length(self, value):
self.end = self.start + value
@length.update_expression
def length(cls, value):
return [
(cls.end, cls.start + value)
]
Above, if we use ``Interval.length`` in an UPDATE expression as::
session.query(Interval).update(
{Interval.length: 25}, synchronize_session='fetch')
We'll get an UPDATE statement along the lines of::
UPDATE interval SET end=start + :value
In some cases, the default "evaluate" strategy can't perform the SET
expression in Python; while the addition operator we're using above
is supported, for more complex SET expressions it will usually be necessary
to use either the "fetch" or False synchronization strategy as illustrated
above.
.. versionadded:: 1.2 added support for bulk updates to hybrid properties.
Working with Relationships
--------------------------
There's no essential difference when creating hybrids that work with
related objects as opposed to column-based data. The need for distinct
expressions tends to be greater. The two variants we'll illustrate
are the "join-dependent" hybrid, and the "correlated subquery" hybrid.
Join-Dependent Relationship Hybrid
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Consider the following declarative
mapping which relates a ``User`` to a ``SavingsAccount``::
from sqlalchemy import Column, Integer, ForeignKey, Numeric, String
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
Base = declarative_base()
class SavingsAccount(Base):
__tablename__ = 'account'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
balance = Column(Numeric(15, 5))
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
accounts = relationship("SavingsAccount", backref="owner")
@hybrid_property
def balance(self):
if self.accounts:
return self.accounts[0].balance
else:
return None
@balance.setter
def balance(self, value):
if not self.accounts:
                account = SavingsAccount(owner=self)
else:
account = self.accounts[0]
account.balance = value
@balance.expression
def balance(cls):
return SavingsAccount.balance
The above hybrid property ``balance`` works with the first
``SavingsAccount`` entry in the list of accounts for this user. The
in-Python getter/setter methods can treat ``accounts`` as a Python
list available on ``self``.
However, at the expression level, it's expected that the ``User`` class will
be used in an appropriate context such that an appropriate join to
``SavingsAccount`` will be present::
>>> print(Session().query(User, User.balance).
... join(User.accounts).filter(User.balance > 5000))
SELECT "user".id AS user_id, "user".name AS user_name,
account.balance AS account_balance
FROM "user" JOIN account ON "user".id = account.user_id
WHERE account.balance > :balance_1
Note however, that while the instance level accessors need to worry
about whether ``self.accounts`` is even present, this issue expresses
itself differently at the SQL expression level, where we basically
would use an outer join::
>>> from sqlalchemy import or_
>>> print (Session().query(User, User.balance).outerjoin(User.accounts).
... filter(or_(User.balance < 5000, User.balance == None)))
SELECT "user".id AS user_id, "user".name AS user_name,
account.balance AS account_balance
FROM "user" LEFT OUTER JOIN account ON "user".id = account.user_id
WHERE account.balance < :balance_1 OR account.balance IS NULL
Correlated Subquery Relationship Hybrid
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We can, of course, forego being dependent on the enclosing query's usage
of joins in favor of the correlated subquery, which can portably be packed
into a single column expression. A correlated subquery is more portable, but
often performs more poorly at the SQL level. Using the same technique
illustrated at :ref:`mapper_column_property_sql_expressions`,
we can adjust our ``SavingsAccount`` example to aggregate the balances for
*all* accounts, and use a correlated subquery for the column expression::
from sqlalchemy import Column, Integer, ForeignKey, Numeric, String
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import select, func
Base = declarative_base()
class SavingsAccount(Base):
__tablename__ = 'account'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
balance = Column(Numeric(15, 5))
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
accounts = relationship("SavingsAccount", backref="owner")
@hybrid_property
def balance(self):
return sum(acc.balance for acc in self.accounts)
@balance.expression
def balance(cls):
return select([func.sum(SavingsAccount.balance)]).\
where(SavingsAccount.user_id==cls.id).\
label('total_balance')
The above recipe will give us the ``balance`` column which renders
a correlated SELECT::
    >>> print(Session().query(User).filter(User.balance > 400))
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE (SELECT sum(account.balance) AS sum_1
FROM account
WHERE account.user_id = "user".id) > :param_1
.. _hybrid_custom_comparators:
Building Custom Comparators
---------------------------
The hybrid property also includes a helper that allows construction of
custom comparators. A comparator object allows one to customize the
behavior of each SQLAlchemy expression operator individually. They
are useful when creating custom types that have some highly
idiosyncratic behavior on the SQL side.
.. note:: The :meth:`.hybrid_property.comparator` decorator introduced
in this section **replaces** the use of the
:meth:`.hybrid_property.expression` decorator.
They cannot be used together.
The example class below allows case-insensitive comparisons on the attribute
named ``word_insensitive``::
from sqlalchemy.ext.hybrid import Comparator, hybrid_property
from sqlalchemy import func, Column, Integer, String
from sqlalchemy.orm import Session
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class CaseInsensitiveComparator(Comparator):
def __eq__(self, other):
return func.lower(self.__clause_element__()) == func.lower(other)
class SearchWord(Base):
__tablename__ = 'searchword'
id = Column(Integer, primary_key=True)
word = Column(String(255), nullable=False)
@hybrid_property
def word_insensitive(self):
return self.word.lower()
@word_insensitive.comparator
def word_insensitive(cls):
return CaseInsensitiveComparator(cls.word)
Above, SQL expressions against ``word_insensitive`` will apply the ``LOWER()``
SQL function to both sides::
>>> print(Session().query(SearchWord).filter_by(word_insensitive="Trucks"))
SELECT searchword.id AS searchword_id, searchword.word AS searchword_word
FROM searchword
WHERE lower(searchword.word) = lower(:lower_1)
The ``CaseInsensitiveComparator`` above implements part of the
:class:`.ColumnOperators` interface. A "coercion" operation like
lowercasing can be applied to all comparison operations (i.e. ``eq``,
``lt``, ``gt``, etc.) using :meth:`.Operators.operate`::
class CaseInsensitiveComparator(Comparator):
def operate(self, op, other):
return op(func.lower(self.__clause_element__()), func.lower(other))
.. _hybrid_reuse_subclass:
Reusing Hybrid Properties across Subclasses
-------------------------------------------
A hybrid can be referred to from a superclass, to allow modifying
methods like :meth:`.hybrid_property.getter`, :meth:`.hybrid_property.setter`
to be used to redefine those methods on a subclass. This is similar to
how the standard Python ``@property`` object works::
class FirstNameOnly(Base):
# ...
first_name = Column(String)
@hybrid_property
def name(self):
return self.first_name
@name.setter
def name(self, value):
self.first_name = value
class FirstNameLastName(FirstNameOnly):
# ...
last_name = Column(String)
@FirstNameOnly.name.getter
def name(self):
return self.first_name + ' ' + self.last_name
@name.setter
def name(self, value):
self.first_name, self.last_name = value.split(' ', 1)
Above, the ``FirstNameLastName`` class refers to the hybrid from
``FirstNameOnly.name`` to repurpose its getter and setter for the subclass.
When overriding :meth:`.hybrid_property.expression` and
:meth:`.hybrid_property.comparator` alone as the first reference to the
superclass, these names conflict with the same-named accessors on the
class-level :class:`.QueryableAttribute` object returned at the class level.
To override these methods when referring directly to the parent class
descriptor, add the special qualifier :attr:`.hybrid_property.overrides`,
which will de-reference the instrumented attribute back to the hybrid
object::
class FirstNameLastName(FirstNameOnly):
# ...
last_name = Column(String)
@FirstNameOnly.name.overrides.expression
def name(cls):
return func.concat(cls.first_name, ' ', cls.last_name)
.. versionadded:: 1.2 Added :meth:`.hybrid_property.getter` as well as the
ability to redefine accessors per-subclass.
Hybrid Value Objects
--------------------
Note in our previous example, if we were to compare the ``word_insensitive``
attribute of a ``SearchWord`` instance to a plain Python string, the plain
Python string would not be coerced to lower case - the
``CaseInsensitiveComparator`` we built, being returned by
``@word_insensitive.comparator``, only applies to the SQL side.
A more comprehensive form of the custom comparator is to construct a *Hybrid
Value Object*. This technique applies the target value or expression to a value
object which is then returned by the accessor in all cases. The value object
allows control of all operations upon the value as well as how compared values
are treated, both on the SQL expression side as well as the Python value side.
Replacing the previous ``CaseInsensitiveComparator`` class with a new
``CaseInsensitiveWord`` class::
class CaseInsensitiveWord(Comparator):
"Hybrid value representing a lower case representation of a word."
def __init__(self, word):
            if isinstance(word, str):
self.word = word.lower()
elif isinstance(word, CaseInsensitiveWord):
self.word = word.word
else:
self.word = func.lower(word)
def operate(self, op, other):
if not isinstance(other, CaseInsensitiveWord):
other = CaseInsensitiveWord(other)
return op(self.word, other.word)
def __clause_element__(self):
return self.word
def __str__(self):
return self.word
key = 'word'
"Label to apply to Query tuple results"
Above, the ``CaseInsensitiveWord`` object represents ``self.word``, which may
be a SQL function, or may be a Python native. By overriding ``operate()`` and
``__clause_element__()`` to work in terms of ``self.word``, all comparison
operations will work against the "converted" form of ``word``, whether it be
SQL side or Python side. Our ``SearchWord`` class can now deliver the
``CaseInsensitiveWord`` object unconditionally from a single hybrid call::
class SearchWord(Base):
__tablename__ = 'searchword'
id = Column(Integer, primary_key=True)
word = Column(String(255), nullable=False)
@hybrid_property
def word_insensitive(self):
return CaseInsensitiveWord(self.word)
The ``word_insensitive`` attribute now has case-insensitive comparison behavior
universally, including SQL expression vs. Python expression (note the Python
value is converted to lower case on the Python side here)::
>>> print(Session().query(SearchWord).filter_by(word_insensitive="Trucks"))
SELECT searchword.id AS searchword_id, searchword.word AS searchword_word
FROM searchword
WHERE lower(searchword.word) = :lower_1
SQL expression versus SQL expression::
>>> sw1 = aliased(SearchWord)
>>> sw2 = aliased(SearchWord)
>>> print(Session().query(
... sw1.word_insensitive,
... sw2.word_insensitive).\
... filter(
... sw1.word_insensitive > sw2.word_insensitive
... ))
SELECT lower(searchword_1.word) AS lower_1,
lower(searchword_2.word) AS lower_2
FROM searchword AS searchword_1, searchword AS searchword_2
WHERE lower(searchword_1.word) > lower(searchword_2.word)
Python only expression::
>>> ws1 = SearchWord(word="SomeWord")
>>> ws1.word_insensitive == "sOmEwOrD"
True
>>> ws1.word_insensitive == "XOmEwOrX"
False
>>> print(ws1.word_insensitive)
someword
The Hybrid Value pattern is very useful for any kind of value that may have
multiple representations, such as timestamps, time deltas, units of
measurement, currencies and encrypted passwords.
.. seealso::
`Hybrids and Value Agnostic Types
<http://techspot.zzzeek.org/2011/10/21/hybrids-and-value-agnostic-types/>`_
- on the techspot.zzzeek.org blog
`Value Agnostic Types, Part II
<http://techspot.zzzeek.org/2011/10/29/value-agnostic-types-part-ii/>`_ -
on the techspot.zzzeek.org blog
.. _hybrid_transformers:
Building Transformers
----------------------
A *transformer* is an object which can receive a :class:`_query.Query`
object and
return a new one. The :class:`_query.Query` object includes a method
:meth:`.with_transformation` that returns a new :class:`_query.Query`
transformed by
the given function.
We can combine this with the :class:`.Comparator` class to produce one type
of recipe which can both set up the FROM clause of a query as well as assign
filtering criterion.
Consider a mapped class ``Node``, which assembles into a hierarchical tree
pattern using an adjacency list::
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Node(Base):
__tablename__ = 'node'
id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey('node.id'))
parent = relationship("Node", remote_side=id)
Suppose we wanted to add an accessor ``grandparent``. This would return the
``parent`` of ``Node.parent``. When we have an instance of ``Node``, this is
simple::
from sqlalchemy.ext.hybrid import hybrid_property
class Node(Base):
# ...
@hybrid_property
def grandparent(self):
return self.parent.parent
For the expression, things are not so clear. We'd need to construct a
:class:`_query.Query` where we :meth:`_query.Query.join` twice along ``Node.
parent`` to
get to the ``grandparent``. We can instead return a transforming callable
that we'll combine with the :class:`.Comparator` class to receive any
:class:`_query.Query` object, and return a new one that's joined to the
``Node.parent`` attribute and filtered based on the given criterion::
from sqlalchemy.ext.hybrid import Comparator
class GrandparentTransformer(Comparator):
def operate(self, op, other):
def transform(q):
cls = self.__clause_element__()
parent_alias = aliased(cls)
return q.join(parent_alias, cls.parent).\
filter(op(parent_alias.parent, other))
return transform
Base = declarative_base()
class Node(Base):
__tablename__ = 'node'
        id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey('node.id'))
parent = relationship("Node", remote_side=id)
@hybrid_property
def grandparent(self):
return self.parent.parent
@grandparent.comparator
def grandparent(cls):
return GrandparentTransformer(cls)
The ``GrandparentTransformer`` overrides the core :meth:`.Operators.operate`
method at the base of the :class:`.Comparator` hierarchy to return a
query-transforming callable, which then runs the given comparison operation
in a particular context. For example, in the case above, the ``operate``
method is
called, given the :attr:`.Operators.eq` callable as well as the right side of
the comparison ``Node(id=5)``. A function ``transform`` is then returned which
will transform a :class:`_query.Query` first to join to ``Node.parent``,
then to
compare ``parent_alias`` using :attr:`.Operators.eq` against the left and right
sides, passing into :class:`_query.Query.filter`:
.. sourcecode:: pycon+sql
>>> from sqlalchemy.orm import Session
>>> session = Session()
{sql}>>> session.query(Node).\
... with_transformation(Node.grandparent==Node(id=5)).\
... all()
SELECT node.id AS node_id, node.parent_id AS node_parent_id
FROM node JOIN node AS node_1 ON node_1.id = node.parent_id
WHERE :param_1 = node_1.parent_id
{stop}
We can modify the pattern to be more verbose but flexible by separating the
"join" step from the "filter" step. The tricky part here is ensuring that
successive instances of ``GrandparentTransformer`` use the same
:class:`.AliasedClass` object against ``Node``. Below we use a simple
memoizing approach that associates a ``GrandparentTransformer`` with each
class::
class Node(Base):
# ...
@grandparent.comparator
def grandparent(cls):
# memoize a GrandparentTransformer
# per class
if '_gp' not in cls.__dict__:
cls._gp = GrandparentTransformer(cls)
return cls._gp
class GrandparentTransformer(Comparator):
def __init__(self, cls):
self.parent_alias = aliased(cls)
@property
def join(self):
def go(q):
return q.join(self.parent_alias, Node.parent)
return go
def operate(self, op, other):
return op(self.parent_alias.parent, other)
.. sourcecode:: pycon+sql
{sql}>>> session.query(Node).\
... with_transformation(Node.grandparent.join).\
... filter(Node.grandparent==Node(id=5))
SELECT node.id AS node_id, node.parent_id AS node_parent_id
FROM node JOIN node AS node_1 ON node_1.id = node.parent_id
WHERE :param_1 = node_1.parent_id
{stop}
The "transformer" pattern is an experimental pattern that starts to make usage
of some functional programming paradigms. While it's only recommended for
advanced and/or patient developers, there's probably a whole lot of amazing
things it can be used for.
""" # noqa
from .. import util
from ..orm import attributes
from ..orm import interfaces
HYBRID_METHOD = util.symbol("HYBRID_METHOD")
"""Symbol indicating an :class:`InspectionAttr` that's
of type :class:`.hybrid_method`.
Is assigned to the :attr:`.InspectionAttr.extension_type`
attribute.
.. seealso::
:attr:`_orm.Mapper.all_orm_attributes`
"""
HYBRID_PROPERTY = util.symbol("HYBRID_PROPERTY")
"""Symbol indicating an :class:`InspectionAttr` that's
of type :class:`.hybrid_property`.
Is assigned to the :attr:`.InspectionAttr.extension_type`
attribute.
.. seealso::
:attr:`_orm.Mapper.all_orm_attributes`
"""
class hybrid_method(interfaces.InspectionAttrInfo):
"""A decorator which allows definition of a Python object method with both
instance-level and class-level behavior.
"""
is_attribute = True
extension_type = HYBRID_METHOD
def __init__(self, func, expr=None):
"""Create a new :class:`.hybrid_method`.
Usage is typically via decorator::
from sqlalchemy.ext.hybrid import hybrid_method
class SomeClass(object):
@hybrid_method
def value(self, x, y):
return self._value + x + y
@value.expression
def value(self, x, y):
return func.some_function(self._value, x, y)
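For illustration, a hypothetical usage sketch; ``SomeClass`` and
``some_function`` are the assumed names from the example above::

    # instance level: runs the plain Python method
    some_object.value(10, 20)

    # class level: produces a SQL expression for use in a query
    session.query(SomeClass).filter(SomeClass.value(10, 20) > 100)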
"""
self.func = func
self.expression(expr or func)
def __get__(self, instance, owner):
if instance is None:
return self.expr.__get__(owner, owner.__class__)
else:
return self.func.__get__(instance, owner)
def expression(self, expr):
"""Provide a modifying decorator that defines a
SQL-expression producing method."""
self.expr = expr
if not self.expr.__doc__:
self.expr.__doc__ = self.func.__doc__
return self
class hybrid_property(interfaces.InspectionAttrInfo):
"""A decorator which allows definition of a Python descriptor with both
instance-level and class-level behavior.
"""
is_attribute = True
extension_type = HYBRID_PROPERTY
def __init__(
self,
fget,
fset=None,
fdel=None,
expr=None,
custom_comparator=None,
update_expr=None,
):
"""Create a new :class:`.hybrid_property`.
Usage is typically via decorator::
from sqlalchemy.ext.hybrid import hybrid_property
class SomeClass(object):
@hybrid_property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
"""
self.fget = fget
self.fset = fset
self.fdel = fdel
self.expr = expr
self.custom_comparator = custom_comparator
self.update_expr = update_expr
util.update_wrapper(self, fget)
def __get__(self, instance, owner):
if instance is None:
return self._expr_comparator(owner)
else:
return self.fget(instance)
def __set__(self, instance, value):
if self.fset is None:
raise AttributeError("can't set attribute")
self.fset(instance, value)
def __delete__(self, instance):
if self.fdel is None:
raise AttributeError("can't delete attribute")
self.fdel(instance)
def _copy(self, **kw):
defaults = {
key: value
for key, value in self.__dict__.items()
if not key.startswith("_")
}
defaults.update(**kw)
return type(self)(**defaults)
@property
def overrides(self):
"""Prefix for a method that is overriding an existing attribute.
The :attr:`.hybrid_property.overrides` accessor just returns
this hybrid object, which, when called at the class level from
a parent class, will de-reference the "instrumented attribute"
normally returned at this level, and allow modifying decorators
like :meth:`.hybrid_property.expression` and
:meth:`.hybrid_property.comparator`
to be used without conflicting with the same-named attributes
normally present on the :class:`.QueryableAttribute`::
class SuperClass(object):
# ...
@hybrid_property
def foobar(self):
return self._foobar
class SubClass(SuperClass):
# ...
@SuperClass.foobar.overrides.expression
def foobar(cls):
return func.subfoobar(cls._foobar)
.. versionadded:: 1.2
.. seealso::
:ref:`hybrid_reuse_subclass`
"""
return self
def getter(self, fget):
"""Provide a modifying decorator that defines a getter method.
.. versionadded:: 1.2
"""
return self._copy(fget=fget)
def setter(self, fset):
"""Provide a modifying decorator that defines a setter method."""
return self._copy(fset=fset)
def deleter(self, fdel):
"""Provide a modifying decorator that defines a deletion method."""
return self._copy(fdel=fdel)
def expression(self, expr):
"""Provide a modifying decorator that defines a SQL-expression
producing method.
When a hybrid is invoked at the class level, the SQL expression given
here is wrapped inside of a specialized :class:`.QueryableAttribute`,
which is the same kind of object used by the ORM to represent other
mapped attributes. The reason for this is so that other class-level
attributes such as docstrings and a reference to the hybrid itself may
be maintained within the structure that's returned, without any
modifications to the original SQL expression passed in.
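E.g., a minimal sketch, assuming an ``Interval`` mapping with a
``length`` column::

    class Interval(Base):
        # ...

        @hybrid_property
        def radius(self):
            return abs(self.length) / 2

        @radius.expression
        def radius(cls):
            return func.abs(cls.length) / 2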
.. note::
when referring to a hybrid property from an owning class (e.g.
``SomeClass.some_hybrid``), an instance of
:class:`.QueryableAttribute` is returned, representing the
expression or comparator object as well as this hybrid object.
However, that object itself has accessors called ``expression`` and
``comparator``; so when attempting to override these decorators on a
subclass, it may be necessary to qualify it using the
:attr:`.hybrid_property.overrides` modifier first. See that
modifier for details.
.. seealso::
:ref:`hybrid_distinct_expression`
"""
return self._copy(expr=expr)
def comparator(self, comparator):
"""Provide a modifying decorator that defines a custom
comparator producing method.
The return value of the decorated method should be an instance of
:class:`~.hybrid.Comparator`.
.. note:: The :meth:`.hybrid_property.comparator` decorator
**replaces** the use of the :meth:`.hybrid_property.expression`
decorator. They cannot be used together.
When a hybrid is invoked at the class level, the
:class:`~.hybrid.Comparator` object given here is wrapped inside of a
specialized :class:`.QueryableAttribute`, which is the same kind of
object used by the ORM to represent other mapped attributes. The
reason for this is so that other class-level attributes such as
docstrings and a reference to the hybrid itself may be maintained
within the structure that's returned, without any modifications to the
original comparator object passed in.
.. note::
when referring to a hybrid property from an owning class (e.g.
``SomeClass.some_hybrid``), an instance of
:class:`.QueryableAttribute` is returned, representing the
expression or comparator object as well as this hybrid object. However,
that object itself has accessors called ``expression`` and
``comparator``; so when attempting to override these decorators on a
subclass, it may be necessary to qualify it using the
:attr:`.hybrid_property.overrides` modifier first. See that
modifier for details.
"""
return self._copy(custom_comparator=comparator)
def update_expression(self, meth):
"""Provide a modifying decorator that defines an UPDATE tuple
producing method.
The method accepts a single value, which is the value to be
rendered into the SET clause of an UPDATE statement. The method
should then process this value into individual column expressions
that fit into the ultimate SET clause, and return them as a
sequence of 2-tuples. Each tuple
contains a column expression as the key and a value to be rendered.
E.g.::
class Person(Base):
# ...
first_name = Column(String)
last_name = Column(String)
@hybrid_property
def fullname(self):
return self.first_name + " " + self.last_name
@fullname.update_expression
def fullname(cls, value):
fname, lname = value.split(" ", 1)
return [
(cls.first_name, fname),
(cls.last_name, lname)
]
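With the above hook in place, a bulk UPDATE can pass a single string to
the hybrid, which is then split into the two columns (a usage sketch)::

    session.query(Person).update(
        {Person.fullname: "Dr. No"}, synchronize_session=False)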
.. versionadded:: 1.2
"""
return self._copy(update_expr=meth)
@util.memoized_property
def _expr_comparator(self):
if self.custom_comparator is not None:
return self._get_comparator(self.custom_comparator)
elif self.expr is not None:
return self._get_expr(self.expr)
else:
return self._get_expr(self.fget)
def _get_expr(self, expr):
def _expr(cls):
return ExprComparator(cls, expr(cls), self)
util.update_wrapper(_expr, expr)
return self._get_comparator(_expr)
def _get_comparator(self, comparator):
proxy_attr = attributes.create_proxied_attribute(self)
def expr_comparator(owner):
return proxy_attr(
owner,
self.__name__,
self,
comparator(owner),
doc=comparator.__doc__ or self.__doc__,
)
return expr_comparator
class Comparator(interfaces.PropComparator):
"""A helper class that allows easy construction of custom
:class:`~.orm.interfaces.PropComparator`
classes for usage with hybrids."""
property = None
def __init__(self, expression):
self.expression = expression
def __clause_element__(self):
expr = self.expression
if hasattr(expr, "__clause_element__"):
expr = expr.__clause_element__()
return expr
def adapt_to_entity(self, adapt_to_entity):
# interesting....
return self
class ExprComparator(Comparator):
def __init__(self, cls, expression, hybrid):
self.cls = cls
self.expression = expression
self.hybrid = hybrid
def __getattr__(self, key):
return getattr(self.expression, key)
@property
def info(self):
return self.hybrid.info
def _bulk_update_tuples(self, value):
if isinstance(self.expression, attributes.QueryableAttribute):
return self.expression._bulk_update_tuples(value)
elif self.hybrid.update_expr is not None:
return self.hybrid.update_expr(self.cls, value)
else:
return [(self.expression, value)]
@property
def property(self):
return self.expression.property
def operate(self, op, *other, **kwargs):
return op(self.expression, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
return op(other, self.expression, **kwargs)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext/__init__.py | # ext/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .. import util as _sa_util
_sa_util.dependencies.resolve_all("sqlalchemy.ext")
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext/indexable.py | # ext/index.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Define attributes on ORM-mapped classes that have "index" attributes for
columns with :class:`_types.Indexable` types.
"index" means the attribute is associated with an element of an
:class:`_types.Indexable` column with the predefined index to access it.
The :class:`_types.Indexable` types include types such as
:class:`_types.ARRAY`, :class:`_types.JSON` and
:class:`_postgresql.HSTORE`.
The :mod:`~sqlalchemy.ext.indexable` extension provides a
:class:`_schema.Column`-like interface for any element of an
:class:`_types.Indexable` typed column. In simple cases, it can be
treated as a :class:`_schema.Column`-mapped attribute.
.. versionadded:: 1.1
Synopsis
========
Given ``Person`` as a model with a primary key and a JSON data field.
While this field may have any number of elements encoded within it,
we would like to refer to the element called ``name`` individually
as a dedicated attribute which behaves like a standalone column::
from sqlalchemy import Column, JSON, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.indexable import index_property
Base = declarative_base()
class Person(Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
data = Column(JSON)
name = index_property('data', 'name')
Above, the ``name`` attribute now behaves like a mapped column. We
can compose a new ``Person`` and set the value of ``name``::
>>> person = Person(name='Alchemist')
The value is now accessible::
>>> person.name
'Alchemist'
Behind the scenes, the JSON field was initialized to a new blank dictionary
and the field was set::
>>> person.data
{"name": "Alchemist'}
The field is mutable in place::
>>> person.name = 'Renamed'
>>> person.name
'Renamed'
>>> person.data
{'name': 'Renamed'}
When using :class:`.index_property`, the change that we make to the indexable
structure is also automatically tracked as history; we no longer need
to use :class:`~.mutable.MutableDict` in order to track this change
for the unit of work.
Deletions work normally as well::
>>> del person.name
>>> person.data
{}
Above, deletion of ``person.name`` deletes the value from the dictionary,
but not the dictionary itself.
A missing key will produce ``AttributeError``::
>>> person = Person()
>>> person.name
...
AttributeError: 'name'
Unless you set a default value::
>>> class Person(Base):
...     __tablename__ = 'person'
...
...     id = Column(Integer, primary_key=True)
...     data = Column(JSON)
...
...     name = index_property('data', 'name', default=None)  # See default
>>> person = Person()
>>> print(person.name)
None
The attributes are also accessible at the class level.
Below, we illustrate ``Person.name`` used to generate
an indexed SQL criteria::
>>> from sqlalchemy.orm import Session
>>> session = Session()
>>> query = session.query(Person).filter(Person.name == 'Alchemist')
The above query is equivalent to::
>>> query = session.query(Person).filter(Person.data['name'] == 'Alchemist')
Multiple :class:`.index_property` objects can be chained to produce
multiple levels of indexing::
from sqlalchemy import Column, JSON, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.indexable import index_property
Base = declarative_base()
class Person(Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
data = Column(JSON)
birthday = index_property('data', 'birthday')
year = index_property('birthday', 'year')
month = index_property('birthday', 'month')
day = index_property('birthday', 'day')
Above, a query such as::
q = session.query(Person).filter(Person.year == '1980')
On a PostgreSQL backend, the above query will render as::
SELECT person.id, person.data
FROM person
WHERE person.data -> %(data_1)s -> %(param_1)s = %(param_2)s
Default Values
==============
:class:`.index_property` includes special behaviors for when the indexed
data structure does not exist, and a set operation is called:
* For an :class:`.index_property` that is given an integer index value,
the default data structure will be a Python list of ``None`` values,
long enough to accommodate the index; the value is then set at its
place in the list. This means for an index value of zero, the list
will be initialized to ``[None]`` before setting the given value,
and for an index value of five, the list will be initialized to
``[None, None, None, None, None, None]`` before setting the element at
index five to the given value. Note that an existing list is **not** extended
in place to receive a value.
* for an :class:`.index_property` that is given any other kind of index
value (e.g. strings usually), a Python dictionary is used as the
default data structure.
* The default data structure can be set to any Python callable using the
:paramref:`.index_property.datatype` parameter, overriding the previous
rules.
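For example, a minimal sketch of the list-based default, using an assumed
model with a JSON column::

    class Sample(Base):
        __tablename__ = 'sample'

        id = Column(Integer, primary_key=True)
        values = Column(JSON)

        third = index_property('values', 2)

    s = Sample()
    s.third = 'three'
    # s.values was initialized to [None, None, None], then
    # the element at index 2 was set: [None, None, 'three']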
Subclassing
===========
:class:`.index_property` can be subclassed, in particular for the common
use case of providing coercion of values or SQL expressions as they are
accessed. Below is a common recipe for use with a PostgreSQL JSON type,
where we want to also include automatic casting plus the ``astext`` accessor::
class pg_json_property(index_property):
def __init__(self, attr_name, index, cast_type):
super(pg_json_property, self).__init__(attr_name, index)
self.cast_type = cast_type
def expr(self, model):
expr = super(pg_json_property, self).expr(model)
return expr.astext.cast(self.cast_type)
The above subclass can be used with the PostgreSQL-specific
version of :class:`_postgresql.JSON`::
from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.postgresql import JSON
Base = declarative_base()
class Person(Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
data = Column(JSON)
age = pg_json_property('data', 'age', Integer)
The ``age`` attribute at the instance level works as before; however
when rendering SQL, PostgreSQL's ``->>`` operator will be used
for indexed access, instead of the usual index operator of ``->``::
>>> query = session.query(Person).filter(Person.age < 20)
The above query will render::
SELECT person.id, person.data
FROM person
WHERE CAST(person.data ->> %(data_1)s AS INTEGER) < %(param_1)s
""" # noqa
from __future__ import absolute_import
from .. import inspect
from .. import util
from ..ext.hybrid import hybrid_property
from ..orm.attributes import flag_modified
__all__ = ["index_property"]
class index_property(hybrid_property): # noqa
"""A property generator. The generated property describes an object
attribute that corresponds to an :class:`_types.Indexable`
column.
.. versionadded:: 1.1
.. seealso::
:mod:`sqlalchemy.ext.indexable`
"""
_NO_DEFAULT_ARGUMENT = object()
def __init__(
self,
attr_name,
index,
default=_NO_DEFAULT_ARGUMENT,
datatype=None,
mutable=True,
onebased=True,
):
"""Create a new :class:`.index_property`.
:param attr_name:
An attribute name of an ``Indexable`` typed column, or other
attribute that returns an indexable structure.
:param index:
The index to be used for getting and setting this value. This
should be the Python-side index value for integers.
:param default:
A value which will be returned instead of raising ``AttributeError``
when there is no value at the given index.
:param datatype: default datatype to use when the field is empty.
By default, this is derived from the type of index used; a
Python list for an integer index, or a Python dictionary for
any other style of index. For a list, the list will be
initialized to a list of None values that is at least
``index`` elements long.
:param mutable: if False, writes and deletes to the attribute will
be disallowed.
:param onebased: assume the SQL representation of this value is
one-based; that is, the first index in SQL is 1, not zero.
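For example, a sketch assuming a PostgreSQL ``ARRAY`` column::

    class Sample(Base):
        __tablename__ = 'sample'

        id = Column(Integer, primary_key=True)
        values = Column(ARRAY(Integer))

        first = index_property('values', 0)

Above, ``instance.first`` reads ``instance.values[0]`` in Python, while
``Sample.first`` renders against the one-based SQL index ``values[1]``.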
"""
if mutable:
super(index_property, self).__init__(
self.fget, self.fset, self.fdel, self.expr
)
else:
super(index_property, self).__init__(
self.fget, None, None, self.expr
)
self.attr_name = attr_name
self.index = index
self.default = default
is_numeric = isinstance(index, int)
onebased = is_numeric and onebased
if datatype is not None:
self.datatype = datatype
else:
if is_numeric:
self.datatype = lambda: [None for x in range(index + 1)]
else:
self.datatype = dict
self.onebased = onebased
def _fget_default(self, err=None):
if self.default is self._NO_DEFAULT_ARGUMENT:
util.raise_(AttributeError(self.attr_name), replace_context=err)
else:
return self.default
def fget(self, instance):
attr_name = self.attr_name
column_value = getattr(instance, attr_name)
if column_value is None:
return self._fget_default()
try:
value = column_value[self.index]
except (KeyError, IndexError) as err:
return self._fget_default(err)
else:
return value
def fset(self, instance, value):
attr_name = self.attr_name
column_value = getattr(instance, attr_name, None)
if column_value is None:
column_value = self.datatype()
setattr(instance, attr_name, column_value)
column_value[self.index] = value
setattr(instance, attr_name, column_value)
if attr_name in inspect(instance).mapper.attrs:
flag_modified(instance, attr_name)
def fdel(self, instance):
attr_name = self.attr_name
column_value = getattr(instance, attr_name)
if column_value is None:
raise AttributeError(self.attr_name)
try:
del column_value[self.index]
except KeyError as err:
util.raise_(AttributeError(self.attr_name), replace_context=err)
else:
setattr(instance, attr_name, column_value)
flag_modified(instance, attr_name)
def expr(self, model):
column = getattr(model, self.attr_name)
index = self.index
if self.onebased:
index += 1
return column[index]
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext/serializer.py | # ext/serializer.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Serializer/Deserializer objects for usage with SQLAlchemy query structures,
allowing "contextual" deserialization.
Any SQLAlchemy query structure, either based on sqlalchemy.sql.*
or sqlalchemy.orm.* can be used. The mappers, Tables, Columns, Session
etc. which are referenced by the structure are not persisted in serialized
form, but are instead re-associated with the query structure
when it is deserialized.
Usage is nearly the same as that of the standard Python pickle module::
from sqlalchemy.ext.serializer import loads, dumps
metadata = MetaData(bind=some_engine)
Session = scoped_session(sessionmaker())
# ... define mappers
query = Session.query(MyClass).\
    filter(MyClass.somedata == 'foo').order_by(MyClass.sortkey)
# pickle the query
serialized = dumps(query)
# unpickle. Pass in metadata + scoped_session
query2 = loads(serialized, metadata, Session)
print(query2.all())
Similar restrictions as when using raw pickle apply; mapped classes must
themselves be pickleable, meaning they are importable from a module-level
namespace.
The serializer module is only appropriate for query structures. It is not
needed for:
* instances of user-defined classes. These contain no references to engines,
sessions or expression constructs in the typical case and can be serialized
directly.
* Table metadata that is to be loaded entirely from the serialized structure
(i.e. is not already declared in the application). Regular
pickle.loads()/dumps() can be used to fully dump any ``MetaData`` object,
typically one which was reflected from an existing database at some previous
point in time. The serializer module is specifically for the opposite case,
where the Table metadata is already present in memory.
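For example, a Core select construct against a :class:`_schema.Table`
already present in a ``MetaData`` can be round-tripped in the same way
(a sketch; ``users`` is an assumed table within ``metadata``)::

    from sqlalchemy import select
    from sqlalchemy.ext.serializer import loads, dumps

    stmt = select([users.c.id]).where(users.c.name == 'ed')
    serialized = dumps(stmt)

    # re-associate with the same MetaData upon load
    stmt2 = loads(serialized, metadata)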
"""
import re
from .. import Column
from .. import Table
from ..engine import Engine
from ..orm import class_mapper
from ..orm.attributes import QueryableAttribute
from ..orm.interfaces import MapperProperty
from ..orm.mapper import Mapper
from ..orm.session import Session
from ..util import b64decode
from ..util import b64encode
from ..util import byte_buffer
from ..util import pickle
from ..util import text_type
__all__ = ["Serializer", "Deserializer", "dumps", "loads"]
def Serializer(*args, **kw):
pickler = pickle.Pickler(*args, **kw)
def persistent_id(obj):
# print "serializing:", repr(obj)
if isinstance(obj, QueryableAttribute):
cls = obj.impl.class_
key = obj.impl.key
id_ = "attribute:" + key + ":" + b64encode(pickle.dumps(cls))
elif isinstance(obj, Mapper) and not obj.non_primary:
id_ = "mapper:" + b64encode(pickle.dumps(obj.class_))
elif isinstance(obj, MapperProperty) and not obj.parent.non_primary:
id_ = (
"mapperprop:"
+ b64encode(pickle.dumps(obj.parent.class_))
+ ":"
+ obj.key
)
elif isinstance(obj, Table):
id_ = "table:" + text_type(obj.key)
elif isinstance(obj, Column) and isinstance(obj.table, Table):
id_ = (
"column:" + text_type(obj.table.key) + ":" + text_type(obj.key)
)
elif isinstance(obj, Session):
id_ = "session:"
elif isinstance(obj, Engine):
id_ = "engine:"
else:
return None
return id_
pickler.persistent_id = persistent_id
return pickler
our_ids = re.compile(
r"(mapperprop|mapper|table|column|session|attribute|engine):(.*)"
)
def Deserializer(file, metadata=None, scoped_session=None, engine=None):
unpickler = pickle.Unpickler(file)
def get_engine():
if engine:
return engine
elif scoped_session and scoped_session().bind:
return scoped_session().bind
elif metadata and metadata.bind:
return metadata.bind
else:
return None
def persistent_load(id_):
m = our_ids.match(text_type(id_))
if not m:
return None
else:
type_, args = m.group(1, 2)
if type_ == "attribute":
key, clsarg = args.split(":")
cls = pickle.loads(b64decode(clsarg))
return getattr(cls, key)
elif type_ == "mapper":
cls = pickle.loads(b64decode(args))
return class_mapper(cls)
elif type_ == "mapperprop":
mapper, keyname = args.split(":")
cls = pickle.loads(b64decode(mapper))
return class_mapper(cls).attrs[keyname]
elif type_ == "table":
return metadata.tables[args]
elif type_ == "column":
table, colname = args.split(":")
return metadata.tables[table].c[colname]
elif type_ == "session":
return scoped_session()
elif type_ == "engine":
return get_engine()
else:
raise Exception("Unknown token: %s" % type_)
unpickler.persistent_load = persistent_load
return unpickler
def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL):
buf = byte_buffer()
pickler = Serializer(buf, protocol)
pickler.dump(obj)
return buf.getvalue()
def loads(data, metadata=None, scoped_session=None, engine=None):
buf = byte_buffer(data)
unpickler = Deserializer(buf, metadata, scoped_session, engine)
return unpickler.load()
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext/baked.py | # sqlalchemy/ext/baked.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Baked query extension.
Provides a creational pattern for the :class:`.query.Query` object which
allows the fully constructed object, Core select statement, and string
compiled result to be fully cached.
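A typical usage sketch, assuming a mapped ``User`` class::

    from sqlalchemy import bindparam
    from sqlalchemy.ext import baked

    bakery = baked.bakery()

    def lookup_user(session, username):
        # the lambdas are invoked once to build the Query; afterwards
        # the cached form is reused, with only the parameter changing
        baked_query = bakery(lambda session: session.query(User))
        baked_query += lambda q: q.filter(
            User.name == bindparam('username'))
        return baked_query(session).params(username=username).all()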
"""
import copy
import logging
from .. import exc as sa_exc
from .. import util
from ..orm import exc as orm_exc
from ..orm import strategy_options
from ..orm.query import Query
from ..orm.session import Session
from ..sql import func
from ..sql import literal_column
from ..sql import util as sql_util
log = logging.getLogger(__name__)
class Bakery(object):
"""Callable which returns a :class:`.BakedQuery`.
This object is returned by the class method
:meth:`.BakedQuery.bakery`. It exists as an object
so that the "cache" can be easily inspected.
.. versionadded:: 1.2
"""
__slots__ = "cls", "cache"
def __init__(self, cls_, cache):
self.cls = cls_
self.cache = cache
def __call__(self, initial_fn, *args):
return self.cls(self.cache, initial_fn, args)
class BakedQuery(object):
"""A builder object for :class:`.query.Query` objects."""
__slots__ = "steps", "_bakery", "_cache_key", "_spoiled"
def __init__(self, bakery, initial_fn, args=()):
self._cache_key = ()
self._update_cache_key(initial_fn, args)
self.steps = [initial_fn]
self._spoiled = False
self._bakery = bakery
@classmethod
def bakery(cls, size=200, _size_alert=None):
"""Construct a new bakery.
:return: an instance of :class:`.Bakery`
"""
return Bakery(cls, util.LRUCache(size, size_alert=_size_alert))
def _clone(self):
b1 = BakedQuery.__new__(BakedQuery)
b1._cache_key = self._cache_key
b1.steps = list(self.steps)
b1._bakery = self._bakery
b1._spoiled = self._spoiled
return b1
def _update_cache_key(self, fn, args=()):
self._cache_key += (fn.__code__,) + args
def __iadd__(self, other):
if isinstance(other, tuple):
self.add_criteria(*other)
else:
self.add_criteria(other)
return self
def __add__(self, other):
if isinstance(other, tuple):
return self.with_criteria(*other)
else:
return self.with_criteria(other)
def add_criteria(self, fn, *args):
"""Add a criteria function to this :class:`.BakedQuery`.
This is equivalent to using the ``+=`` operator to
modify a :class:`.BakedQuery` in-place.
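E.g., a sketch assuming a mapped ``User`` class::

    baked_query.add_criteria(lambda q: q.filter(User.id > 5))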
"""
self._update_cache_key(fn, args)
self.steps.append(fn)
return self
def with_criteria(self, fn, *args):
"""Add a criteria function to a :class:`.BakedQuery` cloned from this one.
This is equivalent to using the ``+`` operator to
produce a new :class:`.BakedQuery` with modifications.
"""
return self._clone().add_criteria(fn, *args)
def for_session(self, session):
"""Return a :class:`.Result` object for this :class:`.BakedQuery`.
This is equivalent to calling the :class:`.BakedQuery` as a
Python callable, e.g. ``result = my_baked_query(session)``.
"""
return Result(self, session)
def __call__(self, session):
return self.for_session(session)
def spoil(self, full=False):
"""Cancel any query caching that will occur on this BakedQuery object.
The BakedQuery can continue to be used normally, however additional
creational functions will not be cached; they will be called
on every invocation.
This is to support the case where a particular step in constructing
a baked query disqualifies the query from being cacheable, such
as a variant that relies upon some uncacheable value.
:param full: if False, only functions added to this
:class:`.BakedQuery` object subsequent to the spoil step will be
non-cached; the state of the :class:`.BakedQuery` up until
this point will be pulled from the cache. If True, then the
entire :class:`_query.Query` object is built from scratch each
time, with all creational functions being called on each
invocation.
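E.g., a sketch where a later step depends on an uncacheable, per-call
collection; ``bakery``, ``User`` and ``get_names()`` are assumed or
hypothetical names::

    baked_query = bakery(lambda s: s.query(User))
    baked_query.spoil()
    # this step is re-run on every invocation, since the IN
    # collection changes per call
    baked_query += lambda q: q.filter(User.name.in_(get_names()))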
"""
if not full and not self._spoiled:
_spoil_point = self._clone()
_spoil_point._cache_key += ("_query_only",)
self.steps = [_spoil_point._retrieve_baked_query]
self._spoiled = True
return self
def _effective_key(self, session):
"""Return the key that actually goes into the cache dictionary for
this :class:`.BakedQuery`, taking into account the given
:class:`.Session`.
This basically means we will also include the session's query_class,
as the actual :class:`_query.Query` object is part of what's cached
and needs to match the type of :class:`_query.Query` that a later
session will want to use.
"""
return self._cache_key + (session._query_cls,)
def _with_lazyload_options(self, options, effective_path, cache_path=None):
"""Cloning version of _add_lazyload_options.
"""
q = self._clone()
q._add_lazyload_options(options, effective_path, cache_path=cache_path)
return q
def _add_lazyload_options(self, options, effective_path, cache_path=None):
"""Used by per-state lazy loaders to add options to the
"lazy load" query from a parent query.
Creates a cache key based on given load path and query options;
if a repeatable cache key cannot be generated, the query is
"spoiled" so that it won't use caching.
"""
key = ()
if not cache_path:
cache_path = effective_path
if cache_path.path[0].is_aliased_class:
# paths that are against an AliasedClass are unsafe to cache
# with since the AliasedClass is an ad-hoc object.
self.spoil(full=True)
else:
for opt in options:
cache_key = opt._generate_cache_key(cache_path)
if cache_key is False:
self.spoil(full=True)
elif cache_key is not None:
key += cache_key
self.add_criteria(
lambda q: q._with_current_path(
effective_path
)._conditional_options(*options),
cache_path.path,
key,
)
def _retrieve_baked_query(self, session):
query = self._bakery.get(self._effective_key(session), None)
if query is None:
query = self._as_query(session)
self._bakery[self._effective_key(session)] = query.with_session(
None
)
return query.with_session(session)
def _bake(self, session):
query = self._as_query(session)
context = query._compile_context()
self._bake_subquery_loaders(session, context)
context.session = None
context.query = query = context.query.with_session(None)
query._execution_options = query._execution_options.union(
{"compiled_cache": self._bakery}
)
# we'll be holding onto the query for some of its state,
# so delete some compilation-use-only attributes that can take up
# space
for attr in (
"_correlate",
"_from_obj",
"_mapper_adapter_map",
"_joinpath",
"_joinpoint",
):
query.__dict__.pop(attr, None)
# if the query is not safe to cache, we still do everything as though
# we did cache it, since the receiver of _bake() assumes subqueryload
# context was set up, etc.
if context.query._bake_ok:
self._bakery[self._effective_key(session)] = context
return context
def to_query(self, query_or_session):
"""Return the :class:`_query.Query` object for use as a subquery.
This method should be used within the lambda callable being used
to generate a step of an enclosing :class:`.BakedQuery`. The
parameter should normally be the :class:`_query.Query` object that
is passed to the lambda::
sub_bq = self.bakery(lambda s: s.query(User.name))
sub_bq += lambda q: q.filter(
User.id == Address.user_id).correlate(Address)
main_bq = self.bakery(lambda s: s.query(Address))
main_bq += lambda q: q.filter(
sub_bq.to_query(q).exists())
In the case where the subquery is used in the first callable against
a :class:`.Session`, the :class:`.Session` is also accepted::
sub_bq = self.bakery(lambda s: s.query(User.name))
sub_bq += lambda q: q.filter(
User.id == Address.user_id).correlate(Address)
main_bq = self.bakery(
lambda s: s.query(Address.id, sub_bq.to_query(s).as_scalar())
)
:param query_or_session: a :class:`_query.Query` object or a class
:class:`.Session` object, that is assumed to be within the context
of an enclosing :class:`.BakedQuery` callable.
.. versionadded:: 1.3
"""
if isinstance(query_or_session, Session):
session = query_or_session
elif isinstance(query_or_session, Query):
session = query_or_session.session
if session is None:
raise sa_exc.ArgumentError(
"Given Query needs to be associated with a Session"
)
else:
raise TypeError(
"Query or Session object expected, got %r."
% type(query_or_session)
)
return self._as_query(session)
def _as_query(self, session):
query = self.steps[0](session)
for step in self.steps[1:]:
query = step(query)
return query
def _bake_subquery_loaders(self, session, context):
"""convert subquery eager loaders in the cache into baked queries.
For subquery eager loading to work, all we need here is that the
Query point to the correct session when it is run. However, since
we are "baking" anyway, we may as well also turn the query into
a "baked" query so that we save on performance too.
"""
context.attributes["baked_queries"] = baked_queries = []
for k, v in list(context.attributes.items()):
if isinstance(v, Query):
if "subquery" in k:
bk = BakedQuery(self._bakery, lambda *args: v)
bk._cache_key = self._cache_key + k
bk._bake(session)
baked_queries.append((k, bk._cache_key, v))
del context.attributes[k]
def _unbake_subquery_loaders(
self, session, context, params, post_criteria
):
"""Retrieve subquery eager loaders stored by _bake_subquery_loaders
and turn them back into Result objects that will iterate just
like a Query object.
"""
if "baked_queries" not in context.attributes:
return
for k, cache_key, query in context.attributes["baked_queries"]:
bk = BakedQuery(
self._bakery, lambda sess, q=query: q.with_session(sess)
)
bk._cache_key = cache_key
q = bk.for_session(session)
for fn in post_criteria:
q = q.with_post_criteria(fn)
context.attributes[k] = q.params(**params)
class Result(object):
"""Invokes a :class:`.BakedQuery` against a :class:`.Session`.
The :class:`.Result` object is where the actual :class:`.query.Query`
object gets created, or retrieved from the cache,
against a target :class:`.Session`, and is then invoked for results.
"""
__slots__ = "bq", "session", "_params", "_post_criteria"
def __init__(self, bq, session):
self.bq = bq
self.session = session
self._params = {}
self._post_criteria = []
def params(self, *args, **kw):
"""Specify parameters to be replaced into the string SQL statement."""
if len(args) == 1:
kw.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary."
)
self._params.update(kw)
return self
def _using_post_criteria(self, fns):
if fns:
self._post_criteria.extend(fns)
return self
def with_post_criteria(self, fn):
"""Add a criteria function that will be applied post-cache.
This adds a function that will be run against the
:class:`_query.Query` object after it is retrieved from the
cache. Functions here can be used to alter the query in ways
that **do not affect the SQL output**, such as execution options
and shard identifiers (when using a shard-enabled query object)
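E.g., a sketch applying an execution option after the cached query is
retrieved (``baked_query`` is an assumed :class:`.BakedQuery`)::

    result = baked_query(session).with_post_criteria(
        lambda q: q.execution_options(stream_results=True))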
.. warning:: :meth:`.Result.with_post_criteria` functions are applied
to the :class:`_query.Query`
object **after** the query's SQL statement
object has been retrieved from the cache. Any operations here
which intend to modify the SQL should ensure that
:meth:`.BakedQuery.spoil` was called first.
.. versionadded:: 1.2
"""
return self._using_post_criteria([fn])
def _as_query(self):
q = self.bq._as_query(self.session).params(self._params)
for fn in self._post_criteria:
q = fn(q)
return q
def __str__(self):
return str(self._as_query())
def __iter__(self):
bq = self.bq
if not self.session.enable_baked_queries or bq._spoiled:
return iter(self._as_query())
baked_context = bq._bakery.get(bq._effective_key(self.session), None)
if baked_context is None:
baked_context = bq._bake(self.session)
context = copy.copy(baked_context)
context.session = self.session
context.attributes = context.attributes.copy()
bq._unbake_subquery_loaders(
self.session, context, self._params, self._post_criteria
)
context.statement.use_labels = True
if context.autoflush and not context.populate_existing:
self.session._autoflush()
q = context.query.params(self._params).with_session(self.session)
for fn in self._post_criteria:
q = fn(q)
return q._execute_and_instances(context)
def count(self):
"""return the 'count'.
Equivalent to :meth:`_query.Query.count`.
Note this uses a subquery to ensure an accurate count regardless
of the structure of the original statement.
.. versionadded:: 1.1.6
"""
col = func.count(literal_column("*"))
bq = self.bq.with_criteria(lambda q: q.from_self(col))
return bq.for_session(self.session).params(self._params).scalar()
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
Equivalent to :meth:`_query.Query.scalar`.
.. versionadded:: 1.1.6
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def first(self):
"""Return the first row.
Equivalent to :meth:`_query.Query.first`.
"""
bq = self.bq.with_criteria(lambda q: q.slice(0, 1))
ret = list(
bq.for_session(self.session)
.params(self._params)
._using_post_criteria(self._post_criteria)
)
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Equivalent to :meth:`_query.Query.one`.
"""
try:
ret = self.one_or_none()
except orm_exc.MultipleResultsFound as err:
util.raise_(
orm_exc.MultipleResultsFound(
"Multiple rows were found for one()"
),
replace_context=err,
)
else:
if ret is None:
raise orm_exc.NoResultFound("No row was found for one()")
return ret
def one_or_none(self):
"""Return one or zero results, or raise an exception for multiple
rows.
Equivalent to :meth:`_query.Query.one_or_none`.
.. versionadded:: 1.0.9
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
return None
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one_or_none()"
)
def all(self):
"""Return all rows.
Equivalent to :meth:`_query.Query.all`.
"""
return list(self)
def get(self, ident):
"""Retrieve an object based on identity.
Equivalent to :meth:`_query.Query.get`.
"""
query = self.bq.steps[0](self.session)
return query._get_impl(ident, self._load_on_pk_identity)
def _load_on_pk_identity(self, query, primary_key_identity):
"""Load the given primary key identity from the database."""
mapper = query._mapper_zero()
_get_clause, _get_params = mapper._get_clause
def setup(query):
_lcl_get_clause = _get_clause
q = query._clone()
q._get_condition()
q._order_by = None
# None present in ident - turn those comparisons
# into "IS NULL"
if None in primary_key_identity:
nones = set(
[
_get_params[col].key
for col, value in zip(
mapper.primary_key, primary_key_identity
)
if value is None
]
)
_lcl_get_clause = sql_util.adapt_criterion_to_null(
_lcl_get_clause, nones
)
_lcl_get_clause = q._adapt_clause(_lcl_get_clause, True, False)
q._criterion = _lcl_get_clause
for fn in self._post_criteria:
q = fn(q)
return q
# cache the query against a key that includes
# which positions in the primary key are NULL
# (remember, we can map to an OUTER JOIN)
bq = self.bq
# add the clause we got from mapper._get_clause to the cache
# key so that if a race causes multiple calls to _get_clause,
# we've cached on ours
bq = bq._clone()
bq._cache_key += (_get_clause,)
bq = bq.with_criteria(
setup, tuple(elem is None for elem in primary_key_identity)
)
params = dict(
[
(_get_params[primary_key].key, id_val)
for id_val, primary_key in zip(
primary_key_identity, mapper.primary_key
)
]
)
result = list(bq.for_session(self.session).params(**params))
l = len(result)
if l > 1:
raise orm_exc.MultipleResultsFound()
elif l:
return result[0]
else:
return None
@util.deprecated(
"1.2", "Baked lazy loading is now the default implementation."
)
def bake_lazy_loaders():
"""Enable the use of baked queries for all lazyloaders systemwide.
The "baked" implementation of lazy loading is now the sole implementation
for the base lazy loader; this method has no effect except for a warning.
"""
pass
@util.deprecated(
"1.2", "Baked lazy loading is now the default implementation."
)
def unbake_lazy_loaders():
"""Disable the use of baked queries for all lazyloaders systemwide.
This method now raises NotImplementedError() as the "baked" implementation
is the only lazy load implementation. The
:paramref:`_orm.relationship.bake_queries` flag may be used to disable
the caching of queries on a per-relationship basis.
"""
raise NotImplementedError(
"Baked lazy loading is now the default implementation"
)
@strategy_options.loader_option()
def baked_lazyload(loadopt, attr):
"""Indicate that the given attribute should be loaded using "lazy"
loading with a "baked" query used in the load.
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "baked_select"})
@baked_lazyload._add_unbound_fn
@util.deprecated(
"1.2",
"Baked lazy loading is now the default "
"implementation for lazy loading.",
)
def baked_lazyload(*keys):
return strategy_options._UnboundLoad._from_keys(
strategy_options._UnboundLoad.baked_lazyload, keys, False, {}
)
@baked_lazyload._add_unbound_all_fn
@util.deprecated(
"1.2",
"Baked lazy loading is now the default "
"implementation for lazy loading.",
)
def baked_lazyload_all(*keys):
return strategy_options._UnboundLoad._from_keys(
strategy_options._UnboundLoad.baked_lazyload, keys, True, {}
)
baked_lazyload = baked_lazyload._unbound_fn
baked_lazyload_all = baked_lazyload_all._unbound_all_fn
bakery = BakedQuery.bakery
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext/automap.py | # ext/automap.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""Define an extension to the :mod:`sqlalchemy.ext.declarative` system
which automatically generates mapped classes and relationships from a database
schema, typically though not necessarily one which is reflected.
.. versionadded:: 0.9.1 Added :mod:`sqlalchemy.ext.automap`.
It is hoped that the :class:`.AutomapBase` system provides a quick
and modernized solution to the problem that the very famous
`SQLSoup <https://sqlsoup.readthedocs.io/en/latest/>`_
also tries to solve, that of generating a quick and rudimentary object
model from an existing database on the fly. By addressing the issue strictly
at the mapper configuration level, and integrating fully with existing
Declarative class techniques, :class:`.AutomapBase` seeks to provide
a well-integrated approach to the issue of expediently auto-generating ad-hoc
mappings.
Basic Use
=========
The simplest usage is to reflect an existing database into a new model.
We create a new :class:`.AutomapBase` class in a similar manner as to how
we create a declarative base class, using :func:`.automap_base`.
We then call :meth:`.AutomapBase.prepare` on the resulting base class,
asking it to reflect the schema and produce mappings::
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
Base = automap_base()
# engine, suppose it has two tables 'user' and 'address' set up
engine = create_engine("sqlite:///mydatabase.db")
# reflect the tables
Base.prepare(engine, reflect=True)
# mapped classes are now created with names by default
# matching that of the table name.
User = Base.classes.user
Address = Base.classes.address
session = Session(engine)
# rudimentary relationships are produced
session.add(Address(email_address="foo@bar.com", user=User(name="foo")))
session.commit()
# collection-based relationships are by default named
# "<classname>_collection"
u1 = session.query(User).first()
print (u1.address_collection)
Above, calling :meth:`.AutomapBase.prepare` while passing along the
:paramref:`.AutomapBase.prepare.reflect` parameter indicates that the
:meth:`_schema.MetaData.reflect`
method will be called on this declarative base
classes' :class:`_schema.MetaData` collection; then, each **viable**
:class:`_schema.Table` within the :class:`_schema.MetaData`
will get a new mapped class
generated automatically. The :class:`_schema.ForeignKeyConstraint`
objects which
link the various tables together will be used to produce new, bidirectional
:func:`_orm.relationship` objects between classes.
The classes and relationships
follow along a default naming scheme that we can customize. At this point,
our basic mapping consisting of related ``User`` and ``Address`` classes is
ready to use in the traditional way.
.. note:: By **viable**, we mean that for a table to be mapped, it must
specify a primary key. Additionally, if the table is detected as being
a pure association table between two other tables, it will not be directly
mapped and will instead be configured as a many-to-many table between
the mappings for the two referring tables.
Generating Mappings from an Existing MetaData
=============================================
We can pass a pre-declared :class:`_schema.MetaData` object to
:func:`.automap_base`.
This object can be constructed in any way, including programmatically, from
a serialized file, or from itself being reflected using
:meth:`_schema.MetaData.reflect`.
Below we illustrate a combination of reflection and
explicit table declaration::
from sqlalchemy import create_engine, MetaData, Table, Column, ForeignKey
from sqlalchemy.ext.automap import automap_base
engine = create_engine("sqlite:///mydatabase.db")
# produce our own MetaData object
metadata = MetaData()
# we can reflect it ourselves from a database, using options
# such as 'only' to limit what tables we look at...
metadata.reflect(engine, only=['user', 'address'])
# ... or just define our own Table objects with it (or combine both)
Table('user_order', metadata,
Column('id', Integer, primary_key=True),
Column('user_id', ForeignKey('user.id'))
)
# we can then produce a set of mappings from this MetaData.
Base = automap_base(metadata=metadata)
# calling prepare() just sets up mapped classes and relationships.
Base.prepare()
# mapped classes are ready
User, Address, Order = Base.classes.user, Base.classes.address,\
Base.classes.user_order
Specifying Classes Explicitly
=============================
The :mod:`.sqlalchemy.ext.automap` extension allows classes to be defined
explicitly, in a way similar to that of the :class:`.DeferredReflection` class.
Classes that extend from :class:`.AutomapBase` act like regular declarative
classes, but are not immediately mapped after their construction, and are
instead mapped when we call :meth:`.AutomapBase.prepare`. The
:meth:`.AutomapBase.prepare` method will make use of the classes we've
established based on the table name we use. If our schema contains tables
``user`` and ``address``, we can define one or both of the classes to be used::
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
# automap base
Base = automap_base()
# pre-declare User for the 'user' table
class User(Base):
__tablename__ = 'user'
# override schema elements like Columns
user_name = Column('name', String)
# override relationships too, if desired.
# we must use the same name that automap would use for the
# relationship, and also must refer to the class name that automap will
# generate for "address"
address_collection = relationship("address", collection_class=set)
# reflect
engine = create_engine("sqlite:///mydatabase.db")
Base.prepare(engine, reflect=True)
# we still have Address generated from the tablename "address",
# but User is the same as Base.classes.User now
Address = Base.classes.address
u1 = session.query(User).first()
print (u1.address_collection)
# the backref is still there:
a1 = session.query(Address).first()
print (a1.user)
Above, one of the more intricate details is that we illustrated overriding
one of the :func:`_orm.relationship` objects that automap would have created.
To do this, we needed to make sure the names match up with what automap
would normally generate, in that the relationship name would be
``User.address_collection`` and the name of the class referred to, from
automap's perspective, is called ``address``, even though we are referring to
it as ``Address`` within our usage of this class.
Overriding Naming Schemes
=========================
:mod:`.sqlalchemy.ext.automap` is tasked with producing mapped classes and
relationship names based on a schema, which means it has decision points in how
these names are determined. These three decision points are provided using
functions which can be passed to the :meth:`.AutomapBase.prepare` method, and
are known as :func:`.classname_for_table`,
:func:`.name_for_scalar_relationship`,
and :func:`.name_for_collection_relationship`. Any or all of these
functions are provided as in the example below, where we use a "camel case"
scheme for class names and a "pluralizer" for collection names using the
`Inflect <https://pypi.python.org/pypi/inflect>`_ package::
import re
import inflect
def camelize_classname(base, tablename, table):
    """Produce a 'camelized' class name, e.g.
    'words_and_underscores' -> 'WordsAndUnderscores'"""
    return str(tablename[0].upper() +
        re.sub(r'_([a-z])', lambda m: m.group(1).upper(), tablename[1:]))
_pluralizer = inflect.engine()
def pluralize_collection(base, local_cls, referred_cls, constraint):
    """Produce an 'uncamelized', 'pluralized' class name, e.g.
    'SomeTerm' -> 'some_terms'"""
referred_name = referred_cls.__name__
uncamelized = re.sub(r'[A-Z]',
lambda m: "_%s" % m.group(0).lower(),
referred_name)[1:]
pluralized = _pluralizer.plural(uncamelized)
return pluralized
from sqlalchemy.ext.automap import automap_base
Base = automap_base()
engine = create_engine("sqlite:///mydatabase.db")
Base.prepare(engine, reflect=True,
classname_for_table=camelize_classname,
name_for_collection_relationship=pluralize_collection
)
From the above mapping, we would now have classes ``User`` and ``Address``,
where the collection from ``User`` to ``Address`` is called
``User.addresses``::
User, Address = Base.classes.User, Base.classes.Address
u1 = User(addresses=[Address(email="foo@bar.com")])
Relationship Detection
======================
The vast majority of what automap accomplishes is the generation of
:func:`_orm.relationship` structures based on foreign keys. The mechanism
by which this works for many-to-one and one-to-many relationships is as
follows:
1. A given :class:`_schema.Table`, known to be mapped to a particular class,
is examined for :class:`_schema.ForeignKeyConstraint` objects.
2. From each :class:`_schema.ForeignKeyConstraint`, the remote
:class:`_schema.Table`
object present is matched up to the class to which it is to be mapped,
if any, else it is skipped.
3. As the :class:`_schema.ForeignKeyConstraint`
we are examining corresponds to a
reference from the immediate mapped class, the relationship will be set up
as a many-to-one referring to the referred class; a corresponding
one-to-many backref will be created on the referred class referring
to this class.
4. If any of the columns that are part of the
:class:`_schema.ForeignKeyConstraint`
are not nullable (e.g. ``nullable=False``), a
:paramref:`_orm.relationship.cascade` keyword argument
of ``all, delete-orphan`` will be added to the keyword arguments to
be passed to the relationship or backref. If the
:class:`_schema.ForeignKeyConstraint` reports that
:paramref:`_schema.ForeignKeyConstraint.ondelete`
is set to ``CASCADE`` for a not null or ``SET NULL`` for a nullable
set of columns, the option :paramref:`_orm.relationship.passive_deletes`
flag is set to ``True`` in the set of relationship keyword arguments.
Note that not all backends support reflection of ON DELETE.
.. versionadded:: 1.0.0 - automap will detect non-nullable foreign key
constraints when producing a one-to-many relationship and establish
a default cascade of ``all, delete-orphan`` if so; additionally,
if the constraint specifies
:paramref:`_schema.ForeignKeyConstraint.ondelete`
of ``CASCADE`` for non-nullable or ``SET NULL`` for nullable columns,
the ``passive_deletes=True`` option is also added.
5. The names of the relationships are determined using the
:paramref:`.AutomapBase.prepare.name_for_scalar_relationship` and
:paramref:`.AutomapBase.prepare.name_for_collection_relationship`
callable functions. It is important to note that the default relationship
naming derives the name from the **actual class name**. If you've
given a particular class an explicit name by declaring it, or specified an
alternate class naming scheme, that's the name from which the relationship
name will be derived.
6. The classes are inspected for an existing mapped property matching these
names. If one is detected on one side, but none on the other side,
:class:`.AutomapBase` attempts to create a relationship on the missing side,
then uses the :paramref:`_orm.relationship.back_populates`
parameter in order to
point the new relationship to the other side.
7. In the usual case where no relationship is on either side,
:meth:`.AutomapBase.prepare` produces a :func:`_orm.relationship` on the
"many-to-one" side and matches it to the other using the
:paramref:`_orm.relationship.backref` parameter.
8. Production of the :func:`_orm.relationship` and optionally the
:func:`.backref`
is handed off to the :paramref:`.AutomapBase.prepare.generate_relationship`
function, which can be supplied by the end-user in order to augment
the arguments passed to :func:`_orm.relationship` or :func:`.backref` or to
make use of custom implementations of these functions.
Custom Relationship Arguments
-----------------------------
The :paramref:`.AutomapBase.prepare.generate_relationship` hook can be used
to add parameters to relationships. For most cases, we can make use of the
existing :func:`.automap.generate_relationship` function to return
the object, after augmenting the given keyword dictionary with our own
arguments.
Below is an illustration of how to send
:paramref:`_orm.relationship.cascade` and
:paramref:`_orm.relationship.passive_deletes`
options along to all one-to-many relationships::
from sqlalchemy.ext.automap import generate_relationship
def _gen_relationship(base, direction, return_fn,
attrname, local_cls, referred_cls, **kw):
if direction is interfaces.ONETOMANY:
kw['cascade'] = 'all, delete-orphan'
kw['passive_deletes'] = True
# make use of the built-in function to actually return
# the result.
return generate_relationship(base, direction, return_fn,
attrname, local_cls, referred_cls, **kw)
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
# automap base
Base = automap_base()
engine = create_engine("sqlite:///mydatabase.db")
Base.prepare(engine, reflect=True,
generate_relationship=_gen_relationship)
Many-to-Many relationships
--------------------------
:mod:`.sqlalchemy.ext.automap` will generate many-to-many relationships, e.g.
those which contain a ``secondary`` argument. The process for producing these
is as follows:
1. A given :class:`_schema.Table` is examined for
:class:`_schema.ForeignKeyConstraint`
objects, before any mapped class has been assigned to it.
2. If the table contains exactly two
:class:`_schema.ForeignKeyConstraint`
objects, and all columns within this table are members of these two
:class:`_schema.ForeignKeyConstraint` objects, the table is assumed to be a
"secondary" table, and will **not be mapped directly**.
3. The two (or one, for self-referential) external tables to which the
:class:`_schema.Table`
refers are matched to the classes to which they will be
mapped, if any.
4. If mapped classes for both sides are located, a many-to-many bi-directional
:func:`_orm.relationship` / :func:`.backref`
pair is created between the two
classes.
5. The override logic for many-to-many works the same as that of one-to-many/
many-to-one; the :func:`.generate_relationship` function is called upon
to generate the structures and existing attributes will be maintained.
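For example, given table metadata along these lines (a sketch; ``metadata``
is an assumed :class:`_schema.MetaData`)::

    Table('left', metadata,
        Column('id', Integer, primary_key=True))
    Table('right', metadata,
        Column('id', Integer, primary_key=True))
    Table('association', metadata,
        Column('left_id', ForeignKey('left.id'), primary_key=True),
        Column('right_id', ForeignKey('right.id'), primary_key=True))

the ``association`` table consists only of the columns of its two foreign
key constraints, so it is not mapped directly; automap instead produces
classes for ``left`` and ``right``, linked by a bidirectional many-to-many
:func:`_orm.relationship` using ``association`` as the ``secondary`` argument.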
Relationships with Inheritance
------------------------------
:mod:`.sqlalchemy.ext.automap` will not generate any relationships between
two classes that are in an inheritance relationship. That is, with two
classes given as follows::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
type = Column(String(50))
__mapper_args__ = {
'polymorphic_identity':'employee', 'polymorphic_on': type
}
class Engineer(Employee):
__tablename__ = 'engineer'
id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
__mapper_args__ = {
'polymorphic_identity':'engineer',
}
The foreign key from ``Engineer`` to ``Employee`` is used not for a
relationship, but to establish joined inheritance between the two classes.
Note that this means automap will not generate *any* relationships
for foreign keys that link from a subclass to a superclass. If a mapping
has actual relationships from subclass to superclass as well, those
need to be explicit. Below, as we have two separate foreign keys
from ``Engineer`` to ``Employee``, we need to set up both the relationship
we want as well as the ``inherit_condition``, as these are not things
SQLAlchemy can guess::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
type = Column(String(50))
__mapper_args__ = {
'polymorphic_identity':'employee', 'polymorphic_on':type
}
class Engineer(Employee):
__tablename__ = 'engineer'
id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
favorite_employee_id = Column(Integer, ForeignKey('employee.id'))
favorite_employee = relationship(Employee,
foreign_keys=favorite_employee_id)
__mapper_args__ = {
'polymorphic_identity':'engineer',
'inherit_condition': id == Employee.id
}
Handling Simple Naming Conflicts
--------------------------------
In the case of naming conflicts during mapping, override any of
:func:`.classname_for_table`, :func:`.name_for_scalar_relationship`,
and :func:`.name_for_collection_relationship` as needed. For example, if
automap is attempting to name a many-to-one relationship the same as an
existing column, an alternate convention can be conditionally selected. Given
a schema:
.. sourcecode:: sql
CREATE TABLE table_a (
id INTEGER PRIMARY KEY
);
CREATE TABLE table_b (
id INTEGER PRIMARY KEY,
table_a INTEGER,
FOREIGN KEY(table_a) REFERENCES table_a(id)
);
The above schema will first automap the ``table_a`` table as a class named
``table_a``; it will then automap a relationship onto the class for ``table_b``
with the same name as this related class, e.g. ``table_a``. This
relationship name conflicts with the mapping column ``table_b.table_a``,
and will emit an error on mapping.
We can resolve this conflict by using an underscore as follows::
import warnings

def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
name = referred_cls.__name__.lower()
local_table = local_cls.__table__
if name in local_table.columns:
newname = name + "_"
warnings.warn(
"Already detected name %s present. using %s" %
(name, newname))
return newname
return name
Base.prepare(engine, reflect=True,
name_for_scalar_relationship=name_for_scalar_relationship)
Alternatively, we can change the name on the column side. The columns
that are mapped can be modified using the technique described at
:ref:`mapper_column_distinct_names`, by assigning the column explicitly
to a new name::
Base = automap_base()
class TableB(Base):
__tablename__ = 'table_b'
_table_a = Column('table_a', ForeignKey('table_a.id'))
Base.prepare(engine, reflect=True)
Using Automap with Explicit Declarations
========================================
As noted previously, automap has no dependency on reflection, and can make
use of any collection of :class:`_schema.Table` objects within a
:class:`_schema.MetaData`
collection. From this, it follows that automap can also be used to
generate missing relationships given an otherwise complete model that fully
defines table metadata::
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import Column, Integer, String, ForeignKey
Base = automap_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String)
class Address(Base):
__tablename__ = 'address'
id = Column(Integer, primary_key=True)
email = Column(String)
user_id = Column(ForeignKey('user.id'))
# produce relationships
Base.prepare()
# mapping is complete, with "address_collection" and
# "user" relationships
a1 = Address(email='u1')
a2 = Address(email='u2')
u1 = User(address_collection=[a1, a2])
assert a1.user is u1
Above, given mostly complete ``User`` and ``Address`` mappings, the
:class:`_schema.ForeignKey` which we defined on ``Address.user_id`` allowed a
bidirectional relationship pair ``Address.user`` and
``User.address_collection`` to be generated on the mapped classes.
Note that when subclassing :class:`.AutomapBase`,
the :meth:`.AutomapBase.prepare` method is required; if not called, the classes
we've declared are in an un-mapped state.
""" # noqa
from .declarative import declarative_base as _declarative_base
from .declarative.base import _DeferredMapperConfig
from .. import util
from ..orm import backref
from ..orm import exc as orm_exc
from ..orm import interfaces
from ..orm import relationship
from ..orm.mapper import _CONFIGURE_MUTEX
from ..schema import ForeignKeyConstraint
from ..sql import and_
def classname_for_table(base, tablename, table):
"""Return the class name that should be used, given the name
of a table.
The default implementation is::
return str(tablename)
Alternate implementations can be specified using the
:paramref:`.AutomapBase.prepare.classname_for_table`
parameter.
:param base: the :class:`.AutomapBase` class doing the prepare.
:param tablename: string name of the :class:`_schema.Table`.
:param table: the :class:`_schema.Table` object itself.
:return: a string class name.
.. note::
In Python 2, the string used for the class name **must** be a
non-Unicode object, e.g. a ``str()`` object. The ``.name`` attribute
of :class:`_schema.Table` is typically a Python unicode subclass,
so the
``str()`` function should be applied to this name, after accounting for
any non-ASCII characters.
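For example, a sketch of an alternate implementation that produces
"camelized" class names from ``snake_case`` table names (the
``camelize_classname`` name is purely illustrative)::

    import re

    def camelize_classname(base, tablename, table):
        # e.g. 'words_and_underscores' -> 'WordsAndUnderscores'
        return str(
            tablename[0].upper()
            + re.sub(
                r"_([a-z])", lambda m: m.group(1).upper(), tablename[1:]
            )
        )

    Base.prepare(engine, reflect=True,
                 classname_for_table=camelize_classname)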
"""
return str(tablename)
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
"""Return the attribute name that should be used to refer from one
class to another, for a scalar object reference.
The default implementation is::
return referred_cls.__name__.lower()
Alternate implementations can be specified using the
:paramref:`.AutomapBase.prepare.name_for_scalar_relationship`
parameter.
:param base: the :class:`.AutomapBase` class doing the prepare.
:param local_cls: the class to be mapped on the local side.
:param referred_cls: the class to be mapped on the referring side.
:param constraint: the :class:`_schema.ForeignKeyConstraint` that is being
inspected to produce this relationship.
"""
return referred_cls.__name__.lower()
def name_for_collection_relationship(
base, local_cls, referred_cls, constraint
):
"""Return the attribute name that should be used to refer from one
class to another, for a collection reference.
The default implementation is::
return referred_cls.__name__.lower() + "_collection"
Alternate implementations
can be specified using the
:paramref:`.AutomapBase.prepare.name_for_collection_relationship`
parameter.
:param base: the :class:`.AutomapBase` class doing the prepare.
:param local_cls: the class to be mapped on the local side.
:param referred_cls: the class to be mapped on the referring side.
:param constraint: the :class:`_schema.ForeignKeyConstraint` that is being
inspected to produce this relationship.
"""
return referred_cls.__name__.lower() + "_collection"
def generate_relationship(
base, direction, return_fn, attrname, local_cls, referred_cls, **kw
):
r"""Generate a :func:`_orm.relationship` or :func:`.backref`
on behalf of two
mapped classes.
An alternate implementation of this function can be specified using the
:paramref:`.AutomapBase.prepare.generate_relationship` parameter.
The default implementation of this function is as follows::
if return_fn is backref:
return return_fn(attrname, **kw)
elif return_fn is relationship:
return return_fn(referred_cls, **kw)
else:
raise TypeError("Unknown relationship function: %s" % return_fn)
:param base: the :class:`.AutomapBase` class doing the prepare.
:param direction: indicate the "direction" of the relationship; this will
be one of :data:`.ONETOMANY`, :data:`.MANYTOONE`, :data:`.MANYTOMANY`.
:param return_fn: the function that is used by default to create the
relationship. This will be either :func:`_orm.relationship` or
:func:`.backref`. The :func:`.backref` function's result will be used to
produce a new :func:`_orm.relationship` in a second step,
so it is critical
that user-defined implementations correctly differentiate between the two
functions, if a custom relationship function is being used.
:param attrname: the attribute name to which this relationship is being
assigned. If the value of :paramref:`.generate_relationship.return_fn` is
the :func:`.backref` function, then this name is the name that is being
assigned to the backref.
:param local_cls: the "local" class to which this relationship or backref
will be locally present.
:param referred_cls: the "referred" class to which the relationship or
backref refers.
:param \**kw: all additional keyword arguments are passed along to the
function.
:return: a :func:`_orm.relationship` or :func:`.backref` construct,
as dictated
by the :paramref:`.generate_relationship.return_fn` parameter.
"""
if return_fn is backref:
return return_fn(attrname, **kw)
elif return_fn is relationship:
return return_fn(referred_cls, **kw)
else:
raise TypeError("Unknown relationship function: %s" % return_fn)
class AutomapBase(object):
"""Base class for an "automap" schema.
The :class:`.AutomapBase` class can be compared to the "declarative base"
class that is produced by the :func:`.declarative.declarative_base`
function. In practice, the :class:`.AutomapBase` class is always used
as a mixin along with an actual declarative base.
A new subclassable :class:`.AutomapBase` is typically instantiated
using the :func:`.automap_base` function.
.. seealso::
:ref:`automap_toplevel`
"""
__abstract__ = True
classes = None
"""An instance of :class:`.util.Properties` containing classes.
This object behaves much like the ``.c`` collection on a table. Classes
are present under the name they were given, e.g.::
Base = automap_base()
Base.prepare(engine=some_engine, reflect=True)
User, Address = Base.classes.User, Base.classes.Address
"""
@classmethod
def prepare(
cls,
engine=None,
reflect=False,
schema=None,
classname_for_table=classname_for_table,
collection_class=list,
name_for_scalar_relationship=name_for_scalar_relationship,
name_for_collection_relationship=name_for_collection_relationship,
generate_relationship=generate_relationship,
):
"""Extract mapped classes and relationships from the
:class:`_schema.MetaData` and
perform mappings.
:param engine: an :class:`_engine.Engine` or
:class:`_engine.Connection` with which
to perform schema reflection, if specified.
If the :paramref:`.AutomapBase.prepare.reflect` argument is False,
this object is not used.
:param reflect: if True, the :meth:`_schema.MetaData.reflect`
method is called
on the :class:`_schema.MetaData` associated with this
:class:`.AutomapBase`.
The :class:`_engine.Engine` passed via
:paramref:`.AutomapBase.prepare.engine` will be used to perform the
reflection if present; otherwise, the :class:`_schema.MetaData`
must already be
bound to an engine or the operation will fail.
:param classname_for_table: callable function which will be used to
produce new class names, given a table name. Defaults to
:func:`.classname_for_table`.
:param name_for_scalar_relationship: callable function which will be
used to produce relationship names for scalar relationships. Defaults
to :func:`.name_for_scalar_relationship`.
:param name_for_collection_relationship: callable function which will
be used to produce relationship names for collection-oriented
relationships. Defaults to :func:`.name_for_collection_relationship`.
:param generate_relationship: callable function which will be used to
actually generate :func:`_orm.relationship` and :func:`.backref`
constructs. Defaults to :func:`.generate_relationship`.
:param collection_class: the Python collection class that will be used
when a new :func:`_orm.relationship`
object is created that represents a
collection. Defaults to ``list``.
:param schema: When present in conjunction with the
:paramref:`.AutomapBase.prepare.reflect` flag, is passed to
:meth:`_schema.MetaData.reflect`
to indicate the primary schema where tables
should be reflected from. When omitted, the default schema in use
by the database connection is used.
.. versionadded:: 1.1
"""
if reflect:
cls.metadata.reflect(
engine,
schema=schema,
extend_existing=True,
autoload_replace=False,
)
_CONFIGURE_MUTEX.acquire()
try:
table_to_map_config = dict(
(m.local_table, m)
for m in _DeferredMapperConfig.classes_for_base(
cls, sort=False
)
)
many_to_many = []
for table in cls.metadata.tables.values():
lcl_m2m, rem_m2m, m2m_const = _is_many_to_many(cls, table)
if lcl_m2m is not None:
many_to_many.append((lcl_m2m, rem_m2m, m2m_const, table))
elif not table.primary_key:
continue
elif table not in table_to_map_config:
mapped_cls = type(
classname_for_table(cls, table.name, table),
(cls,),
{"__table__": table},
)
map_config = _DeferredMapperConfig.config_for_cls(
mapped_cls
)
cls.classes[map_config.cls.__name__] = mapped_cls
table_to_map_config[table] = map_config
for map_config in table_to_map_config.values():
_relationships_for_fks(
cls,
map_config,
table_to_map_config,
collection_class,
name_for_scalar_relationship,
name_for_collection_relationship,
generate_relationship,
)
for lcl_m2m, rem_m2m, m2m_const, table in many_to_many:
_m2m_relationship(
cls,
lcl_m2m,
rem_m2m,
m2m_const,
table,
table_to_map_config,
collection_class,
name_for_scalar_relationship,
name_for_collection_relationship,
generate_relationship,
)
for map_config in _DeferredMapperConfig.classes_for_base(cls):
map_config.map()
finally:
_CONFIGURE_MUTEX.release()
_sa_decl_prepare = True
"""Indicate that the mapping of classes should be deferred.
The presence of this attribute name indicates to declarative
that the call to mapper() should not occur immediately; instead,
information about the table and attributes to be mapped are gathered
into an internal structure called _DeferredMapperConfig. These
objects can be collected later using classes_for_base(), additional
mapping decisions can be made, and then the map() method will actually
apply the mapping.
The only real reason this deferral is needed is to support primary
key columns that aren't yet reflected when the class is declared;
everything else can theoretically be added to the mapper later.
However, the _DeferredMapperConfig is a useful interface in any case,
as it exists at the not-usually-exposed point at which declarative has
the class and the Table but has not yet called mapper().
"""
@classmethod
def _sa_raise_deferred_config(cls):
raise orm_exc.UnmappedClassError(
cls,
msg="Class %s is a subclass of AutomapBase. "
"Mappings are not produced until the .prepare() "
"method is called on the class hierarchy."
% orm_exc._safe_cls_name(cls),
)
def automap_base(declarative_base=None, **kw):
r"""Produce a declarative automap base.
This function produces a new base class that is a product of the
:class:`.AutomapBase` class as well a declarative base produced by
:func:`.declarative.declarative_base`.
All parameters other than ``declarative_base`` are keyword arguments
that are passed directly to the :func:`.declarative.declarative_base`
function.
:param declarative_base: an existing class produced by
:func:`.declarative.declarative_base`. When this is passed, the function
no longer invokes :func:`.declarative.declarative_base` itself, and all
other keyword arguments are ignored.
:param \**kw: keyword arguments are passed along to
:func:`.declarative.declarative_base`.
"""
if declarative_base is None:
Base = _declarative_base(**kw)
else:
Base = declarative_base
return type(
Base.__name__,
(AutomapBase, Base),
{"__abstract__": True, "classes": util.Properties({})},
)
def _is_many_to_many(automap_base, table):
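"""Return (left referred table, right referred table, fk constraints)
if the given table appears to be an association ("secondary") table,
else a (None, None, None) tuple.

A table qualifies when it has exactly two ForeignKeyConstraint
objects and every one of its columns is a member of one of them.
"""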
fk_constraints = [
const
for const in table.constraints
if isinstance(const, ForeignKeyConstraint)
]
if len(fk_constraints) != 2:
return None, None, None
cols = sum(
[
[fk.parent for fk in fk_constraint.elements]
for fk_constraint in fk_constraints
],
[],
)
if set(cols) != set(table.c):
return None, None, None
return (
fk_constraints[0].elements[0].column.table,
fk_constraints[1].elements[0].column.table,
fk_constraints,
)
def _relationships_for_fks(
automap_base,
map_config,
table_to_map_config,
collection_class,
name_for_scalar_relationship,
name_for_collection_relationship,
generate_relationship,
):
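"""Generate a many-to-one :func:`_orm.relationship`, plus its
one-to-many backref or ``back_populates`` counterpart, for each
:class:`_schema.ForeignKeyConstraint` on the mapped table, skipping
constraints that express inheritance as well as any attribute names
already present on either class.
"""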
local_table = map_config.local_table
local_cls = map_config.cls # derived from a weakref, may be None
if local_table is None or local_cls is None:
return
for constraint in local_table.constraints:
if isinstance(constraint, ForeignKeyConstraint):
fks = constraint.elements
referred_table = fks[0].column.table
referred_cfg = table_to_map_config.get(referred_table, None)
if referred_cfg is None:
continue
referred_cls = referred_cfg.cls
if local_cls is not referred_cls and issubclass(
local_cls, referred_cls
):
continue
relationship_name = name_for_scalar_relationship(
automap_base, local_cls, referred_cls, constraint
)
backref_name = name_for_collection_relationship(
automap_base, referred_cls, local_cls, constraint
)
o2m_kws = {}
nullable = False not in {fk.parent.nullable for fk in fks}
if not nullable:
o2m_kws["cascade"] = "all, delete-orphan"
if (
constraint.ondelete
and constraint.ondelete.lower() == "cascade"
):
o2m_kws["passive_deletes"] = True
else:
if (
constraint.ondelete
and constraint.ondelete.lower() == "set null"
):
o2m_kws["passive_deletes"] = True
create_backref = backref_name not in referred_cfg.properties
if relationship_name not in map_config.properties:
if create_backref:
backref_obj = generate_relationship(
automap_base,
interfaces.ONETOMANY,
backref,
backref_name,
referred_cls,
local_cls,
collection_class=collection_class,
**o2m_kws
)
else:
backref_obj = None
rel = generate_relationship(
automap_base,
interfaces.MANYTOONE,
relationship,
relationship_name,
local_cls,
referred_cls,
foreign_keys=[fk.parent for fk in constraint.elements],
backref=backref_obj,
remote_side=[fk.column for fk in constraint.elements],
)
if rel is not None:
map_config.properties[relationship_name] = rel
if not create_backref:
referred_cfg.properties[
backref_name
].back_populates = relationship_name
elif create_backref:
rel = generate_relationship(
automap_base,
interfaces.ONETOMANY,
relationship,
backref_name,
referred_cls,
local_cls,
foreign_keys=[fk.parent for fk in constraint.elements],
back_populates=relationship_name,
collection_class=collection_class,
**o2m_kws
)
if rel is not None:
referred_cfg.properties[backref_name] = rel
map_config.properties[
relationship_name
].back_populates = backref_name
def _m2m_relationship(
automap_base,
lcl_m2m,
rem_m2m,
m2m_const,
table,
table_to_map_config,
collection_class,
name_for_scalar_relationship,
name_for_collection_relationship,
generate_relationship,
):
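"""Generate a bidirectional many-to-many :func:`_orm.relationship` /
backref pair across the given "secondary" table, linking the classes
mapped to the two tables to which it refers, if both are present.
"""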
map_config = table_to_map_config.get(lcl_m2m, None)
referred_cfg = table_to_map_config.get(rem_m2m, None)
if map_config is None or referred_cfg is None:
return
local_cls = map_config.cls
referred_cls = referred_cfg.cls
relationship_name = name_for_collection_relationship(
automap_base, local_cls, referred_cls, m2m_const[0]
)
backref_name = name_for_collection_relationship(
automap_base, referred_cls, local_cls, m2m_const[1]
)
create_backref = backref_name not in referred_cfg.properties
if relationship_name not in map_config.properties:
if create_backref:
backref_obj = generate_relationship(
automap_base,
interfaces.MANYTOMANY,
backref,
backref_name,
referred_cls,
local_cls,
collection_class=collection_class,
)
else:
backref_obj = None
rel = generate_relationship(
automap_base,
interfaces.MANYTOMANY,
relationship,
relationship_name,
local_cls,
referred_cls,
secondary=table,
primaryjoin=and_(
fk.column == fk.parent for fk in m2m_const[0].elements
),
secondaryjoin=and_(
fk.column == fk.parent for fk in m2m_const[1].elements
),
backref=backref_obj,
collection_class=collection_class,
)
if rel is not None:
map_config.properties[relationship_name] = rel
if not create_backref:
referred_cfg.properties[
backref_name
].back_populates = relationship_name
elif create_backref:
rel = generate_relationship(
automap_base,
interfaces.MANYTOMANY,
relationship,
backref_name,
referred_cls,
local_cls,
secondary=table,
primaryjoin=and_(
fk.column == fk.parent for fk in m2m_const[1].elements
),
secondaryjoin=and_(
fk.column == fk.parent for fk in m2m_const[0].elements
),
back_populates=relationship_name,
collection_class=collection_class,
)
if rel is not None:
referred_cfg.properties[backref_name] = rel
map_config.properties[
relationship_name
].back_populates = backref_name
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext/horizontal_shard.py | # ext/horizontal_shard.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Horizontal sharding support.
Defines a rudimentary 'horizontal sharding' system which allows a Session to
distribute queries and persistence operations across multiple databases.
For a usage example, see the :ref:`examples_sharding` example included in
the source distribution.
"""
from .. import inspect
from .. import util
from ..orm.query import Query
from ..orm.session import Session
__all__ = ["ShardedSession", "ShardedQuery"]
class ShardedQuery(Query):
def __init__(self, *args, **kwargs):
super(ShardedQuery, self).__init__(*args, **kwargs)
self.id_chooser = self.session.id_chooser
self.query_chooser = self.session.query_chooser
self._shard_id = None
def set_shard(self, shard_id):
"""return a new query, limited to a single shard ID.
all subsequent operations with the returned query will
be against the single shard regardless of other state.
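E.g. (a sketch; the ``WeatherLocation`` class and the ``"asia"``
shard name are illustrative)::

    q = sess.query(WeatherLocation).set_shard("asia")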
"""
q = self._clone()
q._shard_id = shard_id
return q
def _execute_and_instances(self, context):
def iter_for_shard(shard_id):
context.attributes["shard_id"] = context.identity_token = shard_id
result = self._connection_from_session(
mapper=self._bind_mapper(), shard_id=shard_id
).execute(context.statement, self._params)
return self.instances(result, context)
if context.identity_token is not None:
return iter_for_shard(context.identity_token)
elif self._shard_id is not None:
return iter_for_shard(self._shard_id)
else:
partial = []
for shard_id in self.query_chooser(self):
partial.extend(iter_for_shard(shard_id))
# if some kind of in memory 'sorting'
# were done, this is where it would happen
return iter(partial)
def _execute_crud(self, stmt, mapper):
def exec_for_shard(shard_id):
conn = self._connection_from_session(
mapper=mapper,
shard_id=shard_id,
clause=stmt,
close_with_result=True,
)
result = conn.execute(stmt, self._params)
return result
if self._shard_id is not None:
return exec_for_shard(self._shard_id)
else:
rowcount = 0
results = []
for shard_id in self.query_chooser(self):
result = exec_for_shard(shard_id)
rowcount += result.rowcount
results.append(result)
return ShardedResult(results, rowcount)
def _identity_lookup(
self,
mapper,
primary_key_identity,
identity_token=None,
lazy_loaded_from=None,
**kw
):
"""override the default Query._identity_lookup method so that we
search for a given non-token primary key identity across all
possible identity tokens (e.g. shard ids).
"""
if identity_token is not None:
return super(ShardedQuery, self)._identity_lookup(
mapper,
primary_key_identity,
identity_token=identity_token,
**kw
)
else:
q = self.session.query(mapper)
if lazy_loaded_from:
q = q._set_lazyload_from(lazy_loaded_from)
for shard_id in self.id_chooser(q, primary_key_identity):
obj = super(ShardedQuery, self)._identity_lookup(
mapper, primary_key_identity, identity_token=shard_id, **kw
)
if obj is not None:
return obj
return None
def _get_impl(self, primary_key_identity, db_load_fn, identity_token=None):
"""Override the default Query._get_impl() method so that we emit
a query to the DB for each possible identity token, if we don't
have one already.
"""
def _db_load_fn(query, primary_key_identity):
# load from the database. The original db_load_fn will
# use the given Query object to load from the DB, so our
# shard_id is what will indicate the DB that we query from.
if self._shard_id is not None:
return db_load_fn(self, primary_key_identity)
else:
ident = util.to_list(primary_key_identity)
# build a ShardedQuery for each shard identifier and
# try to load from the DB
for shard_id in self.id_chooser(self, ident):
q = self.set_shard(shard_id)
o = db_load_fn(q, ident)
if o is not None:
return o
else:
return None
if identity_token is None and self._shard_id is not None:
identity_token = self._shard_id
return super(ShardedQuery, self)._get_impl(
primary_key_identity, _db_load_fn, identity_token=identity_token
)
class ShardedResult(object):
"""A value object that represents multiple :class:`_engine.ResultProxy`
objects.
This is used by the :meth:`.ShardedQuery._execute_crud` hook to return
an object that takes the place of the single :class:`_engine.ResultProxy`.
Attributes include ``result_proxies``, which is a sequence of the
actual :class:`_engine.ResultProxy` objects,
as well as ``aggregate_rowcount``
or ``rowcount``, which is the sum of all the individual rowcount values.
.. versionadded:: 1.3
"""
__slots__ = ("result_proxies", "aggregate_rowcount")
def __init__(self, result_proxies, aggregate_rowcount):
self.result_proxies = result_proxies
self.aggregate_rowcount = aggregate_rowcount
@property
def rowcount(self):
return self.aggregate_rowcount
class ShardedSession(Session):
def __init__(
self,
shard_chooser,
id_chooser,
query_chooser,
shards=None,
query_cls=ShardedQuery,
**kwargs
):
"""Construct a ShardedSession.
:param shard_chooser: A callable which, passed a Mapper, a mapped
instance, and possibly a SQL clause, returns a shard ID. This ID
may be based on the attributes present within the object, or on
some round-robin scheme. If the scheme is based on a selection, it
should set whatever state on the instance is needed to mark it in the
future as participating in that shard.
:param id_chooser: A callable, passed a query and a tuple of identity
values, which should return a list of shard ids where the ID might
reside. The databases will be queried in the order of this listing.
:param query_chooser: For a given Query, returns the list of shard_ids
where the query should be issued. Results from all shards returned
will be combined together into a single listing.
:param shards: A dictionary of string shard names
to :class:`~sqlalchemy.engine.Engine` objects.
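A minimal sketch of the three callables, assuming two shards bound
under the names ``"asia"`` and ``"north_america"`` and mapped
instances carrying a ``region`` attribute (all names here are
illustrative)::

    def shard_chooser(mapper, instance, clause=None):
        # route flushes by an attribute carried on the instance itself
        return instance.region

    def id_chooser(query, ident):
        # a primary key alone doesn't identify a shard; search them all
        return ["asia", "north_america"]

    def query_chooser(query):
        # this sketch doesn't inspect query criteria; search all shards
        return ["asia", "north_america"]

    sess = ShardedSession(
        shard_chooser=shard_chooser,
        id_chooser=id_chooser,
        query_chooser=query_chooser,
        shards={
            "asia": asia_engine,
            "north_america": north_america_engine,
        },
    )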
"""
super(ShardedSession, self).__init__(query_cls=query_cls, **kwargs)
self.shard_chooser = shard_chooser
self.id_chooser = id_chooser
self.query_chooser = query_chooser
self.__binds = {}
self.connection_callable = self.connection
if shards is not None:
for k in shards:
self.bind_shard(k, shards[k])
def _choose_shard_and_assign(self, mapper, instance, **kw):
if instance is not None:
state = inspect(instance)
if state.key:
token = state.key[2]
assert token is not None
return token
elif state.identity_token:
return state.identity_token
shard_id = self.shard_chooser(mapper, instance, **kw)
if instance is not None:
state.identity_token = shard_id
return shard_id
def connection(self, mapper=None, instance=None, shard_id=None, **kwargs):
if shard_id is None:
shard_id = self._choose_shard_and_assign(mapper, instance)
if self.transaction is not None:
return self.transaction.connection(mapper, shard_id=shard_id)
else:
return self.get_bind(
mapper, shard_id=shard_id, instance=instance
)._contextual_connect(**kwargs)
def get_bind(
self, mapper, shard_id=None, instance=None, clause=None, **kw
):
if shard_id is None:
shard_id = self._choose_shard_and_assign(
mapper, instance, clause=clause
)
return self.__binds[shard_id]
def bind_shard(self, shard_id, bind):
self.__binds[shard_id] = bind
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext/mutable.py | # ext/mutable.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""Provide support for tracking of in-place changes to scalar values,
which are propagated into ORM change events on owning parent objects.
.. _mutable_scalars:
Establishing Mutability on Scalar Column Values
===============================================
A typical example of a "mutable" structure is a Python dictionary.
Following the example introduced in :ref:`types_toplevel`, we
begin with a custom type that marshals Python dictionaries into
JSON strings before being persisted::
from sqlalchemy.types import TypeDecorator, VARCHAR
import json
class JSONEncodedDict(TypeDecorator):
"Represents an immutable structure as a json-encoded string."
impl = VARCHAR
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
The usage of ``json`` is only for the purposes of example. The
:mod:`sqlalchemy.ext.mutable` extension can be used
with any type whose target Python type may be mutable, including
:class:`.PickleType`, :class:`_postgresql.ARRAY`, etc.
When using the :mod:`sqlalchemy.ext.mutable` extension, the value itself
tracks all parents which reference it. Below, we illustrate a simple
version of the :class:`.MutableDict` dictionary object, which applies
the :class:`.Mutable` mixin to a plain Python dictionary::
from sqlalchemy.ext.mutable import Mutable
class MutableDict(Mutable, dict):
@classmethod
def coerce(cls, key, value):
"Convert plain dictionaries to MutableDict."
if not isinstance(value, MutableDict):
if isinstance(value, dict):
return MutableDict(value)
# this call will raise ValueError
return Mutable.coerce(key, value)
else:
return value
def __setitem__(self, key, value):
"Detect dictionary set events and emit change events."
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
"Detect dictionary del events and emit change events."
dict.__delitem__(self, key)
self.changed()
The above dictionary class takes the approach of subclassing the Python
built-in ``dict`` to produce a dict
subclass which routes all mutation events through ``__setitem__``. There are
variants on this approach, such as subclassing ``UserDict.UserDict`` or
``collections.MutableMapping``; the part that's important to this example is
that the :meth:`.Mutable.changed` method is called whenever an in-place
change to the datastructure takes place.
We also redefine the :meth:`.Mutable.coerce` method which will be used to
convert any values that are not instances of ``MutableDict``, such
as the plain dictionaries returned by the ``json`` module, into the
appropriate type. Defining this method is optional; we could just as well have
created our ``JSONEncodedDict`` such that it always returns an instance
of ``MutableDict``, and additionally ensured that all calling code
uses ``MutableDict`` explicitly. When :meth:`.Mutable.coerce` is not
overridden, any values applied to a parent object which are not instances
of the mutable type will raise a ``ValueError``.
Our new ``MutableDict`` type offers a class method
:meth:`~.Mutable.as_mutable` which we can use within column metadata
to associate with types. This method grabs the given type object or
class and associates a listener that will detect all future mappings
of this type, applying event listening instrumentation to the mapped
attribute. For example, with classical table metadata::
from sqlalchemy import Table, Column, Integer
my_data = Table('my_data', metadata,
Column('id', Integer, primary_key=True),
Column('data', MutableDict.as_mutable(JSONEncodedDict))
)
Above, :meth:`~.Mutable.as_mutable` returns an instance of ``JSONEncodedDict``
(if the type object was not an instance already), which will intercept any
attributes which are mapped against this type. Below we establish a simple
mapping against the ``my_data`` table::
from sqlalchemy.orm import mapper
class MyDataClass(object):
pass
# associates mutation listeners with MyDataClass.data
mapper(MyDataClass, my_data)
The ``MyDataClass.data`` member will now be notified of in place changes
to its value.
There's no difference in usage when using declarative::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(JSONEncodedDict))
Any in-place changes to the ``MyDataClass.data`` member
will flag the attribute as "dirty" on the parent object::
>>> from sqlalchemy.orm import Session
>>> sess = Session()
>>> m1 = MyDataClass(data={'value1':'foo'})
>>> sess.add(m1)
>>> sess.commit()
>>> m1.data['value1'] = 'bar'
>>> m1 in sess.dirty
True
The ``MutableDict`` can be associated with all future instances
of ``JSONEncodedDict`` in one step, using
:meth:`~.Mutable.associate_with`. This is similar to
:meth:`~.Mutable.as_mutable` except it will intercept all occurrences
of ``MutableDict`` in all mappings unconditionally, without
the need to declare it individually::
MutableDict.associate_with(JSONEncodedDict)
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(JSONEncodedDict)
Supporting Pickling
--------------------
The key to the :mod:`sqlalchemy.ext.mutable` extension relies upon the
placement of a ``weakref.WeakKeyDictionary`` upon the value object, which
stores a mapping of parent mapped objects keyed to the attribute name under
which they are associated with this value. ``WeakKeyDictionary`` objects are
not picklable, because they contain weakrefs and function
callbacks. In our case, this is a good thing, since if this dictionary were
picklable, it could lead to an excessively large pickle size for our value
objects that are pickled by themselves outside of the context of the parent.
The developer responsibility here is only to provide a ``__getstate__`` method
that excludes the :meth:`~MutableBase._parents` collection from the pickle
stream::
class MyMutableType(Mutable):
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_parents', None)
return d
With our dictionary example, we need to return the contents of the dict itself
(and also restore them on __setstate__)::
class MutableDict(Mutable, dict):
# ....
def __getstate__(self):
return dict(self)
def __setstate__(self, state):
self.update(state)
In the case that our mutable value object is pickled as it is attached to one
or more parent objects that are also part of the pickle, the :class:`.Mutable`
mixin will re-establish the :attr:`.Mutable._parents` collection on each value
object as the owning parents themselves are unpickled.
Receiving Events
----------------
The :meth:`.AttributeEvents.modified` event handler may be used to receive
an event when a mutable scalar emits a change event. This event handler
is called when the :func:`.attributes.flag_modified` function is called
from within the mutable extension::
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import event
Base = declarative_base()
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(JSONEncodedDict))
@event.listens_for(MyDataClass.data, "modified")
def modified_json(instance):
print("json value modified:", instance.data)
.. _mutable_composites:
Establishing Mutability on Composites
=====================================
Composites are a special ORM feature which allow a single scalar attribute to
be assigned an object value which represents information "composed" from one
or more columns from the underlying mapped table. The usual example is that of
a geometric "point", and is introduced in :ref:`mapper_composite`.
As is the case with :class:`.Mutable`, the user-defined composite class
subclasses :class:`.MutableComposite` as a mixin, and detects and delivers
change events to its parents via the :meth:`.MutableComposite.changed` method.
In the case of a composite class, the detection is usually via the usage of
Python descriptors (i.e. ``@property``), or alternatively via the special
Python method ``__setattr__()``. Below we expand upon the ``Point`` class
introduced in :ref:`mapper_composite` to subclass :class:`.MutableComposite`
and to also route attribute set events via ``__setattr__`` to the
:meth:`.MutableComposite.changed` method::
from sqlalchemy.ext.mutable import MutableComposite
class Point(MutableComposite):
def __init__(self, x, y):
self.x = x
self.y = y
def __setattr__(self, key, value):
"Intercept set events"
# set the attribute
object.__setattr__(self, key, value)
# alert all parents to the change
self.changed()
def __composite_values__(self):
return self.x, self.y
def __eq__(self, other):
return isinstance(other, Point) and \
other.x == self.x and \
other.y == self.y
def __ne__(self, other):
return not self.__eq__(other)
The :class:`.MutableComposite` class uses a Python metaclass to automatically
establish listeners for any usage of :func:`_orm.composite` that specifies our
``Point`` type. Below, when ``Point`` is mapped to the ``Vertex`` class,
listeners are established which will route change events from ``Point``
objects to each of the ``Vertex.start`` and ``Vertex.end`` attributes::
from sqlalchemy.orm import composite, mapper
from sqlalchemy import Table, Column, Integer
vertices = Table('vertices', metadata,
Column('id', Integer, primary_key=True),
Column('x1', Integer),
Column('y1', Integer),
Column('x2', Integer),
Column('y2', Integer),
)
class Vertex(object):
pass
mapper(Vertex, vertices, properties={
'start': composite(Point, vertices.c.x1, vertices.c.y1),
'end': composite(Point, vertices.c.x2, vertices.c.y2)
})
Any in-place changes to the ``Vertex.start`` or ``Vertex.end`` members
will flag the attribute as "dirty" on the parent object::
>>> from sqlalchemy.orm import Session
>>> sess = Session()
>>> v1 = Vertex(start=Point(3, 4), end=Point(12, 15))
>>> sess.add(v1)
>>> sess.commit()
>>> v1.end.x = 8
>>> v1 in sess.dirty
True
Coercing Mutable Composites
---------------------------
The :meth:`.MutableBase.coerce` method is also supported on composite types.
In the case of :class:`.MutableComposite`, the :meth:`.MutableBase.coerce`
method is only called for attribute set operations, not load operations.
Overriding the :meth:`.MutableBase.coerce` method is essentially equivalent
to using a :func:`.validates` validation routine for all attributes which
make use of the custom composite type::
class Point(MutableComposite):
# other Point methods
# ...
@classmethod
def coerce(cls, key, value):
if isinstance(value, tuple):
value = Point(*value)
elif not isinstance(value, Point):
raise ValueError("tuple or Point expected")
return value
Supporting Pickling
--------------------
As is the case with :class:`.Mutable`, the :class:`.MutableComposite` helper
class uses a ``weakref.WeakKeyDictionary`` available via the
:meth:`MutableBase._parents` attribute which isn't picklable. If we need to
pickle instances of ``Point`` or its owning class ``Vertex``, we at least need
to define a ``__getstate__`` that doesn't include the ``_parents`` dictionary.
Below we define both a ``__getstate__`` and a ``__setstate__`` that package up
the minimal form of our ``Point`` class::
class Point(MutableComposite):
# ...
def __getstate__(self):
return self.x, self.y
def __setstate__(self, state):
self.x, self.y = state
As with :class:`.Mutable`, the :class:`.MutableComposite` augments the
pickling process of the parent's object-relational state so that the
:meth:`MutableBase._parents` collection is restored to all ``Point`` objects.
"""
import weakref
from .. import event
from .. import types
from ..orm import Mapper
from ..orm import mapper
from ..orm import object_mapper
from ..orm.attributes import flag_modified
from ..sql.base import SchemaEventTarget
from ..util import memoized_property
class MutableBase(object):
"""Common base class to :class:`.Mutable`
and :class:`.MutableComposite`.
"""
@memoized_property
def _parents(self):
"""Dictionary of parent object->attribute name on the parent.
This attribute is a so-called "memoized" property. It initializes
itself with a new ``weakref.WeakKeyDictionary`` the first time
it is accessed, returning the same object upon subsequent access.
"""
return weakref.WeakKeyDictionary()
@classmethod
def coerce(cls, key, value):
"""Given a value, coerce it into the target type.
Can be overridden by custom subclasses to coerce incoming
data into a particular type.
By default, raises ``ValueError``.
This method is called in different scenarios depending on if
the parent class is of type :class:`.Mutable` or of type
:class:`.MutableComposite`. In the case of the former, it is called
for both attribute-set operations as well as during ORM loading
operations. For the latter, it is only called during attribute-set
operations; the mechanics of the :func:`.composite` construct
handle coercion during load operations.
:param key: string name of the ORM-mapped attribute being set.
:param value: the incoming value.
:return: the method should return the coerced value, or raise
``ValueError`` if the coercion cannot be completed.
"""
if value is None:
return None
msg = "Attribute '%s' does not accept objects of type %s"
raise ValueError(msg % (key, type(value)))
@classmethod
def _get_listen_keys(cls, attribute):
"""Given a descriptor attribute, return a ``set()`` of the attribute
keys which indicate a change in the state of this attribute.
This is normally just ``set([attribute.key])``, but can be overridden
to provide for additional keys. E.g. a :class:`.MutableComposite`
augments this set with the attribute keys associated with the columns
that comprise the composite value.
This collection is consulted in the case of intercepting the
:meth:`.InstanceEvents.refresh` and
:meth:`.InstanceEvents.refresh_flush` events, which pass along a list
of attribute names that have been refreshed; the list is compared
against this set to determine if action needs to be taken.
.. versionadded:: 1.0.5
"""
return {attribute.key}
@classmethod
def _listen_on_attribute(cls, attribute, coerce, parent_cls):
"""Establish this type as a mutation listener for the given
mapped descriptor.
"""
key = attribute.key
if parent_cls is not attribute.class_:
return
# rely on "propagate" here
parent_cls = attribute.class_
listen_keys = cls._get_listen_keys(attribute)
def load(state, *args):
"""Listen for objects loaded or refreshed.
Wrap the target data member's value with
``Mutable``.
"""
val = state.dict.get(key, None)
if val is not None:
if coerce:
val = cls.coerce(key, val)
state.dict[key] = val
val._parents[state.obj()] = key
def load_attrs(state, ctx, attrs):
if not attrs or listen_keys.intersection(attrs):
load(state)
def set_(target, value, oldvalue, initiator):
"""Listen for set/replace events on the target
data member.
Establish a weak reference to the parent object
on the incoming value, remove it for the one
outgoing.
"""
if value is oldvalue:
return value
if not isinstance(value, cls):
value = cls.coerce(key, value)
if value is not None:
value._parents[target.obj()] = key
if isinstance(oldvalue, cls):
oldvalue._parents.pop(target.obj(), None)
return value
def pickle(state, state_dict):
val = state.dict.get(key, None)
if val is not None:
if "ext.mutable.values" not in state_dict:
state_dict["ext.mutable.values"] = []
state_dict["ext.mutable.values"].append(val)
def unpickle(state, state_dict):
if "ext.mutable.values" in state_dict:
for val in state_dict["ext.mutable.values"]:
val._parents[state.obj()] = key
event.listen(parent_cls, "load", load, raw=True, propagate=True)
event.listen(
parent_cls, "refresh", load_attrs, raw=True, propagate=True
)
event.listen(
parent_cls, "refresh_flush", load_attrs, raw=True, propagate=True
)
event.listen(
attribute, "set", set_, raw=True, retval=True, propagate=True
)
event.listen(parent_cls, "pickle", pickle, raw=True, propagate=True)
event.listen(
parent_cls, "unpickle", unpickle, raw=True, propagate=True
)
class Mutable(MutableBase):
"""Mixin that defines transparent propagation of change
events to a parent object.
See the example in :ref:`mutable_scalars` for usage information.
"""
def changed(self):
"""Subclasses should call this method whenever change events occur."""
for parent, key in self._parents.items():
flag_modified(parent, key)
@classmethod
def associate_with_attribute(cls, attribute):
"""Establish this type as a mutation listener for the given
mapped descriptor.
"""
cls._listen_on_attribute(attribute, True, attribute.class_)
@classmethod
def associate_with(cls, sqltype):
"""Associate this wrapper with all future mapped columns
of the given type.
This is a convenience method that calls
``associate_with_attribute`` automatically.
.. warning::
The listeners established by this method are *global*
to all mappers, and are *not* garbage collected. Only use
:meth:`.associate_with` for types that are permanent to an
application, not with ad-hoc types, or else this will cause unbounded
growth in memory usage.
"""
def listen_for_type(mapper, class_):
if mapper.non_primary:
return
for prop in mapper.column_attrs:
if isinstance(prop.columns[0].type, sqltype):
cls.associate_with_attribute(getattr(class_, prop.key))
event.listen(mapper, "mapper_configured", listen_for_type)
@classmethod
def as_mutable(cls, sqltype):
"""Associate a SQL type with this mutable Python type.
This establishes listeners that will detect ORM mappings against
the given type, adding mutation event trackers to those mappings.
The type is returned, unconditionally as an instance, so that
:meth:`.as_mutable` can be used inline::
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('data', MyMutableType.as_mutable(PickleType))
)
Note that the returned type is always an instance, even if a class
is given, and that only columns which are declared specifically with
that type instance receive additional instrumentation.
To associate a particular mutable type with all occurrences of a
particular type, use the :meth:`.Mutable.associate_with` classmethod
of the particular :class:`.Mutable` subclass to establish a global
association.
.. warning::
The listeners established by this method are *global*
to all mappers, and are *not* garbage collected. Only use
:meth:`.as_mutable` for types that are permanent to an application,
not with ad-hoc types, or else this will cause unbounded growth
in memory usage.
"""
sqltype = types.to_instance(sqltype)
# a SchemaType will be copied when the Column is copied,
# and we'll lose our ability to link that type back to the original.
# so track our original type w/ columns
if isinstance(sqltype, SchemaEventTarget):
@event.listens_for(sqltype, "before_parent_attach")
def _add_column_memo(sqltyp, parent):
parent.info["_ext_mutable_orig_type"] = sqltyp
schema_event_check = True
else:
schema_event_check = False
def listen_for_type(mapper, class_):
if mapper.non_primary:
return
for prop in mapper.column_attrs:
if (
schema_event_check
and hasattr(prop.expression, "info")
and prop.expression.info.get("_ext_mutable_orig_type")
is sqltype
) or (prop.columns[0].type is sqltype):
cls.associate_with_attribute(getattr(class_, prop.key))
event.listen(mapper, "mapper_configured", listen_for_type)
return sqltype
class MutableComposite(MutableBase):
"""Mixin that defines transparent propagation of change
events on a SQLAlchemy "composite" object to its
owning parent or parents.
See the example in :ref:`mutable_composites` for usage information.
"""
@classmethod
def _get_listen_keys(cls, attribute):
return {attribute.key}.union(attribute.property._attribute_keys)
def changed(self):
"""Subclasses should call this method whenever change events occur."""
for parent, key in self._parents.items():
prop = object_mapper(parent).get_property(key)
for value, attr_name in zip(
self.__composite_values__(), prop._attribute_keys
):
setattr(parent, attr_name, value)
def _setup_composite_listener():
def _listen_for_type(mapper, class_):
for prop in mapper.iterate_properties:
if (
hasattr(prop, "composite_class")
and isinstance(prop.composite_class, type)
and issubclass(prop.composite_class, MutableComposite)
):
prop.composite_class._listen_on_attribute(
getattr(class_, prop.key), False, class_
)
if not event.contains(Mapper, "mapper_configured", _listen_for_type):
event.listen(Mapper, "mapper_configured", _listen_for_type)
_setup_composite_listener()
class MutableDict(Mutable, dict):
"""A dictionary type that implements :class:`.Mutable`.
The :class:`.MutableDict` object implements a dictionary that will
emit change events to the underlying mapping when the contents of
the dictionary are altered, including when values are added or removed.
Note that :class:`.MutableDict` does **not** apply mutable tracking to the
*values themselves* inside the dictionary. Therefore it is not a sufficient
solution for the use case of tracking deep changes to a *recursive*
dictionary structure, such as a JSON structure. To support this use case,
build a subclass of :class:`.MutableDict` that provides appropriate
coercion to the values placed in the dictionary so that they too are
"mutable", and emit events up to their parent structure.
.. seealso::
:class:`.MutableList`
:class:`.MutableSet`
"""
def __setitem__(self, key, value):
"""Detect dictionary set events and emit change events."""
dict.__setitem__(self, key, value)
self.changed()
def setdefault(self, key, value):
result = dict.setdefault(self, key, value)
self.changed()
return result
def __delitem__(self, key):
"""Detect dictionary del events and emit change events."""
dict.__delitem__(self, key)
self.changed()
def update(self, *a, **kw):
dict.update(self, *a, **kw)
self.changed()
def pop(self, *arg):
result = dict.pop(self, *arg)
self.changed()
return result
def popitem(self):
result = dict.popitem(self)
self.changed()
return result
def clear(self):
dict.clear(self)
self.changed()
@classmethod
def coerce(cls, key, value):
"""Convert plain dictionary to instance of this class."""
if not isinstance(value, cls):
if isinstance(value, dict):
return cls(value)
return Mutable.coerce(key, value)
else:
return value
def __getstate__(self):
return dict(self)
def __setstate__(self, state):
self.update(state)
class MutableList(Mutable, list):
"""A list type that implements :class:`.Mutable`.
The :class:`.MutableList` object implements a list that will
emit change events to the underlying mapping when the contents of
the list are altered, including when values are added or removed.
Note that :class:`.MutableList` does **not** apply mutable tracking to the
*values themselves* inside the list. Therefore it is not a sufficient
solution for the use case of tracking deep changes to a *recursive*
mutable structure, such as a JSON structure. To support this use case,
build a subclass of :class:`.MutableList` that provides appropriate
coercion to the values placed in the list so that they too are
"mutable", and emit events up to their parent structure.
.. versionadded:: 1.1
.. seealso::
:class:`.MutableDict`
:class:`.MutableSet`
"""
def __reduce_ex__(self, proto):
return (self.__class__, (list(self),))
# needed for backwards compatibility with
# older pickles
def __setstate__(self, state):
self[:] = state
def __setitem__(self, index, value):
"""Detect list set events and emit change events."""
list.__setitem__(self, index, value)
self.changed()
def __setslice__(self, start, end, value):
"""Detect list set events and emit change events."""
list.__setslice__(self, start, end, value)
self.changed()
def __delitem__(self, index):
"""Detect list del events and emit change events."""
list.__delitem__(self, index)
self.changed()
def __delslice__(self, start, end):
"""Detect list del events and emit change events."""
list.__delslice__(self, start, end)
self.changed()
def pop(self, *arg):
result = list.pop(self, *arg)
self.changed()
return result
def append(self, x):
list.append(self, x)
self.changed()
def extend(self, x):
list.extend(self, x)
self.changed()
def __iadd__(self, x):
self.extend(x)
return self
def insert(self, i, x):
list.insert(self, i, x)
self.changed()
def remove(self, i):
list.remove(self, i)
self.changed()
def clear(self):
list.clear(self)
self.changed()
def sort(self, **kw):
list.sort(self, **kw)
self.changed()
def reverse(self):
list.reverse(self)
self.changed()
@classmethod
def coerce(cls, index, value):
"""Convert plain list to instance of this class."""
if not isinstance(value, cls):
if isinstance(value, list):
return cls(value)
return Mutable.coerce(index, value)
else:
return value
class MutableSet(Mutable, set):
"""A set type that implements :class:`.Mutable`.
The :class:`.MutableSet` object implements a set that will
emit change events to the underlying mapping when the contents of
the set are altered, including when values are added or removed.
Note that :class:`.MutableSet` does **not** apply mutable tracking to the
*values themselves* inside the set. Therefore it is not a sufficient
solution for the use case of tracking deep changes to a *recursive*
mutable structure. To support this use case,
build a subclass of :class:`.MutableSet` that provides appropriate
coercion to the values placed in the set so that they too are
"mutable", and emit events up to their parent structure.
.. versionadded:: 1.1
.. seealso::
:class:`.MutableDict`
:class:`.MutableList`
"""
def update(self, *arg):
set.update(self, *arg)
self.changed()
def intersection_update(self, *arg):
set.intersection_update(self, *arg)
self.changed()
def difference_update(self, *arg):
set.difference_update(self, *arg)
self.changed()
def symmetric_difference_update(self, *arg):
set.symmetric_difference_update(self, *arg)
self.changed()
def __ior__(self, other):
self.update(other)
return self
def __iand__(self, other):
self.intersection_update(other)
return self
def __ixor__(self, other):
self.symmetric_difference_update(other)
return self
def __isub__(self, other):
self.difference_update(other)
return self
def add(self, elem):
set.add(self, elem)
self.changed()
def remove(self, elem):
set.remove(self, elem)
self.changed()
def discard(self, elem):
set.discard(self, elem)
self.changed()
def pop(self, *arg):
result = set.pop(self, *arg)
self.changed()
return result
def clear(self):
set.clear(self)
self.changed()
@classmethod
def coerce(cls, index, value):
"""Convert plain set to instance of this class."""
if not isinstance(value, cls):
if isinstance(value, set):
return cls(value)
return Mutable.coerce(index, value)
else:
return value
def __getstate__(self):
return set(self)
def __setstate__(self, state):
self.update(state)
def __reduce_ex__(self, proto):
return (self.__class__, (list(self),))
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext/associationproxy.py | # ext/associationproxy.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Contain the ``AssociationProxy`` class.
The ``AssociationProxy`` is a Python property object which provides
transparent proxied access to the endpoint of an association object.
See the example ``examples/association/proxied_association.py``.
"""
import operator
from .. import exc
from .. import inspect
from .. import orm
from .. import util
from ..orm import collections
from ..orm import interfaces
from ..sql import or_
from ..sql.operators import ColumnOperators
def association_proxy(target_collection, attr, **kw):
r"""Return a Python property implementing a view of a target
attribute which references an attribute on members of the
target.
The returned value is an instance of :class:`.AssociationProxy`.
Implements a Python property representing a relationship as a collection
of simpler values, or a scalar value. The proxied property will mimic
the collection type of the target (list, dict or set), or, in the case of
a one to one relationship, a simple scalar value.
:param target_collection: Name of the attribute we'll proxy to.
This attribute is typically mapped by
:func:`~sqlalchemy.orm.relationship` to link to a target collection, but
can also be a many-to-one or non-scalar relationship.
:param attr: Attribute on the associated instance or instances we'll
proxy for.
For example, given a target collection of [obj1, obj2], a list created
by this proxy property would look like [getattr(obj1, *attr*),
getattr(obj2, *attr*)]
If the relationship is one-to-one or otherwise uselist=False, then
simply: getattr(obj, *attr*)
:param creator: optional.
When new items are added to this proxied collection, new instances of
the class collected by the target collection will be created. For list
and set collections, the target class constructor will be called with
the 'value' for the new instance. For dict types, two arguments are
passed: key and value.
If you want to construct instances differently, supply a *creator*
function that takes arguments as above and returns instances.
For scalar relationships, creator() will be called if the target is None.
If the target is present, set operations are proxied to setattr() on the
associated object.
If you have an associated object with multiple attributes, you may set
up multiple association proxies mapping to different attributes. See
the unit tests for examples, and for examples of how creator() functions
can be used to construct the scalar relationship on-demand in this
situation.
:param \*\*kw: Passes along any other keyword arguments to
:class:`.AssociationProxy`.
"""
return AssociationProxy(target_collection, attr, **kw)
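# Example (illustrative sketch; "User" and "Keyword" are hypothetical mapped
# classes): proxying a relationship collection down to a scalar attribute of
# its members:
#
#     class User(Base):
#         __tablename__ = "user"
#         id = Column(Integer, primary_key=True)
#         kws = relationship("Keyword")
#         keywords = association_proxy("kws", "keyword")
#
#     # appending a plain string invokes the default creator, Keyword(value)
#     user.keywords.append("cheese inspector")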
ASSOCIATION_PROXY = util.symbol("ASSOCIATION_PROXY")
"""Symbol indicating an :class:`.InspectionAttr` that's
of type :class:`.AssociationProxy`.
Is assigned to the :attr:`.InspectionAttr.extension_type`
attribute.
"""
class AssociationProxy(interfaces.InspectionAttrInfo):
"""A descriptor that presents a read/write view of an object attribute."""
is_attribute = True
extension_type = ASSOCIATION_PROXY
def __init__(
self,
target_collection,
attr,
creator=None,
getset_factory=None,
proxy_factory=None,
proxy_bulk_set=None,
info=None,
cascade_scalar_deletes=False,
):
"""Construct a new :class:`.AssociationProxy`.
The :func:`.association_proxy` function is provided as the usual
entrypoint here, though :class:`.AssociationProxy` can be instantiated
and/or subclassed directly.
:param target_collection: Name of the collection we'll proxy to,
usually created with :func:`_orm.relationship`.
:param attr: Attribute on the collected instances we'll proxy
for. For example, given a target collection of [obj1, obj2], a
list created by this proxy property would look like
[getattr(obj1, attr), getattr(obj2, attr)]
:param creator: Optional. When new items are added to this proxied
collection, new instances of the class collected by the target
collection will be created. For list and set collections, the
target class constructor will be called with the 'value' for the
new instance. For dict types, two arguments are passed:
key and value.
If you want to construct instances differently, supply a 'creator'
function that takes arguments as above and returns instances.
:param cascade_scalar_deletes: when True, indicates that setting
the proxied value to ``None``, or deleting it via ``del``, should
also remove the source object. Only applies to scalar attributes.
Normally, removing the proxied target will not remove the proxy
source, as this object may have other state that is still to be
kept.
.. versionadded:: 1.3
.. seealso::
:ref:`cascade_scalar_deletes` - complete usage example
:param getset_factory: Optional. Proxied attribute access is
automatically handled by routines that get and set values based on
the `attr` argument for this proxy.
If you would like to customize this behavior, you may supply a
`getset_factory` callable that produces a tuple of `getter` and
`setter` functions. The factory is called with two arguments, the
abstract type of the underlying collection and this proxy instance.
:param proxy_factory: Optional. The type of collection to emulate is
determined by sniffing the target collection. If your collection
type can't be determined by duck typing or you'd like to use a
different collection implementation, you may supply a factory
function to produce those collections. Only applicable to
non-scalar relationships.
:param proxy_bulk_set: Optional, use with proxy_factory. See
the _set() method for details.
:param info: optional, will be assigned to
:attr:`.AssociationProxy.info` if present.
.. versionadded:: 1.0.9
"""
self.target_collection = target_collection
self.value_attr = attr
self.creator = creator
self.getset_factory = getset_factory
self.proxy_factory = proxy_factory
self.proxy_bulk_set = proxy_bulk_set
self.cascade_scalar_deletes = cascade_scalar_deletes
self.key = "_%s_%s_%s" % (
type(self).__name__,
target_collection,
id(self),
)
if info:
self.info = info
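    # Example (illustrative sketch; "UserKeyword" is a hypothetical
    # association class): a dict-shaped proxy requires a creator accepting
    # (key, value), per the docstring above:
    #
    #     keywords = association_proxy(
    #         "kw_associations",
    #         "keyword",
    #         creator=lambda k, v: UserKeyword(special_key=k, keyword=v),
    #     )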
def __get__(self, obj, class_):
if class_ is None:
return self
inst = self._as_instance(class_, obj)
if inst:
return inst.get(obj)
# obj has to be None here
# assert obj is None
return self
def __set__(self, obj, values):
class_ = type(obj)
return self._as_instance(class_, obj).set(obj, values)
def __delete__(self, obj):
class_ = type(obj)
return self._as_instance(class_, obj).delete(obj)
def for_class(self, class_, obj=None):
r"""Return the internal state local to a specific mapped class.
E.g., given a class ``User``::
class User(Base):
# ...
keywords = association_proxy('kws', 'keyword')
If we access this :class:`.AssociationProxy` from
:attr:`_orm.Mapper.all_orm_descriptors`, and we want to view the
target class for this proxy as mapped by ``User``::
inspect(User).all_orm_descriptors["keywords"].for_class(User).target_class
This returns an instance of :class:`.AssociationProxyInstance` that
is specific to the ``User`` class. The :class:`.AssociationProxy`
object remains agnostic of its parent class.
:param class\_: the class that we are returning state for.
:param obj: optional, an instance of the class that is required
if the attribute refers to a polymorphic target, e.g. where we have
to look at the type of the actual destination object to get the
complete path.
.. versionadded:: 1.3 - :class:`.AssociationProxy` no longer stores
any state specific to a particular parent class; the state is now
stored in per-class :class:`.AssociationProxyInstance` objects.
"""
return self._as_instance(class_, obj)
def _as_instance(self, class_, obj):
try:
inst = class_.__dict__[self.key + "_inst"]
except KeyError:
inst = None
# avoid exception context
if inst is None:
owner = self._calc_owner(class_)
if owner is not None:
inst = AssociationProxyInstance.for_proxy(self, owner, obj)
setattr(class_, self.key + "_inst", inst)
else:
inst = None
if inst is not None and not inst._is_canonical:
# the AssociationProxyInstance can't be generalized
# since the proxied attribute is not on the targeted
# class, only on subclasses of it, which might be
# different. only return for the specific
# object's current value
return inst._non_canonical_get_for_object(obj)
else:
return inst
def _calc_owner(self, target_cls):
# we might be getting invoked for a subclass
# that is not mapped yet, in some declarative situations.
# save until we are mapped
try:
insp = inspect(target_cls)
except exc.NoInspectionAvailable:
# can't find a mapper, don't set owner. if we are a not-yet-mapped
# subclass, we can also scan through __mro__ to find a mapped
# class, but instead just wait for us to be called again against a
# mapped class normally.
return None
else:
return insp.mapper.class_manager.class_
def _default_getset(self, collection_class):
attr = self.value_attr
_getter = operator.attrgetter(attr)
def getter(target):
return _getter(target) if target is not None else None
if collection_class is dict:
def setter(o, k, v):
setattr(o, attr, v)
else:
def setter(o, v):
setattr(o, attr, v)
return getter, setter
def __repr__(self):
return "AssociationProxy(%r, %r)" % (
self.target_collection,
self.value_attr,
)
class AssociationProxyInstance(object):
"""A per-class object that serves class- and object-specific results.
This is used by :class:`.AssociationProxy` when it is invoked
in terms of a specific class or instance of a class, i.e. when it is
used as a regular Python descriptor.
When referring to the :class:`.AssociationProxy` as a normal Python
descriptor, the :class:`.AssociationProxyInstance` is the object that
actually serves the information. Under normal circumstances, its presence
is transparent::
>>> User.keywords.scalar
False
In the special case that the :class:`.AssociationProxy` object is being
accessed directly, in order to get an explicit handle to the
:class:`.AssociationProxyInstance`, use the
:meth:`.AssociationProxy.for_class` method::
proxy_state = inspect(User).all_orm_descriptors["keywords"].for_class(User)
# view if proxy object is scalar or not
>>> proxy_state.scalar
False
.. versionadded:: 1.3
""" # noqa
def __init__(self, parent, owning_class, target_class, value_attr):
self.parent = parent
self.key = parent.key
self.owning_class = owning_class
self.target_collection = parent.target_collection
self.collection_class = None
self.target_class = target_class
self.value_attr = value_attr
target_class = None
"""The intermediary class handled by this
:class:`.AssociationProxyInstance`.
Intercepted append/set/assignment events will result
in the generation of new instances of this class.
"""
@classmethod
def for_proxy(cls, parent, owning_class, parent_instance):
target_collection = parent.target_collection
value_attr = parent.value_attr
prop = orm.class_mapper(owning_class).get_property(target_collection)
# this was never asserted before but this should be made clear.
if not isinstance(prop, orm.RelationshipProperty):
util.raise_(
NotImplementedError(
"association proxy to a non-relationship "
"intermediary is not supported"
),
replace_context=None,
)
target_class = prop.mapper.class_
try:
target_assoc = cls._cls_unwrap_target_assoc_proxy(
target_class, value_attr
)
except AttributeError:
# the proxied attribute doesn't exist on the target class;
# return an "ambiguous" instance that will work on a per-object
# basis
return AmbiguousAssociationProxyInstance(
parent, owning_class, target_class, value_attr
)
else:
return cls._construct_for_assoc(
target_assoc, parent, owning_class, target_class, value_attr
)
@classmethod
def _construct_for_assoc(
cls, target_assoc, parent, owning_class, target_class, value_attr
):
if target_assoc is not None:
return ObjectAssociationProxyInstance(
parent, owning_class, target_class, value_attr
)
attr = getattr(target_class, value_attr)
if not hasattr(attr, "_is_internal_proxy"):
return AmbiguousAssociationProxyInstance(
parent, owning_class, target_class, value_attr
)
is_object = attr._impl_uses_objects
if is_object:
return ObjectAssociationProxyInstance(
parent, owning_class, target_class, value_attr
)
else:
return ColumnAssociationProxyInstance(
parent, owning_class, target_class, value_attr
)
def _get_property(self):
return orm.class_mapper(self.owning_class).get_property(
self.target_collection
)
@property
def _comparator(self):
return self._get_property().comparator
@classmethod
def _cls_unwrap_target_assoc_proxy(cls, target_class, value_attr):
attr = getattr(target_class, value_attr)
if isinstance(attr, (AssociationProxy, AssociationProxyInstance)):
return attr
return None
@util.memoized_property
def _unwrap_target_assoc_proxy(self):
return self._cls_unwrap_target_assoc_proxy(
self.target_class, self.value_attr
)
@property
def remote_attr(self):
"""The 'remote' class attribute referenced by this
:class:`.AssociationProxyInstance`.
.. seealso::
:attr:`.AssociationProxyInstance.attr`
:attr:`.AssociationProxyInstance.local_attr`
"""
return getattr(self.target_class, self.value_attr)
@property
def local_attr(self):
"""The 'local' class attribute referenced by this
:class:`.AssociationProxyInstance`.
.. seealso::
:attr:`.AssociationProxyInstance.attr`
:attr:`.AssociationProxyInstance.remote_attr`
"""
return getattr(self.owning_class, self.target_collection)
@property
def attr(self):
"""Return a tuple of ``(local_attr, remote_attr)``.
This attribute is convenient when specifying a join
using :meth:`_query.Query.join` across two relationships::
sess.query(Parent).join(*Parent.proxied.attr)
.. seealso::
:attr:`.AssociationProxyInstance.local_attr`
:attr:`.AssociationProxyInstance.remote_attr`
"""
return (self.local_attr, self.remote_attr)
@util.memoized_property
def scalar(self):
"""Return ``True`` if this :class:`.AssociationProxyInstance`
proxies a scalar relationship on the local side."""
scalar = not self._get_property().uselist
if scalar:
self._initialize_scalar_accessors()
return scalar
@util.memoized_property
def _value_is_scalar(self):
return (
not self._get_property()
.mapper.get_property(self.value_attr)
.uselist
)
@property
def _target_is_object(self):
raise NotImplementedError()
def _initialize_scalar_accessors(self):
if self.parent.getset_factory:
get, set_ = self.parent.getset_factory(None, self)
else:
get, set_ = self.parent._default_getset(None)
self._scalar_get, self._scalar_set = get, set_
def _default_getset(self, collection_class):
attr = self.value_attr
_getter = operator.attrgetter(attr)
def getter(target):
return _getter(target) if target is not None else None
if collection_class is dict:
def setter(o, k, v):
return setattr(o, attr, v)
else:
def setter(o, v):
return setattr(o, attr, v)
return getter, setter
@property
def info(self):
return self.parent.info
def get(self, obj):
if obj is None:
return self
if self.scalar:
target = getattr(obj, self.target_collection)
return self._scalar_get(target)
else:
try:
# If the owning instance is reborn (orm session resurrect,
# etc.), refresh the proxy cache.
creator_id, self_id, proxy = getattr(obj, self.key)
except AttributeError:
pass
else:
if id(obj) == creator_id and id(self) == self_id:
assert self.collection_class is not None
return proxy
self.collection_class, proxy = self._new(
_lazy_collection(obj, self.target_collection)
)
setattr(obj, self.key, (id(obj), id(self), proxy))
return proxy
def set(self, obj, values):
if self.scalar:
creator = (
self.parent.creator
if self.parent.creator
else self.target_class
)
target = getattr(obj, self.target_collection)
if target is None:
if values is None:
return
setattr(obj, self.target_collection, creator(values))
else:
self._scalar_set(target, values)
if values is None and self.parent.cascade_scalar_deletes:
setattr(obj, self.target_collection, None)
else:
proxy = self.get(obj)
assert self.collection_class is not None
if proxy is not values:
proxy._bulk_replace(self, values)
def delete(self, obj):
if self.owning_class is None:
self._calc_owner(obj, None)
if self.scalar:
target = getattr(obj, self.target_collection)
if target is not None:
delattr(target, self.value_attr)
delattr(obj, self.target_collection)
def _new(self, lazy_collection):
creator = (
self.parent.creator if self.parent.creator else self.target_class
)
collection_class = util.duck_type_collection(lazy_collection())
if self.parent.proxy_factory:
return (
collection_class,
self.parent.proxy_factory(
lazy_collection, creator, self.value_attr, self
),
)
if self.parent.getset_factory:
getter, setter = self.parent.getset_factory(collection_class, self)
else:
getter, setter = self.parent._default_getset(collection_class)
if collection_class is list:
return (
collection_class,
_AssociationList(
lazy_collection, creator, getter, setter, self
),
)
elif collection_class is dict:
return (
collection_class,
_AssociationDict(
lazy_collection, creator, getter, setter, self
),
)
elif collection_class is set:
return (
collection_class,
_AssociationSet(
lazy_collection, creator, getter, setter, self
),
)
else:
raise exc.ArgumentError(
"could not guess which interface to use for "
'collection_class "%s" backing "%s"; specify a '
"proxy_factory and proxy_bulk_set manually"
                % (collection_class.__name__, self.target_collection)
)
def _set(self, proxy, values):
if self.parent.proxy_bulk_set:
self.parent.proxy_bulk_set(proxy, values)
elif self.collection_class is list:
proxy.extend(values)
elif self.collection_class is dict:
proxy.update(values)
elif self.collection_class is set:
proxy.update(values)
else:
raise exc.ArgumentError(
"no proxy_bulk_set supplied for custom "
"collection_class implementation"
)
def _inflate(self, proxy):
        creator = (
            self.parent.creator if self.parent.creator else self.target_class
        )
if self.parent.getset_factory:
getter, setter = self.parent.getset_factory(
self.collection_class, self
)
else:
getter, setter = self.parent._default_getset(self.collection_class)
proxy.creator = creator
proxy.getter = getter
proxy.setter = setter
def _criterion_exists(self, criterion=None, **kwargs):
is_has = kwargs.pop("is_has", None)
target_assoc = self._unwrap_target_assoc_proxy
if target_assoc is not None:
inner = target_assoc._criterion_exists(
criterion=criterion, **kwargs
)
return self._comparator._criterion_exists(inner)
if self._target_is_object:
prop = getattr(self.target_class, self.value_attr)
value_expr = prop._criterion_exists(criterion, **kwargs)
else:
if kwargs:
raise exc.ArgumentError(
"Can't apply keyword arguments to column-targeted "
"association proxy; use =="
)
elif is_has and criterion is not None:
raise exc.ArgumentError(
"Non-empty has() not allowed for "
"column-targeted association proxy; use =="
)
value_expr = criterion
return self._comparator._criterion_exists(value_expr)
def any(self, criterion=None, **kwargs):
"""Produce a proxied 'any' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
and/or :meth:`.RelationshipProperty.Comparator.has`
operators of the underlying proxied attributes.
"""
if self._unwrap_target_assoc_proxy is None and (
self.scalar
and (not self._target_is_object or self._value_is_scalar)
):
raise exc.InvalidRequestError(
"'any()' not implemented for scalar " "attributes. Use has()."
)
return self._criterion_exists(
criterion=criterion, is_has=False, **kwargs
)
def has(self, criterion=None, **kwargs):
"""Produce a proxied 'has' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
and/or :meth:`.RelationshipProperty.Comparator.has`
operators of the underlying proxied attributes.
"""
if self._unwrap_target_assoc_proxy is None and (
not self.scalar
or (self._target_is_object and not self._value_is_scalar)
):
raise exc.InvalidRequestError(
"'has()' not implemented for collections. " "Use any()."
)
return self._criterion_exists(
criterion=criterion, is_has=True, **kwargs
)
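    # Example (illustrative sketch, reusing the hypothetical "User.keywords"
    # proxy from above): any() applies to collection-like proxies and has()
    # to scalar ones; both render an EXISTS subquery:
    #
    #     session.query(User).filter(
    #         User.keywords.any(Keyword.keyword == "chrome"))
    #     session.query(SomeClass).filter(
    #         SomeClass.scalar_proxy.has(name="wendy"))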
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.parent)
class AmbiguousAssociationProxyInstance(AssociationProxyInstance):
"""an :class:`.AssociationProxyInstance` where we cannot determine
the type of target object.
"""
_is_canonical = False
def _ambiguous(self):
raise AttributeError(
"Association proxy %s.%s refers to an attribute '%s' that is not "
"directly mapped on class %s; therefore this operation cannot "
"proceed since we don't know what type of object is referred "
"towards"
% (
self.owning_class.__name__,
self.target_collection,
self.value_attr,
self.target_class,
)
)
def get(self, obj):
if obj is None:
return self
else:
return super(AmbiguousAssociationProxyInstance, self).get(obj)
def __eq__(self, obj):
self._ambiguous()
def __ne__(self, obj):
self._ambiguous()
def any(self, criterion=None, **kwargs):
self._ambiguous()
def has(self, criterion=None, **kwargs):
self._ambiguous()
@util.memoized_property
def _lookup_cache(self):
# mapping of <subclass>->AssociationProxyInstance.
# e.g. proxy is A-> A.b -> B -> B.b_attr, but B.b_attr doesn't exist;
# only B1(B) and B2(B) have "b_attr", keys in here would be B1, B2
return {}
def _non_canonical_get_for_object(self, parent_instance):
if parent_instance is not None:
actual_obj = getattr(parent_instance, self.target_collection)
if actual_obj is not None:
try:
insp = inspect(actual_obj)
except exc.NoInspectionAvailable:
pass
else:
mapper = insp.mapper
instance_class = mapper.class_
if instance_class not in self._lookup_cache:
self._populate_cache(instance_class, mapper)
try:
return self._lookup_cache[instance_class]
except KeyError:
pass
# no object or ambiguous object given, so return "self", which
# is a proxy with generally only instance-level functionality
return self
def _populate_cache(self, instance_class, mapper):
prop = orm.class_mapper(self.owning_class).get_property(
self.target_collection
)
if mapper.isa(prop.mapper):
target_class = instance_class
try:
target_assoc = self._cls_unwrap_target_assoc_proxy(
target_class, self.value_attr
)
except AttributeError:
pass
else:
self._lookup_cache[instance_class] = self._construct_for_assoc(
target_assoc,
self.parent,
self.owning_class,
target_class,
self.value_attr,
)
class ObjectAssociationProxyInstance(AssociationProxyInstance):
"""an :class:`.AssociationProxyInstance` that has an object as a target.
"""
_target_is_object = True
_is_canonical = True
def contains(self, obj):
"""Produce a proxied 'contains' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
, :meth:`.RelationshipProperty.Comparator.has`,
and/or :meth:`.RelationshipProperty.Comparator.contains`
operators of the underlying proxied attributes.
"""
target_assoc = self._unwrap_target_assoc_proxy
if target_assoc is not None:
return self._comparator._criterion_exists(
target_assoc.contains(obj)
if not target_assoc.scalar
else target_assoc == obj
)
elif (
self._target_is_object
and self.scalar
and not self._value_is_scalar
):
return self._comparator.has(
getattr(self.target_class, self.value_attr).contains(obj)
)
elif self._target_is_object and self.scalar and self._value_is_scalar:
raise exc.InvalidRequestError(
"contains() doesn't apply to a scalar object endpoint; use =="
)
else:
return self._comparator._criterion_exists(**{self.value_attr: obj})
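    # Example (illustrative sketch): membership test against an
    # object-targeted proxy, where "some_keyword" is an instance of the
    # proxied target class:
    #
    #     session.query(User).filter(User.keywords.contains(some_keyword))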
def __eq__(self, obj):
# note the has() here will fail for collections; eq_()
# is only allowed with a scalar.
if obj is None:
return or_(
self._comparator.has(**{self.value_attr: obj}),
self._comparator == None,
)
else:
return self._comparator.has(**{self.value_attr: obj})
def __ne__(self, obj):
# note the has() here will fail for collections; eq_()
# is only allowed with a scalar.
return self._comparator.has(
getattr(self.target_class, self.value_attr) != obj
)
class ColumnAssociationProxyInstance(
ColumnOperators, AssociationProxyInstance
):
"""an :class:`.AssociationProxyInstance` that has a database column as a
target.
"""
_target_is_object = False
_is_canonical = True
def __eq__(self, other):
# special case "is None" to check for no related row as well
expr = self._criterion_exists(
self.remote_attr.operate(operator.eq, other)
)
if other is None:
return or_(expr, self._comparator == None)
else:
return expr
def operate(self, op, *other, **kwargs):
return self._criterion_exists(
self.remote_attr.operate(op, *other, **kwargs)
)
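# Example (illustrative sketch): because ColumnAssociationProxyInstance
# mixes in ColumnOperators, ordinary column operators applied to the proxy
# are rendered against the remote column inside an EXISTS, e.g. for a
# string-valued proxy:
#
#     session.query(User).filter(User.keywords.contains("sqlalchemy"))
#     session.query(User).filter(User.keywords.like("%alchemy%"))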
class _lazy_collection(object):
def __init__(self, obj, target):
self.parent = obj
self.target = target
def __call__(self):
return getattr(self.parent, self.target)
def __getstate__(self):
return {"obj": self.parent, "target": self.target}
def __setstate__(self, state):
self.parent = state["obj"]
self.target = state["target"]
class _AssociationCollection(object):
def __init__(self, lazy_collection, creator, getter, setter, parent):
"""Constructs an _AssociationCollection.
This will always be a subclass of either _AssociationList,
_AssociationSet, or _AssociationDict.
lazy_collection
A callable returning a list-based collection of entities (usually an
object attribute managed by a SQLAlchemy relationship())
creator
A function that creates new target entities. Given one parameter:
value. This assertion is assumed::
obj = creator(somevalue)
assert getter(obj) == somevalue
getter
A function. Given an associated object, return the 'value'.
setter
A function. Given an associated object and a value, store that
value on the object.
"""
self.lazy_collection = lazy_collection
self.creator = creator
self.getter = getter
self.setter = setter
self.parent = parent
col = property(lambda self: self.lazy_collection())
def __len__(self):
return len(self.col)
def __bool__(self):
return bool(self.col)
__nonzero__ = __bool__
def __getstate__(self):
return {"parent": self.parent, "lazy_collection": self.lazy_collection}
def __setstate__(self, state):
self.parent = state["parent"]
self.lazy_collection = state["lazy_collection"]
self.parent._inflate(self)
def _bulk_replace(self, assoc_proxy, values):
self.clear()
assoc_proxy._set(self, values)
class _AssociationList(_AssociationCollection):
"""Generic, converting, list-to-list proxy."""
def _create(self, value):
return self.creator(value)
def _get(self, object_):
return self.getter(object_)
def _set(self, object_, value):
return self.setter(object_, value)
def __getitem__(self, index):
if not isinstance(index, slice):
return self._get(self.col[index])
else:
return [self._get(member) for member in self.col[index]]
def __setitem__(self, index, value):
if not isinstance(index, slice):
self._set(self.col[index], value)
else:
if index.stop is None:
stop = len(self)
elif index.stop < 0:
stop = len(self) + index.stop
else:
stop = index.stop
step = index.step or 1
start = index.start or 0
rng = list(range(index.start or 0, stop, step))
if step == 1:
for i in rng:
del self[start]
i = start
for item in value:
self.insert(i, item)
i += 1
else:
if len(value) != len(rng):
raise ValueError(
"attempt to assign sequence of size %s to "
"extended slice of size %s" % (len(value), len(rng))
)
for i, item in zip(rng, value):
self._set(self.col[i], item)
def __delitem__(self, index):
del self.col[index]
def __contains__(self, value):
for member in self.col:
# testlib.pragma exempt:__eq__
if self._get(member) == value:
return True
return False
def __getslice__(self, start, end):
return [self._get(member) for member in self.col[start:end]]
def __setslice__(self, start, end, values):
members = [self._create(v) for v in values]
self.col[start:end] = members
def __delslice__(self, start, end):
del self.col[start:end]
def __iter__(self):
"""Iterate over proxied values.
For the actual domain objects, iterate over .col instead or
just use the underlying collection directly from its property
on the parent.
"""
for member in self.col:
yield self._get(member)
return
def append(self, value):
col = self.col
item = self._create(value)
col.append(item)
def count(self, value):
return sum(
[
1
for _ in util.itertools_filter(
lambda v: v == value, iter(self)
)
]
)
def extend(self, values):
for v in values:
self.append(v)
def insert(self, index, value):
self.col[index:index] = [self._create(value)]
def pop(self, index=-1):
return self.getter(self.col.pop(index))
def remove(self, value):
for i, val in enumerate(self):
if val == value:
del self.col[i]
return
raise ValueError("value not in list")
def reverse(self):
"""Not supported, use reversed(mylist)"""
raise NotImplementedError
def sort(self):
"""Not supported, use sorted(mylist)"""
raise NotImplementedError
def clear(self):
del self.col[0 : len(self.col)]
def __eq__(self, other):
return list(self) == other
def __ne__(self, other):
return list(self) != other
def __lt__(self, other):
return list(self) < other
def __le__(self, other):
return list(self) <= other
def __gt__(self, other):
return list(self) > other
def __ge__(self, other):
return list(self) >= other
def __cmp__(self, other):
return util.cmp(list(self), other)
def __add__(self, iterable):
try:
other = list(iterable)
except TypeError:
return NotImplemented
return list(self) + other
def __radd__(self, iterable):
try:
other = list(iterable)
except TypeError:
return NotImplemented
return other + list(self)
def __mul__(self, n):
if not isinstance(n, int):
return NotImplemented
return list(self) * n
__rmul__ = __mul__
def __iadd__(self, iterable):
self.extend(iterable)
return self
def __imul__(self, n):
# unlike a regular list *=, proxied __imul__ will generate unique
# backing objects for each copy. *= on proxied lists is a bit of
# a stretch anyhow, and this interpretation of the __imul__ contract
# is more plausibly useful than copying the backing objects.
if not isinstance(n, int):
return NotImplemented
if n == 0:
self.clear()
elif n > 1:
self.extend(list(self) * (n - 1))
return self
def index(self, item, *args):
return list(self).index(item, *args)
def copy(self):
return list(self)
def __repr__(self):
return repr(list(self))
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (
util.callable(func)
and func.__name__ == func_name
and not func.__doc__
and hasattr(list, func_name)
):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
_NotProvided = util.symbol("_NotProvided")
class _AssociationDict(_AssociationCollection):
"""Generic, converting, dict-to-dict proxy."""
def _create(self, key, value):
return self.creator(key, value)
def _get(self, object_):
return self.getter(object_)
def _set(self, object_, key, value):
return self.setter(object_, key, value)
def __getitem__(self, key):
return self._get(self.col[key])
def __setitem__(self, key, value):
if key in self.col:
self._set(self.col[key], key, value)
else:
self.col[key] = self._create(key, value)
def __delitem__(self, key):
del self.col[key]
def __contains__(self, key):
# testlib.pragma exempt:__hash__
return key in self.col
def has_key(self, key):
# testlib.pragma exempt:__hash__
return key in self.col
def __iter__(self):
return iter(self.col.keys())
def clear(self):
self.col.clear()
def __eq__(self, other):
return dict(self) == other
def __ne__(self, other):
return dict(self) != other
def __lt__(self, other):
return dict(self) < other
def __le__(self, other):
return dict(self) <= other
def __gt__(self, other):
return dict(self) > other
def __ge__(self, other):
return dict(self) >= other
def __cmp__(self, other):
return util.cmp(dict(self), other)
def __repr__(self):
return repr(dict(self.items()))
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default=None):
if key not in self.col:
self.col[key] = self._create(key, default)
return default
else:
return self[key]
def keys(self):
return self.col.keys()
if util.py2k:
def iteritems(self):
return ((key, self._get(self.col[key])) for key in self.col)
def itervalues(self):
return (self._get(self.col[key]) for key in self.col)
def iterkeys(self):
return self.col.iterkeys()
def values(self):
return [self._get(member) for member in self.col.values()]
def items(self):
return [(k, self._get(self.col[k])) for k in self]
else:
def items(self):
return ((key, self._get(self.col[key])) for key in self.col)
def values(self):
return (self._get(self.col[key]) for key in self.col)
def pop(self, key, default=_NotProvided):
if default is _NotProvided:
member = self.col.pop(key)
else:
member = self.col.pop(key, default)
return self._get(member)
def popitem(self):
item = self.col.popitem()
return (item[0], self._get(item[1]))
def update(self, *a, **kw):
if len(a) > 1:
raise TypeError(
"update expected at most 1 arguments, got %i" % len(a)
)
elif len(a) == 1:
seq_or_map = a[0]
# discern dict from sequence - took the advice from
# http://www.voidspace.org.uk/python/articles/duck_typing.shtml
# still not perfect :(
if hasattr(seq_or_map, "keys"):
for item in seq_or_map:
self[item] = seq_or_map[item]
else:
try:
for k, v in seq_or_map:
self[k] = v
except ValueError as err:
util.raise_(
ValueError(
"dictionary update sequence "
"requires 2-element tuples"
),
replace_context=err,
)
        for key, value in kw.items():
self[key] = value
def _bulk_replace(self, assoc_proxy, values):
existing = set(self)
constants = existing.intersection(values or ())
additions = set(values or ()).difference(constants)
removals = existing.difference(constants)
        for key, member in (values or {}).items():
if key in additions:
self[key] = member
elif key in constants:
self[key] = member
for key in removals:
del self[key]
def copy(self):
return dict(self.items())
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (
util.callable(func)
and func.__name__ == func_name
and not func.__doc__
and hasattr(dict, func_name)
):
func.__doc__ = getattr(dict, func_name).__doc__
del func_name, func
class _AssociationSet(_AssociationCollection):
"""Generic, converting, set-to-set proxy."""
def _create(self, value):
return self.creator(value)
def _get(self, object_):
return self.getter(object_)
def __len__(self):
return len(self.col)
def __bool__(self):
if self.col:
return True
else:
return False
__nonzero__ = __bool__
def __contains__(self, value):
for member in self.col:
# testlib.pragma exempt:__eq__
if self._get(member) == value:
return True
return False
def __iter__(self):
"""Iterate over proxied values.
For the actual domain objects, iterate over .col instead or just use
the underlying collection directly from its property on the parent.
"""
for member in self.col:
yield self._get(member)
return
def add(self, value):
if value not in self:
self.col.add(self._create(value))
# for discard and remove, choosing a more expensive check strategy rather
# than call self.creator()
def discard(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
break
def remove(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
return
raise KeyError(value)
def pop(self):
if not self.col:
raise KeyError("pop from an empty set")
member = self.col.pop()
return self._get(member)
def update(self, other):
for value in other:
self.add(value)
def _bulk_replace(self, assoc_proxy, values):
existing = set(self)
constants = existing.intersection(values or ())
additions = set(values or ()).difference(constants)
removals = existing.difference(constants)
appender = self.add
remover = self.remove
for member in values or ():
if member in additions:
appender(member)
elif member in constants:
appender(member)
for member in removals:
remover(member)
def __ior__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
for value in other:
self.add(value)
return self
def _set(self):
return set(iter(self))
def union(self, other):
return set(self).union(other)
__or__ = union
def difference(self, other):
return set(self).difference(other)
__sub__ = difference
def difference_update(self, other):
for value in other:
self.discard(value)
def __isub__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
for value in other:
self.discard(value)
return self
def intersection(self, other):
return set(self).intersection(other)
__and__ = intersection
def intersection_update(self, other):
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
def __iand__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
return self
def symmetric_difference(self, other):
return set(self).symmetric_difference(other)
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
def __ixor__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
return self
def issubset(self, other):
return set(self).issubset(other)
def issuperset(self, other):
return set(self).issuperset(other)
def clear(self):
self.col.clear()
def copy(self):
return set(self)
def __eq__(self, other):
return set(self) == other
def __ne__(self, other):
return set(self) != other
def __lt__(self, other):
return set(self) < other
def __le__(self, other):
return set(self) <= other
def __gt__(self, other):
return set(self) > other
def __ge__(self, other):
return set(self) >= other
def __repr__(self):
return repr(set(self))
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (
util.callable(func)
and func.__name__ == func_name
and not func.__doc__
and hasattr(set, func_name)
):
func.__doc__ = getattr(set, func_name).__doc__
del func_name, func
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext/declarative/clsregistry.py | # ext/declarative/clsregistry.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to handle the string class registry used by declarative.
This system allows specification of classes and expressions used in
:func:`_orm.relationship` using strings.
"""
import weakref
from ... import exc
from ... import inspection
from ... import util
from ...orm import class_mapper
from ...orm import interfaces
from ...orm.properties import ColumnProperty
from ...orm.properties import RelationshipProperty
from ...orm.properties import SynonymProperty
from ...schema import _get_table_key
# strong references to registries which we place in
# the _decl_class_registry, which is usually weak referencing.
# the internal registries here link to classes with weakrefs and remove
# themselves when all references to contained classes are removed.
_registries = set()
def add_class(classname, cls):
"""Add a class to the _decl_class_registry associated with the
given declarative class.
"""
if classname in cls._decl_class_registry:
# class already exists.
existing = cls._decl_class_registry[classname]
if not isinstance(existing, _MultipleClassMarker):
existing = cls._decl_class_registry[
classname
] = _MultipleClassMarker([cls, existing])
else:
cls._decl_class_registry[classname] = cls
try:
root_module = cls._decl_class_registry["_sa_module_registry"]
except KeyError:
cls._decl_class_registry[
"_sa_module_registry"
] = root_module = _ModuleMarker("_sa_module_registry", None)
tokens = cls.__module__.split(".")
# build up a tree like this:
# modulename: myapp.snacks.nuts
#
# myapp->snack->nuts->(classes)
# snack->nuts->(classes)
# nuts->(classes)
#
# this allows partial token paths to be used.
while tokens:
token = tokens.pop(0)
module = root_module.get_module(token)
for token in tokens:
module = module.get_module(token)
module.add_class(classname, cls)
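# Example (illustrative sketch): with two classes both named "User"
# registered from different modules, a relationship() string argument can
# disambiguate via a partial module path resolved through the tree above:
#
#     addresses = relationship("myapp.model.User")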
class _MultipleClassMarker(object):
"""refers to multiple classes of the same name
within _decl_class_registry.
"""
__slots__ = "on_remove", "contents", "__weakref__"
def __init__(self, classes, on_remove=None):
self.on_remove = on_remove
self.contents = set(
[weakref.ref(item, self._remove_item) for item in classes]
)
_registries.add(self)
def __iter__(self):
return (ref() for ref in self.contents)
def attempt_get(self, path, key):
if len(self.contents) > 1:
raise exc.InvalidRequestError(
'Multiple classes found for path "%s" '
"in the registry of this declarative "
"base. Please use a fully module-qualified path."
% (".".join(path + [key]))
)
else:
ref = list(self.contents)[0]
cls = ref()
if cls is None:
raise NameError(key)
return cls
def _remove_item(self, ref):
self.contents.remove(ref)
if not self.contents:
_registries.discard(self)
if self.on_remove:
self.on_remove()
def add_item(self, item):
# protect against class registration race condition against
# asynchronous garbage collection calling _remove_item,
# [ticket:3208]
modules = set(
[
cls.__module__
for cls in [ref() for ref in self.contents]
if cls is not None
]
)
if item.__module__ in modules:
util.warn(
"This declarative base already contains a class with the "
"same class name and module name as %s.%s, and will "
"be replaced in the string-lookup table."
% (item.__module__, item.__name__)
)
self.contents.add(weakref.ref(item, self._remove_item))
class _ModuleMarker(object):
""""refers to a module name within
_decl_class_registry.
"""
__slots__ = "parent", "name", "contents", "mod_ns", "path", "__weakref__"
def __init__(self, name, parent):
self.parent = parent
self.name = name
self.contents = {}
self.mod_ns = _ModNS(self)
if self.parent:
self.path = self.parent.path + [self.name]
else:
self.path = []
_registries.add(self)
def __contains__(self, name):
return name in self.contents
def __getitem__(self, name):
return self.contents[name]
def _remove_item(self, name):
self.contents.pop(name, None)
if not self.contents and self.parent is not None:
self.parent._remove_item(self.name)
_registries.discard(self)
def resolve_attr(self, key):
return getattr(self.mod_ns, key)
def get_module(self, name):
if name not in self.contents:
marker = _ModuleMarker(name, self)
self.contents[name] = marker
else:
marker = self.contents[name]
return marker
def add_class(self, name, cls):
if name in self.contents:
existing = self.contents[name]
existing.add_item(cls)
else:
existing = self.contents[name] = _MultipleClassMarker(
[cls], on_remove=lambda: self._remove_item(name)
)
class _ModNS(object):
__slots__ = ("__parent",)
def __init__(self, parent):
self.__parent = parent
def __getattr__(self, key):
try:
value = self.__parent.contents[key]
except KeyError:
pass
else:
if value is not None:
if isinstance(value, _ModuleMarker):
return value.mod_ns
else:
assert isinstance(value, _MultipleClassMarker)
return value.attempt_get(self.__parent.path, key)
raise AttributeError(
"Module %r has no mapped classes "
"registered under the name %r" % (self.__parent.name, key)
)
class _GetColumns(object):
__slots__ = ("cls",)
def __init__(self, cls):
self.cls = cls
def __getattr__(self, key):
mp = class_mapper(self.cls, configure=False)
if mp:
if key not in mp.all_orm_descriptors:
raise exc.InvalidRequestError(
"Class %r does not have a mapped column named %r"
% (self.cls, key)
)
desc = mp.all_orm_descriptors[key]
if desc.extension_type is interfaces.NOT_EXTENSION:
prop = desc.property
if isinstance(prop, SynonymProperty):
key = prop.name
elif not isinstance(prop, ColumnProperty):
raise exc.InvalidRequestError(
"Property %r is not an instance of"
" ColumnProperty (i.e. does not correspond"
" directly to a Column)." % key
)
return getattr(self.cls, key)
inspection._inspects(_GetColumns)(
lambda target: inspection.inspect(target.cls)
)
class _GetTable(object):
__slots__ = "key", "metadata"
def __init__(self, key, metadata):
self.key = key
self.metadata = metadata
def __getattr__(self, key):
return self.metadata.tables[_get_table_key(key, self.key)]
def _determine_container(key, value):
if isinstance(value, _MultipleClassMarker):
value = value.attempt_get([], key)
return _GetColumns(value)
class _class_resolver(object):
def __init__(self, cls, prop, fallback, arg):
self.cls = cls
self.prop = prop
self.arg = self._declarative_arg = arg
self.fallback = fallback
self._dict = util.PopulateDict(self._access_cls)
self._resolvers = ()
def _access_cls(self, key):
cls = self.cls
if key in cls._decl_class_registry:
return _determine_container(key, cls._decl_class_registry[key])
elif key in cls.metadata.tables:
return cls.metadata.tables[key]
elif key in cls.metadata._schemas:
return _GetTable(key, cls.metadata)
elif (
"_sa_module_registry" in cls._decl_class_registry
and key in cls._decl_class_registry["_sa_module_registry"]
):
registry = cls._decl_class_registry["_sa_module_registry"]
return registry.resolve_attr(key)
elif self._resolvers:
for resolv in self._resolvers:
value = resolv(key)
if value is not None:
return value
return self.fallback[key]
def _raise_for_name(self, name, err):
util.raise_(
exc.InvalidRequestError(
"When initializing mapper %s, expression %r failed to "
"locate a name (%r). If this is a class name, consider "
"adding this relationship() to the %r class after "
"both dependent classes have been defined."
% (self.prop.parent, self.arg, name, self.cls)
),
from_=err,
)
def _resolve_name(self):
name = self.arg
d = self._dict
rval = None
try:
for token in name.split("."):
if rval is None:
rval = d[token]
else:
rval = getattr(rval, token)
except KeyError as err:
self._raise_for_name(name, err)
except NameError as n:
self._raise_for_name(n.args[0], n)
else:
if isinstance(rval, _GetColumns):
return rval.cls
else:
return rval
def __call__(self):
try:
x = eval(self.arg, globals(), self._dict)
if isinstance(x, _GetColumns):
return x.cls
else:
return x
except NameError as n:
self._raise_for_name(n.args[0], n)
def _resolver(cls, prop):
import sqlalchemy
from sqlalchemy.orm import foreign, remote
fallback = sqlalchemy.__dict__.copy()
fallback.update({"foreign": foreign, "remote": remote})
def resolve_arg(arg):
return _class_resolver(cls, prop, fallback, arg)
def resolve_name(arg):
return _class_resolver(cls, prop, fallback, arg)._resolve_name
return resolve_name, resolve_arg
def _deferred_relationship(cls, prop):
if isinstance(prop, RelationshipProperty):
resolve_name, resolve_arg = _resolver(cls, prop)
for attr in (
"order_by",
"primaryjoin",
"secondaryjoin",
"secondary",
"_user_defined_foreign_keys",
"remote_side",
):
v = getattr(prop, attr)
if isinstance(v, util.string_types):
setattr(prop, attr, resolve_arg(v))
for attr in ("argument",):
v = getattr(prop, attr)
if isinstance(v, util.string_types):
setattr(prop, attr, resolve_name(v))
if prop.backref and isinstance(prop.backref, tuple):
key, kwargs = prop.backref
for attr in (
"primaryjoin",
"secondaryjoin",
"secondary",
"foreign_keys",
"remote_side",
"order_by",
):
if attr in kwargs and isinstance(
kwargs[attr], util.string_types
):
kwargs[attr] = resolve_arg(kwargs[attr])
return prop
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext/declarative/__init__.py | # ext/declarative/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .api import AbstractConcreteBase
from .api import as_declarative
from .api import comparable_using
from .api import ConcreteBase
from .api import declarative_base
from .api import DeclarativeMeta
from .api import declared_attr
from .api import DeferredReflection
from .api import has_inherited_table
from .api import instrument_declarative
from .api import synonym_for
__all__ = [
"declarative_base",
"synonym_for",
"has_inherited_table",
"comparable_using",
"instrument_declarative",
"declared_attr",
"as_declarative",
"ConcreteBase",
"AbstractConcreteBase",
"DeclarativeMeta",
"DeferredReflection",
]
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext/declarative/api.py | # ext/declarative/api.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Public API functions and helpers for declarative."""
import re
import weakref
from .base import _add_attribute
from .base import _as_declarative
from .base import _declarative_constructor
from .base import _DeferredMapperConfig
from .base import _del_attribute
from .clsregistry import _class_resolver
from ... import exc
from ... import inspection
from ... import util
from ...orm import attributes
from ...orm import comparable_property
from ...orm import exc as orm_exc
from ...orm import interfaces
from ...orm import properties
from ...orm import synonym as _orm_synonym
from ...orm.base import _inspect_mapped_class
from ...orm.base import _mapper_or_none
from ...orm.util import polymorphic_union
from ...schema import MetaData
from ...schema import Table
from ...util import hybridmethod
from ...util import hybridproperty
from ...util import OrderedDict
def instrument_declarative(cls, registry, metadata):
"""Given a class, configure the class declaratively,
using the given registry, which can be any dictionary, and
MetaData object.
"""
if "_decl_class_registry" in cls.__dict__:
raise exc.InvalidRequestError(
"Class %r already has been " "instrumented declaratively" % cls
)
cls._decl_class_registry = registry
cls.metadata = metadata
_as_declarative(cls, cls.__name__, cls.__dict__)
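# Example (illustrative sketch): applying declarative instrumentation to a
# plain class without declarative_base(), using an ordinary dict as the
# class registry:
#
#     registry = {}
#     metadata = MetaData()
#
#     class MyClass(object):
#         __tablename__ = "my_table"
#         id = Column(Integer, primary_key=True)
#
#     instrument_declarative(MyClass, registry, metadata)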
def has_inherited_table(cls):
"""Given a class, return True if any of the classes it inherits from has a
mapped table, otherwise return False.
This is used in declarative mixins to build attributes that behave
differently for the base class vs. a subclass in an inheritance
hierarchy.
.. seealso::
:ref:`decl_mixin_inheritance`
"""
for class_ in cls.__mro__[1:]:
if getattr(class_, "__table__", None) is not None:
return True
return False
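# Example (illustrative sketch): a mixin that only generates a table name
# for the base-most class, so that subclasses fall back to single-table
# inheritance:
#
#     class Tablename(object):
#         @declared_attr
#         def __tablename__(cls):
#             if has_inherited_table(cls):
#                 return None
#             return cls.__name__.lower()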
class DeclarativeMeta(type):
def __init__(cls, classname, bases, dict_):
if "_decl_class_registry" not in cls.__dict__:
_as_declarative(cls, classname, cls.__dict__)
type.__init__(cls, classname, bases, dict_)
def __setattr__(cls, key, value):
_add_attribute(cls, key, value)
def __delattr__(cls, key):
_del_attribute(cls, key)
def synonym_for(name, map_column=False):
"""Decorator that produces an :func:`_orm.synonym`
attribute in conjunction
with a Python descriptor.
The function being decorated is passed to :func:`_orm.synonym` as the
:paramref:`.orm.synonym.descriptor` parameter::
class MyClass(Base):
__tablename__ = 'my_table'
id = Column(Integer, primary_key=True)
_job_status = Column("job_status", String(50))
@synonym_for("job_status")
@property
def job_status(self):
return "Status: %s" % self._job_status
The :ref:`hybrid properties <mapper_hybrids>` feature of SQLAlchemy
is typically preferred instead of synonyms, which is a more legacy
feature.
.. seealso::
:ref:`synonyms` - Overview of synonyms
:func:`_orm.synonym` - the mapper-level function
:ref:`mapper_hybrids` - The Hybrid Attribute extension provides an
updated approach to augmenting attribute behavior more flexibly than
can be achieved with synonyms.
"""
def decorate(fn):
return _orm_synonym(name, map_column=map_column, descriptor=fn)
return decorate
def comparable_using(comparator_factory):
"""Decorator, allow a Python @property to be used in query criteria.
This is a decorator front end to
:func:`~sqlalchemy.orm.comparable_property` that passes
through the comparator_factory and the function being decorated::
@comparable_using(MyComparatorType)
@property
def prop(self):
return 'special sauce'
The regular ``comparable_property()`` is also usable directly in a
declarative setting and may be convenient for read/write properties::
prop = comparable_property(MyComparatorType)
"""
def decorate(fn):
return comparable_property(comparator_factory, fn)
return decorate
class declared_attr(interfaces._MappedAttribute, property):
"""Mark a class-level method as representing the definition of
a mapped property or special declarative member name.
@declared_attr turns the attribute into a scalar-like
property that can be invoked from the uninstantiated class.
Declarative treats attributes specifically marked with
@declared_attr as returning a construct that is specific
to mapping or declarative table configuration. The name
of the attribute is that of what the non-dynamic version
of the attribute would be.
@declared_attr is more often than not applicable to mixins,
to define relationships that are to be applied to different
implementors of the class::
class ProvidesUser(object):
"A mixin that adds a 'user' relationship to classes."
@declared_attr
def user(self):
return relationship("User")
It also can be applied to mapped classes, such as to provide
a "polymorphic" scheme for inheritance::
class Employee(Base):
id = Column(Integer, primary_key=True)
type = Column(String(50), nullable=False)
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
@declared_attr
def __mapper_args__(cls):
if cls.__name__ == 'Employee':
return {
"polymorphic_on":cls.type,
"polymorphic_identity":"Employee"
}
else:
return {"polymorphic_identity":cls.__name__}
"""
def __init__(self, fget, cascading=False):
super(declared_attr, self).__init__(fget)
self.__doc__ = fget.__doc__
self._cascading = cascading
def __get__(desc, self, cls):
reg = cls.__dict__.get("_sa_declared_attr_reg", None)
if reg is None:
if (
not re.match(r"^__.+__$", desc.fget.__name__)
and attributes.manager_of_class(cls) is None
):
util.warn(
"Unmanaged access of declarative attribute %s from "
"non-mapped class %s" % (desc.fget.__name__, cls.__name__)
)
return desc.fget(cls)
elif desc in reg:
return reg[desc]
else:
reg[desc] = obj = desc.fget(cls)
return obj
@hybridmethod
def _stateful(cls, **kw):
return _stateful_declared_attr(**kw)
@hybridproperty
def cascading(cls):
"""Mark a :class:`.declared_attr` as cascading.
This is a special-use modifier which indicates that a column
or MapperProperty-based declared attribute should be configured
distinctly per mapped subclass, within a mapped-inheritance scenario.
.. warning::
The :attr:`.declared_attr.cascading` modifier has several
limitations:
* The flag **only** applies to the use of :class:`.declared_attr`
on declarative mixin classes and ``__abstract__`` classes; it
currently has no effect when used on a mapped class directly.
* The flag **only** applies to normally-named attributes, e.g.
not any special underscore attributes such as ``__tablename__``.
On these attributes it has **no** effect.
* The flag currently **does not allow further overrides** down
the class hierarchy; if a subclass tries to override the
attribute, a warning is emitted and the overridden attribute
is skipped. This is a limitation that it is hoped will be
resolved at some point.
Below, both MyClass as well as MySubClass will have a distinct
``id`` Column object established::
class HasIdMixin(object):
@declared_attr.cascading
def id(cls):
if has_inherited_table(cls):
return Column(
ForeignKey('myclass.id'), primary_key=True)
else:
return Column(Integer, primary_key=True)
class MyClass(HasIdMixin, Base):
__tablename__ = 'myclass'
# ...
class MySubClass(MyClass):
""
# ...
The behavior of the above configuration is that ``MySubClass``
will refer to both its own ``id`` column as well as that of
        ``MyClass`` underneath the attribute named ``id``.
.. seealso::
:ref:`declarative_inheritance`
:ref:`mixin_inheritance_columns`
"""
return cls._stateful(cascading=True)
class _stateful_declared_attr(declared_attr):
def __init__(self, **kw):
self.kw = kw
def _stateful(self, **kw):
new_kw = self.kw.copy()
new_kw.update(kw)
return _stateful_declared_attr(**new_kw)
def __call__(self, fn):
return declared_attr(fn, **self.kw)
def declarative_base(
bind=None,
metadata=None,
mapper=None,
cls=object,
name="Base",
constructor=_declarative_constructor,
class_registry=None,
metaclass=DeclarativeMeta,
):
r"""Construct a base class for declarative class definitions.
The new base class will be given a metaclass that produces
appropriate :class:`~sqlalchemy.schema.Table` objects and makes
the appropriate :func:`~sqlalchemy.orm.mapper` calls based on the
information provided declaratively in the class and any subclasses
of the class.
:param bind: An optional
:class:`~sqlalchemy.engine.Connectable`, will be assigned
the ``bind`` attribute on the :class:`~sqlalchemy.schema.MetaData`
instance.
:param metadata:
An optional :class:`~sqlalchemy.schema.MetaData` instance. All
:class:`~sqlalchemy.schema.Table` objects implicitly declared by
subclasses of the base will share this MetaData. A MetaData instance
will be created if none is provided. The
:class:`~sqlalchemy.schema.MetaData` instance will be available via the
`metadata` attribute of the generated declarative base class.
:param mapper:
An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`. Will
be used to map subclasses to their Tables.
:param cls:
Defaults to :class:`object`. A type to use as the base for the generated
declarative base class. May be a class or tuple of classes.
:param name:
Defaults to ``Base``. The display name for the generated
class. Customizing this is not required, but can improve clarity in
tracebacks and debugging.
:param constructor:
Defaults to
:func:`~sqlalchemy.ext.declarative.base._declarative_constructor`, an
__init__ implementation that assigns \**kwargs for declared
fields and relationships to an instance. If ``None`` is supplied,
no __init__ will be provided and construction will fall back to
cls.__init__ by way of the normal Python semantics.
:param class_registry: optional dictionary that will serve as the
registry of class names-> mapped classes when string names
are used to identify classes inside of :func:`_orm.relationship`
and others. Allows two or more declarative base classes
to share the same registry of class names for simplified
inter-base relationships.
:param metaclass:
Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__
compatible callable to use as the meta type of the generated
declarative base class.
.. versionchanged:: 1.1 if :paramref:`.declarative_base.cls` is a
single class (rather than a tuple), the constructed base class will
inherit its docstring.
.. seealso::
:func:`.as_declarative`
"""
lcl_metadata = metadata or MetaData()
if bind:
lcl_metadata.bind = bind
if class_registry is None:
class_registry = weakref.WeakValueDictionary()
    bases = (cls,) if not isinstance(cls, tuple) else cls
class_dict = dict(
_decl_class_registry=class_registry, metadata=lcl_metadata
)
if isinstance(cls, type):
class_dict["__doc__"] = cls.__doc__
if constructor:
class_dict["__init__"] = constructor
if mapper:
class_dict["__mapper_cls__"] = mapper
return metaclass(name, bases, class_dict)
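# A minimal usage sketch for declarative_base(); the "User" class and the
# in-memory SQLite URL below are illustrative only:
#
#     from sqlalchemy import Column, Integer, String, create_engine
#
#     Base = declarative_base()
#
#     class User(Base):
#         __tablename__ = "user"
#         id = Column(Integer, primary_key=True)
#         name = Column(String(50))
#
#     engine = create_engine("sqlite://")
#     Base.metadata.create_all(engine)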
def as_declarative(**kw):
"""
Class decorator for :func:`.declarative_base`.
Provides a syntactical shortcut to the ``cls`` argument
sent to :func:`.declarative_base`, allowing the base class
to be converted in-place to a "declarative" base::
from sqlalchemy.ext.declarative import as_declarative
@as_declarative()
class Base(object):
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
id = Column(Integer, primary_key=True)
class MyMappedClass(Base):
# ...
All keyword arguments passed to :func:`.as_declarative` are passed
along to :func:`.declarative_base`.
.. seealso::
:func:`.declarative_base`
"""
def decorate(cls):
kw["cls"] = cls
kw["name"] = cls.__name__
return declarative_base(**kw)
return decorate
class ConcreteBase(object):
"""A helper class for 'concrete' declarative mappings.
:class:`.ConcreteBase` will use the :func:`.polymorphic_union`
function automatically, against all tables mapped as a subclass
to this class. The function is called via the
    ``__declare_first__()`` function, which is essentially
    a hook for the :meth:`.before_configured` event.
:class:`.ConcreteBase` produces a mapped
table for the class itself. Compare to :class:`.AbstractConcreteBase`,
which does not.
Example::
from sqlalchemy.ext.declarative import ConcreteBase
class Employee(ConcreteBase, Base):
__tablename__ = 'employee'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
__mapper_args__ = {
'polymorphic_identity':'employee',
'concrete':True}
class Manager(Employee):
__tablename__ = 'manager'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'concrete':True}
.. seealso::
:class:`.AbstractConcreteBase`
:ref:`concrete_inheritance`
"""
@classmethod
def _create_polymorphic_union(cls, mappers):
return polymorphic_union(
OrderedDict(
(mp.polymorphic_identity, mp.local_table) for mp in mappers
),
"type",
"pjoin",
)
@classmethod
def __declare_first__(cls):
m = cls.__mapper__
if m.with_polymorphic:
return
mappers = list(m.self_and_descendants)
pjoin = cls._create_polymorphic_union(mappers)
m._set_with_polymorphic(("*", pjoin))
m._set_polymorphic_on(pjoin.c.type)
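# A usage sketch, assuming the Employee/Manager example from the docstring:
# once mappers are configured, a query against the ConcreteBase superclass
# selects from the generated "pjoin" UNION of all concrete subclass tables:
#
#     session.query(Employee).all()   # rows from employee and manager tables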
class AbstractConcreteBase(ConcreteBase):
"""A helper class for 'concrete' declarative mappings.
:class:`.AbstractConcreteBase` will use the :func:`.polymorphic_union`
function automatically, against all tables mapped as a subclass
to this class. The function is called via the
    ``__declare_first__()`` function, which is essentially
    a hook for the :meth:`.before_configured` event.
:class:`.AbstractConcreteBase` does produce a mapped class
for the base class, however it is not persisted to any table; it
    is instead mapped directly to the "polymorphic" selectable
and is only used for selecting. Compare to :class:`.ConcreteBase`,
which does create a persisted table for the base class.
.. note::
The :class:`.AbstractConcreteBase` class does not intend to set up the
mapping for the base class until all the subclasses have been defined,
as it needs to create a mapping against a selectable that will include
all subclass tables. In order to achieve this, it waits for the
**mapper configuration event** to occur, at which point it scans
through all the configured subclasses and sets up a mapping that will
query against all subclasses at once.
While this event is normally invoked automatically, in the case of
:class:`.AbstractConcreteBase`, it may be necessary to invoke it
explicitly after **all** subclass mappings are defined, if the first
operation is to be a query against this base class. To do so, invoke
:func:`.configure_mappers` once all the desired classes have been
configured::
from sqlalchemy.orm import configure_mappers
configure_mappers()
.. seealso::
:func:`_orm.configure_mappers`
Example::
from sqlalchemy.ext.declarative import AbstractConcreteBase
class Employee(AbstractConcreteBase, Base):
pass
class Manager(Employee):
__tablename__ = 'manager'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'concrete':True}
configure_mappers()
The abstract base class is handled by declarative in a special way;
at class configuration time, it behaves like a declarative mixin
or an ``__abstract__`` base class. Once classes are configured
and mappings are produced, it then gets mapped itself, but
    after all of its descendants.  This is a system of mapping
    not found anywhere else in SQLAlchemy.
Using this approach, we can specify columns and properties
that will take place on mapped subclasses, in the way that
we normally do as in :ref:`declarative_mixins`::
class Company(Base):
__tablename__ = 'company'
id = Column(Integer, primary_key=True)
class Employee(AbstractConcreteBase, Base):
employee_id = Column(Integer, primary_key=True)
@declared_attr
def company_id(cls):
return Column(ForeignKey('company.id'))
@declared_attr
def company(cls):
return relationship("Company")
class Manager(Employee):
__tablename__ = 'manager'
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'concrete':True}
configure_mappers()
When we make use of our mappings however, both ``Manager`` and
``Employee`` will have an independently usable ``.company`` attribute::
session.query(Employee).filter(Employee.company.has(id=5))
.. versionchanged:: 1.0.0 - The mechanics of :class:`.AbstractConcreteBase`
have been reworked to support relationships established directly
on the abstract base, without any special configurational steps.
.. seealso::
:class:`.ConcreteBase`
:ref:`concrete_inheritance`
"""
__no_table__ = True
@classmethod
def __declare_first__(cls):
cls._sa_decl_prepare_nocascade()
@classmethod
def _sa_decl_prepare_nocascade(cls):
if getattr(cls, "__mapper__", None):
return
to_map = _DeferredMapperConfig.config_for_cls(cls)
# can't rely on 'self_and_descendants' here
# since technically an immediate subclass
# might not be mapped, but a subclass
# may be.
mappers = []
stack = list(cls.__subclasses__())
while stack:
klass = stack.pop()
stack.extend(klass.__subclasses__())
mn = _mapper_or_none(klass)
if mn is not None:
mappers.append(mn)
pjoin = cls._create_polymorphic_union(mappers)
# For columns that were declared on the class, these
# are normally ignored with the "__no_table__" mapping,
# unless they have a different attribute key vs. col name
# and are in the properties argument.
# In that case, ensure we update the properties entry
# to the correct column from the pjoin target table.
declared_cols = set(to_map.declared_columns)
for k, v in list(to_map.properties.items()):
if v in declared_cols:
to_map.properties[k] = pjoin.c[v.key]
to_map.local_table = pjoin
m_args = to_map.mapper_args_fn or dict
def mapper_args():
args = m_args()
args["polymorphic_on"] = pjoin.c.type
return args
to_map.mapper_args_fn = mapper_args
m = to_map.map()
for scls in cls.__subclasses__():
sm = _mapper_or_none(scls)
if sm and sm.concrete and cls in scls.__bases__:
sm._set_concrete_base(m)
@classmethod
def _sa_raise_deferred_config(cls):
raise orm_exc.UnmappedClassError(
cls,
msg="Class %s is a subclass of AbstractConcreteBase and "
"has a mapping pending until all subclasses are defined. "
"Call the sqlalchemy.orm.configure_mappers() function after "
"all subclasses have been defined to "
"complete the mapping of this class."
% orm_exc._safe_cls_name(cls),
)
class DeferredReflection(object):
"""A helper class for construction of mappings based on
a deferred reflection step.
Normally, declarative can be used with reflection by
setting a :class:`_schema.Table` object using autoload=True
as the ``__table__`` attribute on a declarative class.
The caveat is that the :class:`_schema.Table` must be fully
reflected, or at the very least have a primary key column,
at the point at which a normal declarative mapping is
constructed, meaning the :class:`_engine.Engine` must be available
at class declaration time.
The :class:`.DeferredReflection` mixin moves the construction
of mappers to be at a later point, after a specific
method is called which first reflects all :class:`_schema.Table`
objects created so far. Classes can define it as such::
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.declarative import DeferredReflection
Base = declarative_base()
class MyClass(DeferredReflection, Base):
__tablename__ = 'mytable'
Above, ``MyClass`` is not yet mapped. After a series of
classes have been defined in the above fashion, all tables
can be reflected and mappings created using
:meth:`.prepare`::
engine = create_engine("someengine://...")
DeferredReflection.prepare(engine)
The :class:`.DeferredReflection` mixin can be applied to individual
classes, used as the base for the declarative base itself,
or used in a custom abstract class. Using an abstract base
    allows only a subset of classes to be prepared for a
particular prepare step, which is necessary for applications
that use more than one engine. For example, if an application
has two engines, you might use two bases, and prepare each
separately, e.g.::
class ReflectedOne(DeferredReflection, Base):
__abstract__ = True
class ReflectedTwo(DeferredReflection, Base):
__abstract__ = True
class MyClass(ReflectedOne):
__tablename__ = 'mytable'
class MyOtherClass(ReflectedOne):
__tablename__ = 'myothertable'
class YetAnotherClass(ReflectedTwo):
__tablename__ = 'yetanothertable'
# ... etc.
Above, the class hierarchies for ``ReflectedOne`` and
``ReflectedTwo`` can be configured separately::
ReflectedOne.prepare(engine_one)
ReflectedTwo.prepare(engine_two)
"""
@classmethod
def prepare(cls, engine):
"""Reflect all :class:`_schema.Table` objects for all current
:class:`.DeferredReflection` subclasses"""
to_map = _DeferredMapperConfig.classes_for_base(cls)
for thingy in to_map:
cls._sa_decl_prepare(thingy.local_table, engine)
thingy.map()
mapper = thingy.cls.__mapper__
metadata = mapper.class_.metadata
for rel in mapper._props.values():
if (
isinstance(rel, properties.RelationshipProperty)
and rel.secondary is not None
):
if isinstance(rel.secondary, Table):
cls._reflect_table(rel.secondary, engine)
elif isinstance(rel.secondary, _class_resolver):
rel.secondary._resolvers += (
cls._sa_deferred_table_resolver(engine, metadata),
)
@classmethod
def _sa_deferred_table_resolver(cls, engine, metadata):
def _resolve(key):
t1 = Table(key, metadata)
cls._reflect_table(t1, engine)
return t1
return _resolve
@classmethod
def _sa_decl_prepare(cls, local_table, engine):
# autoload Table, which is already
# present in the metadata. This
# will fill in db-loaded columns
# into the existing Table object.
if local_table is not None:
cls._reflect_table(local_table, engine)
@classmethod
def _sa_raise_deferred_config(cls):
raise orm_exc.UnmappedClassError(
cls,
msg="Class %s is a subclass of DeferredReflection. "
"Mappings are not produced until the .prepare() "
"method is called on the class hierarchy."
% orm_exc._safe_cls_name(cls),
)
@classmethod
def _reflect_table(cls, table, engine):
Table(
table.name,
table.metadata,
extend_existing=True,
autoload_replace=False,
autoload=True,
autoload_with=engine,
schema=table.schema,
)
@inspection._inspects(DeclarativeMeta)
def _inspect_decl_meta(cls):
mp = _inspect_mapped_class(cls)
if mp is None:
if _DeferredMapperConfig.has_cls(cls):
_DeferredMapperConfig.raise_unmapped_for_cls(cls)
raise orm_exc.UnmappedClassError(
cls,
msg="Class %s has a deferred mapping on it. It is not yet "
"usable as a mapped class." % orm_exc._safe_cls_name(cls),
)
return mp
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/ext/declarative/base.py | # ext/declarative/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Internal implementation for declarative."""
import collections
import weakref
from sqlalchemy.orm import instrumentation
from . import clsregistry
from ... import event
from ... import exc
from ... import util
from ...orm import class_mapper
from ...orm import exc as orm_exc
from ...orm import mapper
from ...orm import mapperlib
from ...orm import synonym
from ...orm.attributes import QueryableAttribute
from ...orm.base import _is_mapped_class
from ...orm.base import InspectionAttr
from ...orm.interfaces import MapperProperty
from ...orm.properties import ColumnProperty
from ...orm.properties import CompositeProperty
from ...schema import Column
from ...schema import Table
from ...sql import expression
from ...util import topological
declared_attr = declarative_props = None
def _declared_mapping_info(cls):
# deferred mapping
if _DeferredMapperConfig.has_cls(cls):
return _DeferredMapperConfig.config_for_cls(cls)
# regular mapping
elif _is_mapped_class(cls):
return class_mapper(cls, configure=False)
else:
return None
def _resolve_for_abstract_or_classical(cls):
if cls is object:
return None
if _get_immediate_cls_attr(cls, "__abstract__", strict=True):
for sup in cls.__bases__:
sup = _resolve_for_abstract_or_classical(sup)
if sup is not None:
return sup
else:
return None
else:
classical = _dive_for_classically_mapped_class(cls)
if classical is not None:
return classical
else:
return cls
def _dive_for_classically_mapped_class(cls):
# support issue #4321
# if we are within a base hierarchy, don't
# search at all for classical mappings
if hasattr(cls, "_decl_class_registry"):
return None
manager = instrumentation.manager_of_class(cls)
if manager is not None:
return cls
else:
for sup in cls.__bases__:
mapper = _dive_for_classically_mapped_class(sup)
if mapper is not None:
return sup
else:
return None
def _get_immediate_cls_attr(cls, attrname, strict=False):
"""return an attribute of the class that is either present directly
on the class, e.g. not on a superclass, or is from a superclass but
this superclass is a non-mapped mixin, that is, not a descendant of
the declarative base and is also not classically mapped.
This is used to detect attributes that indicate something about
a mapped class independently from any mapped classes that it may
inherit from.
"""
if not issubclass(cls, object):
return None
for base in cls.__mro__:
_is_declarative_inherits = hasattr(base, "_decl_class_registry")
        _is_classical_inherits = (
not _is_declarative_inherits
and _dive_for_classically_mapped_class(base) is not None
)
if attrname in base.__dict__ and (
base is cls
or (
(base in cls.__bases__ if strict else True)
and not _is_declarative_inherits
                and not _is_classical_inherits
)
):
return getattr(base, attrname)
else:
return None
def _as_declarative(cls, classname, dict_):
global declared_attr, declarative_props
if declared_attr is None:
from .api import declared_attr
declarative_props = (declared_attr, util.classproperty)
if _get_immediate_cls_attr(cls, "__abstract__", strict=True):
return
_MapperConfig.setup_mapping(cls, classname, dict_)
def _check_declared_props_nocascade(obj, name, cls):
if isinstance(obj, declarative_props):
if getattr(obj, "_cascading", False):
util.warn(
"@declared_attr.cascading is not supported on the %s "
"attribute on class %s. This attribute invokes for "
"subclasses in any case." % (name, cls)
)
return True
else:
return False
class _MapperConfig(object):
@classmethod
def setup_mapping(cls, cls_, classname, dict_):
defer_map = _get_immediate_cls_attr(
cls_, "_sa_decl_prepare_nocascade", strict=True
) or hasattr(cls_, "_sa_decl_prepare")
if defer_map:
cfg_cls = _DeferredMapperConfig
else:
cfg_cls = _MapperConfig
cfg_cls(cls_, classname, dict_)
def __init__(self, cls_, classname, dict_):
self.cls = cls_
# dict_ will be a dictproxy, which we can't write to, and we need to!
self.dict_ = dict(dict_)
self.classname = classname
self.persist_selectable = None
self.properties = util.OrderedDict()
self.declared_columns = set()
self.column_copies = {}
self._setup_declared_events()
# temporary registry. While early 1.0 versions
# set up the ClassManager here, by API contract
# we can't do that until there's a mapper.
self.cls._sa_declared_attr_reg = {}
self._scan_attributes()
mapperlib._CONFIGURE_MUTEX.acquire()
try:
clsregistry.add_class(self.classname, self.cls)
self._extract_mappable_attributes()
self._extract_declared_columns()
self._setup_table()
self._setup_inheritance()
self._early_mapping()
finally:
mapperlib._CONFIGURE_MUTEX.release()
def _early_mapping(self):
self.map()
def _setup_declared_events(self):
if _get_immediate_cls_attr(self.cls, "__declare_last__"):
@event.listens_for(mapper, "after_configured")
def after_configured():
self.cls.__declare_last__()
if _get_immediate_cls_attr(self.cls, "__declare_first__"):
@event.listens_for(mapper, "before_configured")
def before_configured():
self.cls.__declare_first__()
def _scan_attributes(self):
cls = self.cls
dict_ = self.dict_
column_copies = self.column_copies
mapper_args_fn = None
table_args = inherited_table_args = None
tablename = None
for base in cls.__mro__:
class_mapped = (
base is not cls
and _declared_mapping_info(base) is not None
and not _get_immediate_cls_attr(
base, "_sa_decl_prepare_nocascade", strict=True
)
)
if not class_mapped and base is not cls:
self._produce_column_copies(base)
for name, obj in vars(base).items():
if name == "__mapper_args__":
check_decl = _check_declared_props_nocascade(
obj, name, cls
)
if not mapper_args_fn and (not class_mapped or check_decl):
# don't even invoke __mapper_args__ until
# after we've determined everything about the
# mapped table.
# make a copy of it so a class-level dictionary
# is not overwritten when we update column-based
# arguments.
def mapper_args_fn():
return dict(cls.__mapper_args__)
elif name == "__tablename__":
check_decl = _check_declared_props_nocascade(
obj, name, cls
)
if not tablename and (not class_mapped or check_decl):
tablename = cls.__tablename__
elif name == "__table_args__":
check_decl = _check_declared_props_nocascade(
obj, name, cls
)
if not table_args and (not class_mapped or check_decl):
table_args = cls.__table_args__
if not isinstance(
table_args, (tuple, dict, type(None))
):
raise exc.ArgumentError(
"__table_args__ value must be a tuple, "
"dict, or None"
)
if base is not cls:
inherited_table_args = True
elif class_mapped:
if isinstance(obj, declarative_props):
util.warn(
"Regular (i.e. not __special__) "
"attribute '%s.%s' uses @declared_attr, "
"but owning class %s is mapped - "
"not applying to subclass %s."
% (base.__name__, name, base, cls)
)
continue
elif base is not cls:
# we're a mixin, abstract base, or something that is
# acting like that for now.
if isinstance(obj, Column):
# already copied columns to the mapped class.
continue
elif isinstance(obj, MapperProperty):
raise exc.InvalidRequestError(
"Mapper properties (i.e. deferred,"
"column_property(), relationship(), etc.) must "
"be declared as @declared_attr callables "
"on declarative mixin classes."
)
elif isinstance(obj, declarative_props):
oldclassprop = isinstance(obj, util.classproperty)
if not oldclassprop and obj._cascading:
if name in dict_:
# unfortunately, while we can use the user-
# defined attribute here to allow a clean
# override, if there's another
# subclass below then it still tries to use
# this. not sure if there is enough
# information here to add this as a feature
# later on.
util.warn(
"Attribute '%s' on class %s cannot be "
"processed due to "
"@declared_attr.cascading; "
"skipping" % (name, cls)
)
dict_[name] = column_copies[
obj
] = ret = obj.__get__(obj, cls)
setattr(cls, name, ret)
else:
if oldclassprop:
util.warn_deprecated(
"Use of sqlalchemy.util.classproperty on "
"declarative classes is deprecated."
)
# access attribute using normal class access
ret = getattr(cls, name)
# correct for proxies created from hybrid_property
# or similar. note there is no known case that
# produces nested proxies, so we are only
# looking one level deep right now.
if (
isinstance(ret, InspectionAttr)
and ret._is_internal_proxy
and not isinstance(
ret.original_property, MapperProperty
)
):
ret = ret.descriptor
dict_[name] = column_copies[obj] = ret
if (
isinstance(ret, (Column, MapperProperty))
and ret.doc is None
):
ret.doc = obj.__doc__
# here, the attribute is some other kind of property that
# we assume is not part of the declarative mapping.
# however, check for some more common mistakes
else:
self._warn_for_decl_attributes(base, name, obj)
if inherited_table_args and not tablename:
table_args = None
self.table_args = table_args
self.tablename = tablename
self.mapper_args_fn = mapper_args_fn
def _warn_for_decl_attributes(self, cls, key, c):
if isinstance(c, expression.ColumnClause):
util.warn(
"Attribute '%s' on class %s appears to be a non-schema "
"'sqlalchemy.sql.column()' "
"object; this won't be part of the declarative mapping"
% (key, cls)
)
def _produce_column_copies(self, base):
cls = self.cls
dict_ = self.dict_
column_copies = self.column_copies
# copy mixin columns to the mapped class
for name, obj in vars(base).items():
if isinstance(obj, Column):
if getattr(cls, name) is not obj:
# if column has been overridden
# (like by the InstrumentedAttribute of the
# superclass), skip
continue
elif obj.foreign_keys:
raise exc.InvalidRequestError(
"Columns with foreign keys to other columns "
"must be declared as @declared_attr callables "
"on declarative mixin classes. "
)
elif name not in dict_ and not (
"__table__" in dict_
and (obj.name or name) in dict_["__table__"].c
):
column_copies[obj] = copy_ = obj.copy()
copy_._creation_order = obj._creation_order
setattr(cls, name, copy_)
dict_[name] = copy_
def _extract_mappable_attributes(self):
cls = self.cls
dict_ = self.dict_
our_stuff = self.properties
late_mapped = _get_immediate_cls_attr(
cls, "_sa_decl_prepare_nocascade", strict=True
)
for k in list(dict_):
if k in ("__table__", "__tablename__", "__mapper_args__"):
continue
value = dict_[k]
if isinstance(value, declarative_props):
if isinstance(value, declared_attr) and value._cascading:
util.warn(
"Use of @declared_attr.cascading only applies to "
"Declarative 'mixin' and 'abstract' classes. "
"Currently, this flag is ignored on mapped class "
"%s" % self.cls
)
value = getattr(cls, k)
elif (
isinstance(value, QueryableAttribute)
and value.class_ is not cls
and value.key != k
):
# detect a QueryableAttribute that's already mapped being
# assigned elsewhere in userland, turn into a synonym()
value = synonym(value.key)
setattr(cls, k, value)
if (
isinstance(value, tuple)
and len(value) == 1
and isinstance(value[0], (Column, MapperProperty))
):
util.warn(
"Ignoring declarative-like tuple value of attribute "
"'%s': possibly a copy-and-paste error with a comma "
"accidentally placed at the end of the line?" % k
)
continue
elif not isinstance(value, (Column, MapperProperty)):
# using @declared_attr for some object that
# isn't Column/MapperProperty; remove from the dict_
# and place the evaluated value onto the class.
if not k.startswith("__"):
dict_.pop(k)
self._warn_for_decl_attributes(cls, k, value)
if not late_mapped:
setattr(cls, k, value)
continue
# we expect to see the name 'metadata' in some valid cases;
# however at this point we see it's assigned to something trying
# to be mapped, so raise for that.
elif k == "metadata":
raise exc.InvalidRequestError(
"Attribute name 'metadata' is reserved "
"for the MetaData instance when using a "
"declarative base class."
)
prop = clsregistry._deferred_relationship(cls, value)
our_stuff[k] = prop
def _extract_declared_columns(self):
our_stuff = self.properties
# set up attributes in the order they were created
our_stuff.sort(key=lambda key: our_stuff[key]._creation_order)
# extract columns from the class dict
declared_columns = self.declared_columns
name_to_prop_key = collections.defaultdict(set)
for key, c in list(our_stuff.items()):
if isinstance(c, (ColumnProperty, CompositeProperty)):
for col in c.columns:
if isinstance(col, Column) and col.table is None:
_undefer_column_name(key, col)
if not isinstance(c, CompositeProperty):
name_to_prop_key[col.name].add(key)
declared_columns.add(col)
elif isinstance(c, Column):
_undefer_column_name(key, c)
name_to_prop_key[c.name].add(key)
declared_columns.add(c)
# if the column is the same name as the key,
# remove it from the explicit properties dict.
# the normal rules for assigning column-based properties
# will take over, including precedence of columns
# in multi-column ColumnProperties.
if key == c.key:
del our_stuff[key]
for name, keys in name_to_prop_key.items():
if len(keys) > 1:
util.warn(
"On class %r, Column object %r named "
"directly multiple times, "
"only one will be used: %s. "
"Consider using orm.synonym instead"
% (self.classname, name, (", ".join(sorted(keys))))
)
def _setup_table(self):
cls = self.cls
tablename = self.tablename
table_args = self.table_args
dict_ = self.dict_
declared_columns = self.declared_columns
declared_columns = self.declared_columns = sorted(
declared_columns, key=lambda c: c._creation_order
)
table = None
if hasattr(cls, "__table_cls__"):
table_cls = util.unbound_method_to_callable(cls.__table_cls__)
else:
table_cls = Table
if "__table__" not in dict_:
if tablename is not None:
args, table_kw = (), {}
if table_args:
if isinstance(table_args, dict):
table_kw = table_args
elif isinstance(table_args, tuple):
if isinstance(table_args[-1], dict):
args, table_kw = table_args[0:-1], table_args[-1]
else:
args = table_args
autoload = dict_.get("__autoload__")
if autoload:
table_kw["autoload"] = True
cls.__table__ = table = table_cls(
tablename,
cls.metadata,
*(tuple(declared_columns) + tuple(args)),
**table_kw
)
else:
table = cls.__table__
if declared_columns:
for c in declared_columns:
if not table.c.contains_column(c):
raise exc.ArgumentError(
"Can't add additional column %r when "
"specifying __table__" % c.key
)
self.local_table = table
def _setup_inheritance(self):
table = self.local_table
cls = self.cls
table_args = self.table_args
declared_columns = self.declared_columns
# since we search for classical mappings now, search for
# multiple mapped bases as well and raise an error.
inherits = []
for c in cls.__bases__:
c = _resolve_for_abstract_or_classical(c)
if c is None:
continue
if _declared_mapping_info(
c
) is not None and not _get_immediate_cls_attr(
c, "_sa_decl_prepare_nocascade", strict=True
):
inherits.append(c)
if inherits:
if len(inherits) > 1:
raise exc.InvalidRequestError(
"Class %s has multiple mapped bases: %r" % (cls, inherits)
)
self.inherits = inherits[0]
else:
self.inherits = None
if (
table is None
and self.inherits is None
and not _get_immediate_cls_attr(cls, "__no_table__")
):
raise exc.InvalidRequestError(
"Class %r does not have a __table__ or __tablename__ "
"specified and does not inherit from an existing "
"table-mapped class." % cls
)
elif self.inherits:
inherited_mapper = _declared_mapping_info(self.inherits)
inherited_table = inherited_mapper.local_table
inherited_persist_selectable = inherited_mapper.persist_selectable
if table is None:
# single table inheritance.
# ensure no table args
if table_args:
raise exc.ArgumentError(
"Can't place __table_args__ on an inherited class "
"with no table."
)
# add any columns declared here to the inherited table.
for c in declared_columns:
if c.name in inherited_table.c:
if inherited_table.c[c.name] is c:
continue
raise exc.ArgumentError(
"Column '%s' on class %s conflicts with "
"existing column '%s'"
% (c, cls, inherited_table.c[c.name])
)
if c.primary_key:
raise exc.ArgumentError(
"Can't place primary key columns on an inherited "
"class with no table."
)
inherited_table.append_column(c)
if (
inherited_persist_selectable is not None
and inherited_persist_selectable is not inherited_table
):
inherited_persist_selectable._refresh_for_new_column(c)
def _prepare_mapper_arguments(self):
properties = self.properties
if self.mapper_args_fn:
mapper_args = self.mapper_args_fn()
else:
mapper_args = {}
# make sure that column copies are used rather
# than the original columns from any mixins
for k in ("version_id_col", "polymorphic_on"):
if k in mapper_args:
v = mapper_args[k]
mapper_args[k] = self.column_copies.get(v, v)
assert (
"inherits" not in mapper_args
), "Can't specify 'inherits' explicitly with declarative mappings"
if self.inherits:
mapper_args["inherits"] = self.inherits
if self.inherits and not mapper_args.get("concrete", False):
# single or joined inheritance
# exclude any cols on the inherited table which are
# not mapped on the parent class, to avoid
# mapping columns specific to sibling/nephew classes
inherited_mapper = _declared_mapping_info(self.inherits)
inherited_table = inherited_mapper.local_table
if "exclude_properties" not in mapper_args:
mapper_args["exclude_properties"] = exclude_properties = set(
[
c.key
for c in inherited_table.c
if c not in inherited_mapper._columntoproperty
]
).union(inherited_mapper.exclude_properties or ())
exclude_properties.difference_update(
[c.key for c in self.declared_columns]
)
# look through columns in the current mapper that
# are keyed to a propname different than the colname
# (if names were the same, we'd have popped it out above,
# in which case the mapper makes this combination).
# See if the superclass has a similar column property.
# If so, join them together.
for k, col in list(properties.items()):
if not isinstance(col, expression.ColumnElement):
continue
if k in inherited_mapper._props:
p = inherited_mapper._props[k]
if isinstance(p, ColumnProperty):
# note here we place the subclass column
# first. See [ticket:1892] for background.
properties[k] = [col] + p.columns
result_mapper_args = mapper_args.copy()
result_mapper_args["properties"] = properties
self.mapper_args = result_mapper_args
def map(self):
self._prepare_mapper_arguments()
if hasattr(self.cls, "__mapper_cls__"):
mapper_cls = util.unbound_method_to_callable(
self.cls.__mapper_cls__
)
else:
mapper_cls = mapper
self.cls.__mapper__ = mp_ = mapper_cls(
self.cls, self.local_table, **self.mapper_args
)
del self.cls._sa_declared_attr_reg
return mp_
class _DeferredMapperConfig(_MapperConfig):
_configs = util.OrderedDict()
def _early_mapping(self):
pass
@property
def cls(self):
return self._cls()
@cls.setter
def cls(self, class_):
self._cls = weakref.ref(class_, self._remove_config_cls)
self._configs[self._cls] = self
@classmethod
def _remove_config_cls(cls, ref):
cls._configs.pop(ref, None)
@classmethod
def has_cls(cls, class_):
# 2.6 fails on weakref if class_ is an old style class
return isinstance(class_, type) and weakref.ref(class_) in cls._configs
@classmethod
def raise_unmapped_for_cls(cls, class_):
if hasattr(class_, "_sa_raise_deferred_config"):
class_._sa_raise_deferred_config()
raise orm_exc.UnmappedClassError(
class_,
msg="Class %s has a deferred mapping on it. It is not yet "
"usable as a mapped class." % orm_exc._safe_cls_name(class_),
)
@classmethod
def config_for_cls(cls, class_):
return cls._configs[weakref.ref(class_)]
@classmethod
def classes_for_base(cls, base_cls, sort=True):
classes_for_base = [
m
for m, cls_ in [(m, m.cls) for m in cls._configs.values()]
if cls_ is not None and issubclass(cls_, base_cls)
]
if not sort:
return classes_for_base
all_m_by_cls = dict((m.cls, m) for m in classes_for_base)
tuples = []
for m_cls in all_m_by_cls:
tuples.extend(
(all_m_by_cls[base_cls], all_m_by_cls[m_cls])
for base_cls in m_cls.__bases__
if base_cls in all_m_by_cls
)
return list(topological.sort(tuples, classes_for_base))
def map(self):
self._configs.pop(self._cls, None)
return super(_DeferredMapperConfig, self).map()
def _add_attribute(cls, key, value):
"""add an attribute to an existing declarative class.
This runs through the logic to determine MapperProperty,
adds it to the Mapper, adds a column to the mapped Table, etc.
"""
if "__mapper__" in cls.__dict__:
if isinstance(value, Column):
_undefer_column_name(key, value)
cls.__table__.append_column(value)
cls.__mapper__.add_property(key, value)
elif isinstance(value, ColumnProperty):
for col in value.columns:
if isinstance(col, Column) and col.table is None:
_undefer_column_name(key, col)
cls.__table__.append_column(col)
cls.__mapper__.add_property(key, value)
elif isinstance(value, MapperProperty):
cls.__mapper__.add_property(
key, clsregistry._deferred_relationship(cls, value)
)
elif isinstance(value, QueryableAttribute) and value.key != key:
# detect a QueryableAttribute that's already mapped being
# assigned elsewhere in userland, turn into a synonym()
value = synonym(value.key)
cls.__mapper__.add_property(
key, clsregistry._deferred_relationship(cls, value)
)
else:
type.__setattr__(cls, key, value)
cls.__mapper__._expire_memoizations()
else:
type.__setattr__(cls, key, value)
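# _add_attribute is what ultimately runs when an attribute is assigned onto
# an already-mapped declarative class (via DeclarativeMeta.__setattr__); a
# sketch, assuming a mapped "User" class:
#
#     User.fullname = Column(String(100))        # appends column, maps property
#     User.addresses = relationship("Address")   # adds a deferred relationship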
def _del_attribute(cls, key):
if (
"__mapper__" in cls.__dict__
and key in cls.__dict__
and not cls.__mapper__._dispose_called
):
value = cls.__dict__[key]
if isinstance(
value, (Column, ColumnProperty, MapperProperty, QueryableAttribute)
):
raise NotImplementedError(
"Can't un-map individual mapped attributes on a mapped class."
)
else:
type.__delattr__(cls, key)
cls.__mapper__._expire_memoizations()
else:
type.__delattr__(cls, key)
def _declarative_constructor(self, **kwargs):
"""A simple constructor that allows initialization from kwargs.
Sets attributes on the constructed instance using the names and
values in ``kwargs``.
Only keys that are present as
attributes of the instance's class are allowed. These could be,
for example, any mapped columns or relationships.
"""
cls_ = type(self)
for k in kwargs:
if not hasattr(cls_, k):
raise TypeError(
"%r is an invalid keyword argument for %s" % (k, cls_.__name__)
)
setattr(self, k, kwargs[k])
_declarative_constructor.__name__ = "__init__"
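# Behavior sketch for the default constructor, assuming a mapped "User"
# class with a "name" attribute:
#
#     User(name="ed")        # ok; sets self.name = "ed"
#     User(nonexistent="x")  # raises TypeError: invalid keyword argument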
def _undefer_column_name(key, column):
if column.key is None:
column.key = key
if column.name is None:
column.name = key
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/pickleable.py | # testing/pickleable.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Classes used in pickling tests, need to be at the module level for
unpickling.
"""
from . import fixtures
class User(fixtures.ComparableEntity):
pass
class Order(fixtures.ComparableEntity):
pass
class Dingaling(fixtures.ComparableEntity):
pass
class EmailUser(User):
pass
class Address(fixtures.ComparableEntity):
pass
# TODO: these are kind of arbitrary....
class Child1(fixtures.ComparableEntity):
pass
class Child2(fixtures.ComparableEntity):
pass
class Parent(fixtures.ComparableEntity):
pass
class Screen(object):
def __init__(self, obj, parent=None):
self.obj = obj
self.parent = parent
class Foo(object):
def __init__(self, moredata):
self.data = "im data"
self.stuff = "im stuff"
self.moredata = moredata
__hash__ = object.__hash__
def __eq__(self, other):
return (
other.data == self.data
and other.stuff == self.stuff
and other.moredata == self.moredata
)
class Bar(object):
def __init__(self, x, y):
self.x = x
self.y = y
__hash__ = object.__hash__
def __eq__(self, other):
return (
other.__class__ is self.__class__
and other.x == self.x
and other.y == self.y
)
def __str__(self):
return "Bar(%d, %d)" % (self.x, self.y)
class OldSchool:
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, other):
return (
other.__class__ is self.__class__
and other.x == self.x
and other.y == self.y
)
class OldSchoolWithoutCompare:
def __init__(self, x, y):
self.x = x
self.y = y
class BarWithoutCompare(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return "Bar(%d, %d)" % (self.x, self.y)
class NotComparable(object):
def __init__(self, data):
self.data = data
def __hash__(self):
return id(self)
def __eq__(self, other):
return NotImplemented
def __ne__(self, other):
return NotImplemented
class BrokenComparable(object):
def __init__(self, data):
self.data = data
def __hash__(self):
return id(self)
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
raise NotImplementedError
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/exclusions.py | # testing/exclusions.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import contextlib
import operator
import re
import sys
from . import config
from .. import util
from ..util import decorator
from ..util.compat import inspect_getfullargspec
def skip_if(predicate, reason=None):
rule = compound()
pred = _as_predicate(predicate, reason)
rule.skips.add(pred)
return rule
def fails_if(predicate, reason=None):
rule = compound()
pred = _as_predicate(predicate, reason)
rule.fails.add(pred)
return rule
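# Usage sketch: predicates may be strings (optionally with a version spec),
# booleans, or callables; the decorated test is skipped or expected to fail
# accordingly.  The test body here is illustrative:
#
#     @skip_if("mysql", "mysql does the wrong thing here")
#     @fails_if("postgresql<9.0", "known to fail on older postgresql")
#     def test_something(self):
#         ...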
class compound(object):
def __init__(self):
self.fails = set()
self.skips = set()
self.tags = set()
def __add__(self, other):
return self.add(other)
def add(self, *others):
copy = compound()
copy.fails.update(self.fails)
copy.skips.update(self.skips)
copy.tags.update(self.tags)
for other in others:
copy.fails.update(other.fails)
copy.skips.update(other.skips)
copy.tags.update(other.tags)
return copy
def not_(self):
copy = compound()
copy.fails.update(NotPredicate(fail) for fail in self.fails)
copy.skips.update(NotPredicate(skip) for skip in self.skips)
copy.tags.update(self.tags)
return copy
@property
def enabled(self):
return self.enabled_for_config(config._current)
def enabled_for_config(self, config):
for predicate in self.skips.union(self.fails):
if predicate(config):
return False
else:
return True
def matching_config_reasons(self, config):
return [
predicate._as_string(config)
for predicate in self.skips.union(self.fails)
if predicate(config)
]
def include_test(self, include_tags, exclude_tags):
return bool(
not self.tags.intersection(exclude_tags)
and (not include_tags or self.tags.intersection(include_tags))
)
def _extend(self, other):
self.skips.update(other.skips)
self.fails.update(other.fails)
self.tags.update(other.tags)
def __call__(self, fn):
if hasattr(fn, "_sa_exclusion_extend"):
fn._sa_exclusion_extend._extend(self)
return fn
@decorator
def decorate(fn, *args, **kw):
return self._do(config._current, fn, *args, **kw)
decorated = decorate(fn)
decorated._sa_exclusion_extend = self
return decorated
@contextlib.contextmanager
def fail_if(self):
all_fails = compound()
all_fails.fails.update(self.skips.union(self.fails))
try:
yield
except Exception as ex:
all_fails._expect_failure(config._current, ex)
else:
all_fails._expect_success(config._current)
def _do(self, cfg, fn, *args, **kw):
for skip in self.skips:
if skip(cfg):
msg = "'%s' : %s" % (
config.get_current_test_name(),
skip._as_string(cfg),
)
config.skip_test(msg)
try:
return_value = fn(*args, **kw)
except Exception as ex:
self._expect_failure(cfg, ex, name=fn.__name__)
else:
self._expect_success(cfg, name=fn.__name__)
return return_value
def _expect_failure(self, config, ex, name="block"):
for fail in self.fails:
if fail(config):
if util.py2k:
str_ex = unicode(ex).encode("utf-8", errors="ignore")
else:
str_ex = str(ex)
print(
(
"%s failed as expected (%s): %s "
% (name, fail._as_string(config), str_ex)
)
)
break
else:
util.raise_(ex, with_traceback=sys.exc_info()[2])
def _expect_success(self, config, name="block"):
if not self.fails:
return
for fail in self.fails:
if fail(config):
raise AssertionError(
"Unexpected success for '%s' (%s)"
% (
name,
" and ".join(
fail._as_string(config) for fail in self.fails
),
)
)
def requires_tag(tagname):
return tags([tagname])
def tags(tagnames):
comp = compound()
comp.tags.update(tagnames)
return comp
def only_if(predicate, reason=None):
predicate = _as_predicate(predicate)
return skip_if(NotPredicate(predicate), reason)
def succeeds_if(predicate, reason=None):
predicate = _as_predicate(predicate)
return fails_if(NotPredicate(predicate), reason)
class Predicate(object):
@classmethod
def as_predicate(cls, predicate, description=None):
if isinstance(predicate, compound):
return cls.as_predicate(predicate.enabled_for_config, description)
elif isinstance(predicate, Predicate):
if description and predicate.description is None:
predicate.description = description
return predicate
elif isinstance(predicate, (list, set)):
return OrPredicate(
[cls.as_predicate(pred) for pred in predicate], description
)
elif isinstance(predicate, tuple):
return SpecPredicate(*predicate)
elif isinstance(predicate, util.string_types):
tokens = re.match(
r"([\+\w]+)\s*(?:(>=|==|!=|<=|<|>)\s*([\d\.]+))?", predicate
)
if not tokens:
raise ValueError(
"Couldn't locate DB name in predicate: %r" % predicate
)
db = tokens.group(1)
op = tokens.group(2)
spec = (
tuple(int(d) for d in tokens.group(3).split("."))
if tokens.group(3)
else None
)
return SpecPredicate(db, op, spec, description=description)
elif util.callable(predicate):
return LambdaPredicate(predicate, description)
else:
assert False, "unknown predicate type: %s" % predicate
def _format_description(self, config, negate=False):
bool_ = self(config)
if negate:
            # flip the evaluated result when rendering a negated description
            bool_ = not bool_
return self.description % {
"driver": config.db.url.get_driver_name()
if config
else "<no driver>",
"database": config.db.url.get_backend_name()
if config
else "<no database>",
"doesnt_support": "doesn't support" if bool_ else "does support",
"does_support": "does support" if bool_ else "doesn't support",
}
def _as_string(self, config=None, negate=False):
raise NotImplementedError()
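# Sketch of the string forms accepted by Predicate.as_predicate(), each of
# which parses into a SpecPredicate via the regex above:
#
#     Predicate.as_predicate("sqlite")           # dialect name only
#     Predicate.as_predicate("mysql+pymysql")    # dialect plus driver
#     Predicate.as_predicate("postgresql>=9.4")  # dialect with version spec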
class BooleanPredicate(Predicate):
def __init__(self, value, description=None):
self.value = value
self.description = description or "boolean %s" % value
def __call__(self, config):
return self.value
def _as_string(self, config, negate=False):
return self._format_description(config, negate=negate)
class SpecPredicate(Predicate):
def __init__(self, db, op=None, spec=None, description=None):
self.db = db
self.op = op
self.spec = spec
self.description = description
_ops = {
"<": operator.lt,
">": operator.gt,
"==": operator.eq,
"!=": operator.ne,
"<=": operator.le,
">=": operator.ge,
"in": operator.contains,
"between": lambda val, pair: val >= pair[0] and val <= pair[1],
}
def __call__(self, config):
engine = config.db
if "+" in self.db:
dialect, driver = self.db.split("+")
else:
dialect, driver = self.db, None
if dialect and engine.name != dialect:
return False
if driver is not None and engine.driver != driver:
return False
if self.op is not None:
assert driver is None, "DBAPI version specs not supported yet"
version = _server_version(engine)
oper = (
hasattr(self.op, "__call__") and self.op or self._ops[self.op]
)
return oper(version, self.spec)
else:
return True
def _as_string(self, config, negate=False):
if self.description is not None:
return self._format_description(config)
elif self.op is None:
if negate:
return "not %s" % self.db
else:
return "%s" % self.db
else:
if negate:
return "not %s %s %s" % (self.db, self.op, self.spec)
else:
return "%s %s %s" % (self.db, self.op, self.spec)
class LambdaPredicate(Predicate):
def __init__(self, lambda_, description=None, args=None, kw=None):
spec = inspect_getfullargspec(lambda_)
if not spec[0]:
self.lambda_ = lambda db: lambda_()
else:
self.lambda_ = lambda_
self.args = args or ()
self.kw = kw or {}
if description:
self.description = description
elif lambda_.__doc__:
self.description = lambda_.__doc__
else:
self.description = "custom function"
def __call__(self, config):
return self.lambda_(config)
def _as_string(self, config, negate=False):
return self._format_description(config)
class NotPredicate(Predicate):
def __init__(self, predicate, description=None):
self.predicate = predicate
self.description = description
def __call__(self, config):
return not self.predicate(config)
def _as_string(self, config, negate=False):
if self.description:
return self._format_description(config, not negate)
else:
return self.predicate._as_string(config, not negate)
class OrPredicate(Predicate):
def __init__(self, predicates, description=None):
self.predicates = predicates
self.description = description
def __call__(self, config):
for pred in self.predicates:
if pred(config):
return True
return False
def _eval_str(self, config, negate=False):
if negate:
conjunction = " and "
else:
conjunction = " or "
return conjunction.join(
p._as_string(config, negate=negate) for p in self.predicates
)
def _negation_str(self, config):
if self.description is not None:
return "Not " + self._format_description(config)
else:
return self._eval_str(config, negate=True)
def _as_string(self, config, negate=False):
if negate:
return self._negation_str(config)
else:
if self.description is not None:
return self._format_description(config)
else:
return self._eval_str(config)
_as_predicate = Predicate.as_predicate
def _is_excluded(db, op, spec):
return SpecPredicate(db, op, spec)(config._current)
def _server_version(engine):
"""Return a server_version_info tuple."""
# force metadata to be retrieved
conn = engine.connect()
version = getattr(engine.dialect, "server_version_info", None)
if version is None:
version = ()
conn.close()
return version
def db_spec(*dbs):
return OrPredicate([Predicate.as_predicate(db) for db in dbs])
def open(): # noqa
return skip_if(BooleanPredicate(False, "mark as execute"))
def closed():
return skip_if(BooleanPredicate(True, "marked as skip"))
def fails(reason=None):
return fails_if(BooleanPredicate(True, reason or "expected to fail"))
@decorator
def future(fn, *arg):
return fails_if(LambdaPredicate(fn), "Future feature")
def fails_on(db, reason=None):
return fails_if(db, reason)
def fails_on_everything_except(*dbs):
return succeeds_if(OrPredicate([Predicate.as_predicate(db) for db in dbs]))
def skip(db, reason=None):
return skip_if(db, reason)
def only_on(dbs, reason=None):
return only_if(
OrPredicate(
[Predicate.as_predicate(db, reason) for db in util.to_list(dbs)]
)
)
def exclude(db, op, spec, reason=None):
return skip_if(SpecPredicate(db, op, spec), reason)
def against(config, *queries):
assert queries, "no queries sent!"
return OrPredicate([Predicate.as_predicate(query) for query in queries])(
config
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/config.py | # testing/config.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import collections
requirements = None
db = None
db_url = None
db_opts = None
file_config = None
test_schema = None
test_schema_2 = None
_current = None
_fixture_functions = None # installed by plugin_base
def combinations(*comb, **kw):
r"""Deliver multiple versions of a test based on positional combinations.
This is a facade over pytest.mark.parametrize.
:param \*comb: argument combinations. These are tuples that will be passed
positionally to the decorated function.
:param argnames: optional list of argument names. These are the names
of the arguments in the test function that correspond to the entries
in each argument tuple. pytest.mark.parametrize requires this, however
the combinations function will derive it automatically if not present
by using ``inspect.getfullargspec(fn).args[1:]``. Note this assumes the
first argument is "self" which is discarded.
:param id\_: optional id template. This is a string template that
describes how the "id" for each parameter set should be defined, if any.
The number of characters in the template should match the number of
entries in each argument tuple. Each character describes how the
corresponding entry in the argument tuple should be handled, as far as
whether or not it is included in the arguments passed to the function, as
well as if it is included in the tokens used to create the id of the
parameter set.
If omitted, the argument combinations are passed to parametrize as is. If
passed, each argument combination is turned into a pytest.param() object,
mapping the elements of the argument tuple to produce an id based on a
character value in the same position within the string template using the
following scheme::
i - the given argument is a string that is part of the id only, don't
pass it as an argument
n - the given argument should be passed and it should be added to the
id by calling the .__name__ attribute
r - the given argument should be passed and it should be added to the
id by calling repr()
s - the given argument should be passed and it should be added to the
id by calling str()
a - (argument) the given argument should be passed and it should not
          be used to generate the id
e.g.::
@testing.combinations(
(operator.eq, "eq"),
(operator.ne, "ne"),
(operator.gt, "gt"),
(operator.lt, "lt"),
id_="na"
)
def test_operator(self, opfunc, name):
pass
The above combination will call ``.__name__`` on the first member of
each tuple and use that as the "id" to pytest.param().
"""
return _fixture_functions.combinations(*comb, **kw)
def fixture(*arg, **kw):
return _fixture_functions.fixture(*arg, **kw)
def get_current_test_name():
return _fixture_functions.get_current_test_name()
class Config(object):
def __init__(self, db, db_opts, options, file_config):
self._set_name(db)
self.db = db
self.db_opts = db_opts
self.options = options
self.file_config = file_config
self.test_schema = "test_schema"
self.test_schema_2 = "test_schema_2"
_stack = collections.deque()
_configs = set()
def _set_name(self, db):
if db.dialect.server_version_info:
svi = ".".join(str(tok) for tok in db.dialect.server_version_info)
self.name = "%s+%s_[%s]" % (db.name, db.driver, svi)
else:
self.name = "%s+%s" % (db.name, db.driver)
@classmethod
def register(cls, db, db_opts, options, file_config):
"""add a config as one of the global configs.
If there are no configs set up yet, this config also
gets set as the "_current".
"""
cfg = Config(db, db_opts, options, file_config)
cls._configs.add(cfg)
return cfg
@classmethod
def set_as_current(cls, config, namespace):
global db, _current, db_url, test_schema, test_schema_2, db_opts
_current = config
db_url = config.db.url
db_opts = config.db_opts
test_schema = config.test_schema
test_schema_2 = config.test_schema_2
namespace.db = db = config.db
@classmethod
def push_engine(cls, db, namespace):
assert _current, "Can't push without a default Config set up"
cls.push(
Config(
db, _current.db_opts, _current.options, _current.file_config
),
namespace,
)
@classmethod
def push(cls, config, namespace):
cls._stack.append(_current)
cls.set_as_current(config, namespace)
@classmethod
def reset(cls, namespace):
if cls._stack:
cls.set_as_current(cls._stack[0], namespace)
cls._stack.clear()
@classmethod
def all_configs(cls):
return cls._configs
@classmethod
def all_dbs(cls):
for cfg in cls.all_configs():
yield cfg.db
def skip_test(self, msg):
skip_test(msg)
def skip_test(msg):
raise _fixture_functions.skip_test_exception(msg)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/engines.py | # testing/engines.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
import re
import warnings
import weakref
from . import config
from . import uses_deprecated
from .util import decorator
from .. import event
from .. import pool
class ConnectionKiller(object):
def __init__(self):
self.proxy_refs = weakref.WeakKeyDictionary()
self.testing_engines = weakref.WeakKeyDictionary()
self.conns = set()
def add_engine(self, engine):
self.testing_engines[engine] = True
def connect(self, dbapi_conn, con_record):
self.conns.add((dbapi_conn, con_record))
def checkout(self, dbapi_con, con_record, con_proxy):
self.proxy_refs[con_proxy] = True
def invalidate(self, dbapi_con, con_record, exception):
self.conns.discard((dbapi_con, con_record))
def _safe(self, fn):
try:
fn()
except Exception as e:
warnings.warn(
"testing_reaper couldn't " "rollback/close connection: %s" % e
)
def rollback_all(self):
for rec in list(self.proxy_refs):
if rec is not None and rec.is_valid:
self._safe(rec.rollback)
def close_all(self):
for rec in list(self.proxy_refs):
if rec is not None and rec.is_valid:
self._safe(rec._close)
def _after_test_ctx(self):
# this can cause a deadlock with pg8000 - pg8000 acquires
# prepared statement lock inside of rollback() - if async gc
# is collecting in finalize_fairy, deadlock.
# not sure if this should be if pypy/jython only.
# note that firebird/fdb definitely needs this though
for conn, rec in list(self.conns):
if rec.connection is None:
# this is a hint that the connection is closed, which
# is causing segfaults on mysqlclient due to
# https://github.com/PyMySQL/mysqlclient-python/issues/270;
# try to work around here
continue
self._safe(conn.rollback)
def _stop_test_ctx(self):
if config.options.low_connections:
self._stop_test_ctx_minimal()
else:
self._stop_test_ctx_aggressive()
@uses_deprecated()
def _stop_test_ctx_minimal(self):
self.close_all()
self.conns = set()
for rec in list(self.testing_engines):
if rec is not config.db:
rec.dispose()
@uses_deprecated()
def _stop_test_ctx_aggressive(self):
self.close_all()
for conn, rec in list(self.conns):
self._safe(conn.close)
rec.connection = None
self.conns = set()
for rec in list(self.testing_engines):
rec.dispose()
def assert_all_closed(self):
for rec in self.proxy_refs:
if rec.is_valid:
assert False
testing_reaper = ConnectionKiller()
def drop_all_tables(metadata, bind):
testing_reaper.close_all()
if hasattr(bind, "close"):
bind.close()
if not config.db.dialect.supports_alter:
from . import assertions
with assertions.expect_warnings("Can't sort tables", assert_=False):
metadata.drop_all(bind)
else:
metadata.drop_all(bind)
@decorator
def assert_conns_closed(fn, *args, **kw):
try:
fn(*args, **kw)
finally:
testing_reaper.assert_all_closed()
@decorator
def rollback_open_connections(fn, *args, **kw):
"""Decorator that rolls back all open connections after fn execution."""
try:
fn(*args, **kw)
finally:
testing_reaper.rollback_all()
@decorator
def close_first(fn, *args, **kw):
"""Decorator that closes all connections before fn execution."""
testing_reaper.close_all()
fn(*args, **kw)
@decorator
def close_open_connections(fn, *args, **kw):
"""Decorator that closes all connections after fn execution."""
try:
fn(*args, **kw)
finally:
testing_reaper.close_all()
def all_dialects(exclude=None):
import sqlalchemy.databases as d
for name in d.__all__:
# TEMPORARY
if exclude and name in exclude:
continue
mod = getattr(d, name, None)
if not mod:
mod = getattr(
__import__("sqlalchemy.databases.%s" % name).databases, name
)
yield mod.dialect()
class ReconnectFixture(object):
def __init__(self, dbapi):
self.dbapi = dbapi
self.connections = []
self.is_stopped = False
def __getattr__(self, key):
return getattr(self.dbapi, key)
def connect(self, *args, **kwargs):
conn = self.dbapi.connect(*args, **kwargs)
if self.is_stopped:
self._safe(conn.close)
curs = conn.cursor() # should fail on Oracle etc.
# should fail for everything that didn't fail
# above, connection is closed
curs.execute("select 1")
assert False, "simulated connect failure didn't work"
else:
self.connections.append(conn)
return conn
def _safe(self, fn):
try:
fn()
except Exception as e:
warnings.warn(
"ReconnectFixture couldn't " "close connection: %s" % e
)
def shutdown(self, stop=False):
# TODO: this doesn't cover all cases
# as nicely as we'd like, namely MySQLdb.
# would need to implement R. Brewer's
# proxy server idea to get better
# coverage.
self.is_stopped = stop
for c in list(self.connections):
self._safe(c.close)
self.connections = []
def restart(self):
self.is_stopped = False
def reconnecting_engine(url=None, options=None):
url = url or config.db.url
dbapi = config.db.dialect.dbapi
if not options:
options = {}
options["module"] = ReconnectFixture(dbapi)
engine = testing_engine(url, options)
_dispose = engine.dispose
def dispose():
engine.dialect.dbapi.shutdown()
engine.dialect.dbapi.is_stopped = False
_dispose()
engine.test_shutdown = engine.dialect.dbapi.shutdown
engine.test_restart = engine.dialect.dbapi.restart
engine.dispose = dispose
return engine
def testing_engine(url=None, options=None):
"""Produce an engine configured by --options with optional overrides."""
from sqlalchemy import create_engine
from sqlalchemy.engine.url import make_url
if not options:
use_reaper = True
else:
use_reaper = options.pop("use_reaper", True)
url = url or config.db.url
url = make_url(url)
if options is None:
if config.db is None or url.drivername == config.db.url.drivername:
options = config.db_opts
else:
options = {}
    elif config.db is not None and url.drivername == config.db.url.drivername:
        default_opt = config.db_opts.copy()
        default_opt.update(options)
        options = default_opt
engine = create_engine(url, **options)
engine._has_events = True # enable event blocks, helps with profiling
if isinstance(engine.pool, pool.QueuePool):
engine.pool._timeout = 0
engine.pool._max_overflow = 0
if use_reaper:
event.listen(engine.pool, "connect", testing_reaper.connect)
event.listen(engine.pool, "checkout", testing_reaper.checkout)
event.listen(engine.pool, "invalidate", testing_reaper.invalidate)
testing_reaper.add_engine(engine)
return engine
def mock_engine(dialect_name=None):
"""Provides a mocking engine based on the current testing.db.
This is normally used to test DDL generation flow as emitted
by an Engine.
It should not be used in other cases, as assert_compile() and
assert_sql_execution() are much better choices with fewer
moving parts.
"""
from sqlalchemy import create_engine
if not dialect_name:
dialect_name = config.db.name
buffer = []
def executor(sql, *a, **kw):
buffer.append(sql)
def assert_sql(stmts):
recv = [re.sub(r"[\n\t]", "", str(s)) for s in buffer]
assert recv == stmts, recv
def print_sql():
d = engine.dialect
return "\n".join(str(s.compile(dialect=d)) for s in engine.mock)
engine = create_engine(
dialect_name + "://", strategy="mock", executor=executor
)
assert not hasattr(engine, "mock")
engine.mock = buffer
engine.assert_sql = assert_sql
engine.print_sql = print_sql
return engine
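# Illustrative usage sketch (added for clarity; not part of the original
# module).  The "sqlite" dialect name is an assumption; any installed
# dialect name works here.
def _example_mock_engine_usage():
    from sqlalchemy import Column, Integer, MetaData, Table
    metadata = MetaData()
    Table("t", metadata, Column("id", Integer, primary_key=True))
    engine = mock_engine("sqlite")
    # DDL is buffered in engine.mock instead of being executed
    metadata.create_all(engine)
    return engine.print_sql()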
class DBAPIProxyCursor(object):
"""Proxy a DBAPI cursor.
Tests can provide subclasses of this to intercept
DBAPI-level cursor operations.
"""
def __init__(self, engine, conn, *args, **kwargs):
self.engine = engine
self.connection = conn
self.cursor = conn.cursor(*args, **kwargs)
def execute(self, stmt, parameters=None, **kw):
if parameters:
return self.cursor.execute(stmt, parameters, **kw)
else:
return self.cursor.execute(stmt, **kw)
def executemany(self, stmt, params, **kw):
return self.cursor.executemany(stmt, params, **kw)
def __getattr__(self, key):
return getattr(self.cursor, key)
class DBAPIProxyConnection(object):
"""Proxy a DBAPI connection.
Tests can provide subclasses of this to intercept
DBAPI-level connection operations.
"""
def __init__(self, engine, cursor_cls):
self.conn = self._sqla_unwrap = engine.pool._creator()
self.engine = engine
self.cursor_cls = cursor_cls
def cursor(self, *args, **kwargs):
return self.cursor_cls(self.engine, self.conn, *args, **kwargs)
def close(self):
self.conn.close()
def __getattr__(self, key):
return getattr(self.conn, key)
def proxying_engine(
conn_cls=DBAPIProxyConnection, cursor_cls=DBAPIProxyCursor
):
"""Produce an engine that provides proxy hooks for
common methods.
"""
def mock_conn():
return conn_cls(config.db, cursor_cls)
return testing_engine(options={"creator": mock_conn})
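# Illustrative sketch (added; not part of the original module): intercept
# DBAPI-level execute() calls by subclassing DBAPIProxyCursor.  Requires a
# configured config.db, which proxying_engine() already assumes.
def _example_proxying_engine():
    class RecordingCursor(DBAPIProxyCursor):
        statements = []
        def execute(self, stmt, parameters=None, **kw):
            # record the raw statement before delegating to the real cursor
            RecordingCursor.statements.append(stmt)
            return DBAPIProxyCursor.execute(self, stmt, parameters, **kw)
    return proxying_engine(cursor_cls=RecordingCursor), RecordingCursor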
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/util.py | # testing/util.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import decimal
import gc
import random
import sys
import time
import types
from ..util import decorator
from ..util import defaultdict
from ..util import inspect_getfullargspec
from ..util import jython
from ..util import py2k
from ..util import pypy
if jython:
def jython_gc_collect(*args):
"""aggressive gc.collect for tests."""
gc.collect()
time.sleep(0.1)
gc.collect()
gc.collect()
return 0
# "lazy" gc, for VM's that don't GC on refcount == 0
gc_collect = lazy_gc = jython_gc_collect
elif pypy:
def pypy_gc_collect(*args):
gc.collect()
gc.collect()
gc_collect = lazy_gc = pypy_gc_collect
else:
# assume CPython - straight gc.collect, lazy_gc() is a pass
gc_collect = gc.collect
def lazy_gc():
pass
def picklers():
picklers = set()
if py2k:
try:
import cPickle
picklers.add(cPickle)
except ImportError:
pass
import pickle
picklers.add(pickle)
# yes, this thing needs this much testing
for pickle_ in picklers:
for protocol in range(-2, pickle.HIGHEST_PROTOCOL):
yield pickle_.loads, lambda d: pickle_.dumps(d, protocol)
def round_decimal(value, prec):
if isinstance(value, float):
return round(value, prec)
# can also use shift() here but that is 2.6 only
return (value * decimal.Decimal("1" + "0" * prec)).to_integral(
decimal.ROUND_FLOOR
) / pow(10, prec)
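# For example (added note):
#
#   round_decimal(decimal.Decimal("1.237"), 2)  # -> Decimal('1.23')
#
# ROUND_FLOOR truncates toward negative infinity rather than rounding
# half-up.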
class RandomSet(set):
def __iter__(self):
l = list(set.__iter__(self))
random.shuffle(l)
return iter(l)
def pop(self):
index = random.randint(0, len(self) - 1)
item = list(set.__iter__(self))[index]
self.remove(item)
return item
def union(self, other):
return RandomSet(set.union(self, other))
def difference(self, other):
return RandomSet(set.difference(self, other))
def intersection(self, other):
return RandomSet(set.intersection(self, other))
def copy(self):
return RandomSet(self)
def conforms_partial_ordering(tuples, sorted_elements):
"""True if the given sorting conforms to the given partial ordering."""
deps = defaultdict(set)
for parent, child in tuples:
deps[parent].add(child)
for i, node in enumerate(sorted_elements):
for n in sorted_elements[i:]:
if node in deps[n]:
return False
else:
return True
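# For example (added note): each (parent, child) tuple requires "parent"
# to sort before "child":
#
#   conforms_partial_ordering([("a", "b"), ("b", "c")], ["a", "b", "c"])
#   # -> True
#   conforms_partial_ordering([("a", "b")], ["b", "a"])
#   # -> False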
def all_partial_orderings(tuples, elements):
edges = defaultdict(set)
for parent, child in tuples:
edges[child].add(parent)
def _all_orderings(elements):
if len(elements) == 1:
yield list(elements)
else:
for elem in elements:
subset = set(elements).difference([elem])
if not subset.intersection(edges[elem]):
for sub_ordering in _all_orderings(subset):
yield [elem] + sub_ordering
return iter(_all_orderings(elements))
def function_named(fn, name):
"""Return a function with a given __name__.
    Assigns to __name__ and returns the original function when the Python
    implementation allows it; otherwise a new function is constructed.
This function should be phased out as much as possible
in favor of @decorator. Tests that "generate" many named tests
should be modernized.
"""
try:
fn.__name__ = name
except TypeError:
fn = types.FunctionType(
fn.__code__, fn.__globals__, name, fn.__defaults__, fn.__closure__
)
return fn
def run_as_contextmanager(ctx, fn, *arg, **kw):
"""Run the given function under the given contextmanager,
simulating the behavior of 'with' to support older
Python versions.
    This is no longer necessary as 2.6 is now the minimum
    Python version; however, some tests still use this structure.
"""
obj = ctx.__enter__()
try:
result = fn(obj, *arg, **kw)
ctx.__exit__(None, None, None)
return result
except:
exc_info = sys.exc_info()
raise_ = ctx.__exit__(*exc_info)
if not raise_:
raise
else:
return raise_
def rowset(results):
"""Converts the results of sql execution into a plain set of column tuples.
Useful for asserting the results of an unordered query.
"""
return {tuple(row) for row in results}
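# For example (added note), comparing an unordered SELECT result:
#
#   rowset(conn.execute(stmt)) == {(1, "a"), (2, "b")}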
def fail(msg):
assert False, msg
@decorator
def provide_metadata(fn, *args, **kw):
"""Provide bound MetaData for a single test, dropping afterwards."""
from . import config
from . import engines
from sqlalchemy import schema
metadata = schema.MetaData(config.db)
self = args[0]
prev_meta = getattr(self, "metadata", None)
self.metadata = metadata
try:
return fn(*args, **kw)
finally:
engines.drop_all_tables(metadata, config.db)
self.metadata = prev_meta
def flag_combinations(*combinations):
"""A facade around @testing.combinations() oriented towards boolean
keyword-based arguments.
Basically generates a nice looking identifier based on the keywords
and also sets up the argument names.
E.g.::
@testing.flag_combinations(
dict(lazy=False, passive=False),
dict(lazy=True, passive=False),
dict(lazy=False, passive=True),
dict(lazy=False, passive=True, raiseload=True),
)
would result in::
@testing.combinations(
('', False, False, False),
('lazy', True, False, False),
            ('passive', False, True, False),
            ('passive_raiseload', False, True, True),
id_='iaaa',
argnames='lazy,passive,raiseload'
)
"""
from . import config
keys = set()
for d in combinations:
keys.update(d)
keys = sorted(keys)
return config.combinations(
*[
("_".join(k for k in keys if d.get(k, False)),)
+ tuple(d.get(k, False) for k in keys)
for d in combinations
],
id_="i" + ("a" * len(keys)),
argnames=",".join(keys)
)
def resolve_lambda(__fn, **kw):
"""Given a no-arg lambda and a namespace, return a new lambda that
has all the values filled in.
This is used so that we can have module-level fixtures that
refer to instance-level variables using lambdas.
"""
pos_args = inspect_getfullargspec(__fn)[0]
pass_pos_args = {arg: kw.pop(arg) for arg in pos_args}
glb = dict(__fn.__globals__)
glb.update(kw)
new_fn = types.FunctionType(__fn.__code__, glb)
return new_fn(**pass_pos_args)
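# For example (added note): names that are not positional arguments of the
# lambda are injected as globals, so a module-level fixture can refer to
# per-test state (names below are hypothetical):
#
#   fn = lambda: some_table.c.x == bound_value
#   expr = resolve_lambda(fn, some_table=my_table, bound_value=5)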
def metadata_fixture(ddl="function"):
"""Provide MetaData for a pytest fixture."""
from . import config
def decorate(fn):
def run_ddl(self):
from sqlalchemy import schema
metadata = self.metadata = schema.MetaData()
try:
result = fn(self, metadata)
metadata.create_all(config.db)
# TODO:
# somehow get a per-function dml erase fixture here
yield result
finally:
metadata.drop_all(config.db)
return config.fixture(scope=ddl)(run_ddl)
return decorate
def force_drop_names(*names):
"""Force the given table names to be dropped after test complete,
isolating for foreign key cycles
"""
from . import config
from sqlalchemy import inspect
@decorator
def go(fn, *args, **kw):
try:
return fn(*args, **kw)
finally:
drop_all_tables(config.db, inspect(config.db), include_names=names)
return go
class adict(dict):
"""Dict keys available as attributes. Shadows."""
def __getattribute__(self, key):
try:
return self[key]
except KeyError:
return dict.__getattribute__(self, key)
def __call__(self, *keys):
return tuple([self[key] for key in keys])
get_all = __call__
def drop_all_tables(engine, inspector, schema=None, include_names=None):
from sqlalchemy import (
Column,
Table,
Integer,
MetaData,
ForeignKeyConstraint,
)
from sqlalchemy.schema import DropTable, DropConstraint
if include_names is not None:
include_names = set(include_names)
with engine.connect() as conn:
for tname, fkcs in reversed(
inspector.get_sorted_table_and_fkc_names(schema=schema)
):
if tname:
if include_names is not None and tname not in include_names:
continue
conn.execute(
DropTable(Table(tname, MetaData(), schema=schema))
)
elif fkcs:
if not engine.dialect.supports_alter:
continue
for tname, fkc in fkcs:
if (
include_names is not None
and tname not in include_names
):
continue
tb = Table(
tname,
MetaData(),
Column("x", Integer),
Column("y", Integer),
schema=schema,
)
conn.execute(
DropConstraint(
ForeignKeyConstraint([tb.c.x], [tb.c.y], name=fkc)
)
)
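# For example (added note; the table name is hypothetical):
#
#   drop_all_tables(config.db, inspect(config.db), include_names={"user"})
#
# drops only the "user" table, first dropping foreign key constraints
# involved in cycles where the backend supports ALTER.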
def teardown_events(event_cls):
@decorator
def decorate(fn, *arg, **kw):
try:
return fn(*arg, **kw)
finally:
event_cls._clear()
return decorate
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/warnings.py | # testing/warnings.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
import warnings
from . import assertions
from .. import exc as sa_exc
def setup_filters():
"""Set global warning behavior for the test suite."""
warnings.filterwarnings(
"ignore", category=sa_exc.SAPendingDeprecationWarning
)
warnings.filterwarnings("error", category=sa_exc.SADeprecationWarning)
warnings.filterwarnings("error", category=sa_exc.SAWarning)
warnings.filterwarnings(
"ignore",
category=sa_exc.SAWarning,
message=r"Oracle compatibility version .* is known to have a "
"maximum identifier",
)
# some selected deprecations...
warnings.filterwarnings("error", category=DeprecationWarning)
warnings.filterwarnings(
"ignore", category=DeprecationWarning, message=".*StopIteration"
)
warnings.filterwarnings(
"ignore", category=DeprecationWarning, message=".*inspect.getargspec"
)
try:
import pytest
except ImportError:
pass
else:
warnings.filterwarnings(
"once", category=pytest.PytestDeprecationWarning
)
def assert_warnings(fn, warning_msgs, regex=False):
"""Assert that each of the given warnings are emitted by fn.
Deprecated. Please use assertions.expect_warnings().
"""
with assertions._expect_warnings(
sa_exc.SAWarning, warning_msgs, regex=regex
):
return fn()
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/__init__.py | # testing/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import config # noqa
from . import mock # noqa
from .assertions import assert_raises # noqa
from .assertions import assert_raises_context_ok # noqa
from .assertions import assert_raises_message # noqa
from .assertions import assert_raises_message_context_ok # noqa
from .assertions import assert_raises_return # noqa
from .assertions import AssertsCompiledSQL # noqa
from .assertions import AssertsExecutionResults # noqa
from .assertions import ComparesTables # noqa
from .assertions import emits_warning # noqa
from .assertions import emits_warning_on # noqa
from .assertions import eq_ # noqa
from .assertions import eq_ignore_whitespace # noqa
from .assertions import eq_regex # noqa
from .assertions import expect_deprecated # noqa
from .assertions import expect_warnings # noqa
from .assertions import in_ # noqa
from .assertions import is_ # noqa
from .assertions import is_false # noqa
from .assertions import is_instance_of # noqa
from .assertions import is_not_ # noqa
from .assertions import is_true # noqa
from .assertions import le_ # noqa
from .assertions import ne_ # noqa
from .assertions import not_in_ # noqa
from .assertions import startswith_ # noqa
from .assertions import uses_deprecated # noqa
from .config import combinations # noqa
from .config import db # noqa
from .config import fixture # noqa
from .config import requirements as requires # noqa
from .exclusions import _is_excluded # noqa
from .exclusions import _server_version # noqa
from .exclusions import against as _against # noqa
from .exclusions import db_spec # noqa
from .exclusions import exclude # noqa
from .exclusions import fails # noqa
from .exclusions import fails_if # noqa
from .exclusions import fails_on # noqa
from .exclusions import fails_on_everything_except # noqa
from .exclusions import future # noqa
from .exclusions import only_if # noqa
from .exclusions import only_on # noqa
from .exclusions import skip # noqa
from .exclusions import skip_if # noqa
from .util import adict # noqa
from .util import fail # noqa
from .util import flag_combinations # noqa
from .util import force_drop_names # noqa
from .util import metadata_fixture # noqa
from .util import provide_metadata # noqa
from .util import resolve_lambda # noqa
from .util import rowset # noqa
from .util import run_as_contextmanager # noqa
from .util import teardown_events # noqa
from .warnings import assert_warnings # noqa
def against(*queries):
return _against(config._current, *queries)
crashes = skip
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/assertions.py | # testing/assertions.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
import contextlib
import re
import sys
import warnings
from . import assertsql
from . import config
from . import mock
from . import util as testutil
from .exclusions import db_spec
from .util import fail
from .. import exc as sa_exc
from .. import pool
from .. import schema
from .. import types as sqltypes
from .. import util
from ..engine import default
from ..engine import url
from ..util import compat
from ..util import decorator
def expect_warnings(*messages, **kw):
"""Context manager which expects one or more warnings.
With no arguments, squelches all SAWarnings emitted via
sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise
pass string expressions that will match selected warnings via regex;
all non-matching warnings are sent through.
The expect version **asserts** that the warnings were in fact seen.
Note that the test suite sets SAWarning warnings to raise exceptions.
"""
return _expect_warnings(sa_exc.SAWarning, messages, **kw)
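# Illustrative usage (added note), assuming the code under test emits an
# SAWarning whose message matches the given regex:
#
#   with expect_warnings("Dialect .* does not support"):
#       do_something_that_warns()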
@contextlib.contextmanager
def expect_warnings_on(db, *messages, **kw):
"""Context manager which expects one or more warnings on specific
dialects.
The expect version **asserts** that the warnings were in fact seen.
"""
spec = db_spec(db)
if isinstance(db, util.string_types) and not spec(config._current):
yield
else:
with expect_warnings(*messages, **kw):
yield
def emits_warning(*messages):
"""Decorator form of expect_warnings().
Note that emits_warning does **not** assert that the warnings
were in fact seen.
"""
@decorator
def decorate(fn, *args, **kw):
with expect_warnings(assert_=False, *messages):
return fn(*args, **kw)
return decorate
def expect_deprecated(*messages, **kw):
return _expect_warnings(sa_exc.SADeprecationWarning, messages, **kw)
def emits_warning_on(db, *messages):
"""Mark a test as emitting a warning on a specific dialect.
With no arguments, squelches all SAWarning failures. Or pass one or more
strings; these will be matched to the root of the warning description by
warnings.filterwarnings().
Note that emits_warning_on does **not** assert that the warnings
were in fact seen.
"""
@decorator
def decorate(fn, *args, **kw):
with expect_warnings_on(db, assert_=False, *messages):
return fn(*args, **kw)
return decorate
def uses_deprecated(*messages):
"""Mark a test as immune from fatal deprecation warnings.
With no arguments, squelches all SADeprecationWarning failures.
Or pass one or more strings; these will be matched to the root
of the warning description by warnings.filterwarnings().
As a special case, you may pass a function name prefixed with //
and it will be re-written as needed to match the standard warning
verbiage emitted by the sqlalchemy.util.deprecated decorator.
Note that uses_deprecated does **not** assert that the warnings
were in fact seen.
"""
@decorator
def decorate(fn, *args, **kw):
with expect_deprecated(*messages, assert_=False):
return fn(*args, **kw)
return decorate
@contextlib.contextmanager
def _expect_warnings(
exc_cls, messages, regex=True, assert_=True, py2konly=False
):
if regex:
filters = [re.compile(msg, re.I | re.S) for msg in messages]
else:
filters = messages
seen = set(filters)
real_warn = warnings.warn
def our_warn(msg, *arg, **kw):
if isinstance(msg, exc_cls):
exception = msg
msg = str(exception)
elif arg:
exception = arg[0]
else:
exception = None
if not exception or not issubclass(exception, exc_cls):
return real_warn(msg, *arg, **kw)
if not filters:
return
for filter_ in filters:
if (regex and filter_.match(msg)) or (
not regex and filter_ == msg
):
seen.discard(filter_)
break
else:
real_warn(msg, *arg, **kw)
with mock.patch("warnings.warn", our_warn):
yield
if assert_ and (not py2konly or not compat.py3k):
assert not seen, "Warnings were not seen: %s" % ", ".join(
"%r" % (s.pattern if regex else s) for s in seen
)
def global_cleanup_assertions():
"""Check things that have to be finalized at the end of a test suite.
Hardcoded at the moment, a modular system can be built here
to support things like PG prepared transactions, tables all
dropped, etc.
"""
_assert_no_stray_pool_connections()
_STRAY_CONNECTION_FAILURES = 0
def _assert_no_stray_pool_connections():
global _STRAY_CONNECTION_FAILURES
# lazy gc on cPython means "do nothing." pool connections
# shouldn't be in cycles, should go away.
testutil.lazy_gc()
# however, once in awhile, on an EC2 machine usually,
# there's a ref in there. usually just one.
if pool._refs:
# OK, let's be somewhat forgiving.
_STRAY_CONNECTION_FAILURES += 1
print(
"Encountered a stray connection in test cleanup: %s"
% str(pool._refs)
)
# then do a real GC sweep. We shouldn't even be here
# so a single sweep should really be doing it, otherwise
# there's probably a real unreachable cycle somewhere.
testutil.gc_collect()
# if we've already had two of these occurrences, or
# after a hard gc sweep we still have pool._refs?!
# now we have to raise.
if pool._refs:
err = str(pool._refs)
# but clean out the pool refs collection directly,
# reset the counter,
# so the error doesn't at least keep happening.
pool._refs.clear()
_STRAY_CONNECTION_FAILURES = 0
warnings.warn(
"Stray connection refused to leave " "after gc.collect(): %s" % err
)
elif _STRAY_CONNECTION_FAILURES > 10:
assert False, "Encountered more than 10 stray connections"
_STRAY_CONNECTION_FAILURES = 0
def eq_regex(a, b, msg=None):
assert re.match(b, a), msg or "%r !~ %r" % (a, b)
def eq_(a, b, msg=None):
"""Assert a == b, with repr messaging on failure."""
assert a == b, msg or "%r != %r" % (a, b)
def ne_(a, b, msg=None):
"""Assert a != b, with repr messaging on failure."""
assert a != b, msg or "%r == %r" % (a, b)
def le_(a, b, msg=None):
"""Assert a <= b, with repr messaging on failure."""
assert a <= b, msg or "%r != %r" % (a, b)
def is_instance_of(a, b, msg=None):
assert isinstance(a, b), msg or "%r is not an instance of %r" % (a, b)
def is_true(a, msg=None):
is_(a, True, msg=msg)
def is_false(a, msg=None):
is_(a, False, msg=msg)
def is_(a, b, msg=None):
"""Assert a is b, with repr messaging on failure."""
assert a is b, msg or "%r is not %r" % (a, b)
def is_not_(a, b, msg=None):
"""Assert a is not b, with repr messaging on failure."""
assert a is not b, msg or "%r is %r" % (a, b)
def in_(a, b, msg=None):
"""Assert a in b, with repr messaging on failure."""
assert a in b, msg or "%r not in %r" % (a, b)
def not_in_(a, b, msg=None):
"""Assert a in not b, with repr messaging on failure."""
assert a not in b, msg or "%r is in %r" % (a, b)
def startswith_(a, fragment, msg=None):
"""Assert a.startswith(fragment), with repr messaging on failure."""
assert a.startswith(fragment), msg or "%r does not start with %r" % (
a,
fragment,
)
def eq_ignore_whitespace(a, b, msg=None):
a = re.sub(r"^\s+?|\n", "", a)
a = re.sub(r" {2,}", " ", a)
b = re.sub(r"^\s+?|\n", "", b)
b = re.sub(r" {2,}", " ", b)
assert a == b, msg or "%r != %r" % (a, b)
def _assert_proper_exception_context(exception):
"""assert that any exception we're catching does not have a __context__
without a __cause__, and that __suppress_context__ is never set.
    Python 3 will report nested exceptions as "during the handling of
    error X, error Y occurred".  That's not what we want to do; we want
    these exceptions in a cause chain.
"""
if not util.py3k:
return
if (
exception.__context__ is not exception.__cause__
and not exception.__suppress_context__
):
assert False, (
"Exception %r was correctly raised but did not set a cause, "
"within context %r as its cause."
% (exception, exception.__context__)
)
def assert_raises(except_cls, callable_, *args, **kw):
_assert_raises(except_cls, callable_, args, kw, check_context=True)
def assert_raises_context_ok(except_cls, callable_, *args, **kw):
_assert_raises(
except_cls, callable_, args, kw,
)
def assert_raises_return(except_cls, callable_, *args, **kw):
return _assert_raises(except_cls, callable_, args, kw, check_context=True)
def assert_raises_message(except_cls, msg, callable_, *args, **kwargs):
_assert_raises(
except_cls, callable_, args, kwargs, msg=msg, check_context=True
)
def assert_raises_message_context_ok(
except_cls, msg, callable_, *args, **kwargs
):
_assert_raises(except_cls, callable_, args, kwargs, msg=msg)
def _assert_raises(
except_cls, callable_, args, kwargs, msg=None, check_context=False
):
ret_err = None
if check_context:
are_we_already_in_a_traceback = sys.exc_info()[0]
try:
callable_(*args, **kwargs)
success = False
except except_cls as err:
ret_err = err
success = True
if msg is not None:
assert re.search(
msg, util.text_type(err), re.UNICODE
), "%r !~ %s" % (msg, err,)
if check_context and not are_we_already_in_a_traceback:
_assert_proper_exception_context(err)
print(util.text_type(err).encode("utf-8"))
# assert outside the block so it works for AssertionError too !
assert success, "Callable did not raise an exception"
return ret_err
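# For example (added note):
#
#   assert_raises_message(
#       ValueError, "invalid literal", int, "not-a-number"
#   )
#
# asserts that int("not-a-number") raises ValueError with a message
# matching the regex "invalid literal".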
class AssertsCompiledSQL(object):
def assert_compile(
self,
clause,
result,
params=None,
checkparams=None,
dialect=None,
checkpositional=None,
check_prefetch=None,
use_default_dialect=False,
allow_dialect_select=False,
literal_binds=False,
schema_translate_map=None,
):
if use_default_dialect:
dialect = default.DefaultDialect()
elif allow_dialect_select:
dialect = None
else:
if dialect is None:
dialect = getattr(self, "__dialect__", None)
if dialect is None:
dialect = config.db.dialect
elif dialect == "default":
dialect = default.DefaultDialect()
elif dialect == "default_enhanced":
dialect = default.StrCompileDialect()
elif isinstance(dialect, util.string_types):
dialect = url.URL(dialect).get_dialect()()
kw = {}
compile_kwargs = {}
if schema_translate_map:
kw["schema_translate_map"] = schema_translate_map
if params is not None:
kw["column_keys"] = list(params)
if literal_binds:
compile_kwargs["literal_binds"] = True
from sqlalchemy import orm
if isinstance(clause, orm.Query):
context = clause._compile_context()
context.statement.use_labels = True
clause = context.statement
elif isinstance(clause, orm.persistence.BulkUD):
with mock.patch.object(clause, "_execute_stmt") as stmt_mock:
clause.exec_()
clause = stmt_mock.mock_calls[0][1][0]
if compile_kwargs:
kw["compile_kwargs"] = compile_kwargs
c = clause.compile(dialect=dialect, **kw)
param_str = repr(getattr(c, "params", {}))
if util.py3k:
param_str = param_str.encode("utf-8").decode("ascii", "ignore")
print(
("\nSQL String:\n" + util.text_type(c) + param_str).encode(
"utf-8"
)
)
else:
print(
"\nSQL String:\n"
+ util.text_type(c).encode("utf-8")
+ param_str
)
cc = re.sub(r"[\n\t]", "", util.text_type(c))
eq_(cc, result, "%r != %r on dialect %r" % (cc, result, dialect))
if checkparams is not None:
eq_(c.construct_params(params), checkparams)
if checkpositional is not None:
p = c.construct_params(params)
eq_(tuple([p[x] for x in c.positiontup]), checkpositional)
if check_prefetch is not None:
eq_(c.prefetch, check_prefetch)
class ComparesTables(object):
def assert_tables_equal(self, table, reflected_table, strict_types=False):
assert len(table.c) == len(reflected_table.c)
for c, reflected_c in zip(table.c, reflected_table.c):
eq_(c.name, reflected_c.name)
assert reflected_c is reflected_table.c[c.name]
eq_(c.primary_key, reflected_c.primary_key)
eq_(c.nullable, reflected_c.nullable)
if strict_types:
msg = "Type '%s' doesn't correspond to type '%s'"
assert isinstance(reflected_c.type, type(c.type)), msg % (
reflected_c.type,
c.type,
)
else:
self.assert_types_base(reflected_c, c)
if isinstance(c.type, sqltypes.String):
eq_(c.type.length, reflected_c.type.length)
eq_(
{f.column.name for f in c.foreign_keys},
{f.column.name for f in reflected_c.foreign_keys},
)
if c.server_default:
assert isinstance(
reflected_c.server_default, schema.FetchedValue
)
assert len(table.primary_key) == len(reflected_table.primary_key)
for c in table.primary_key:
assert reflected_table.primary_key.columns[c.name] is not None
def assert_types_base(self, c1, c2):
assert c1.type._compare_type_affinity(c2.type), (
"On column %r, type '%s' doesn't correspond to type '%s'"
% (c1.name, c1.type, c2.type)
)
class AssertsExecutionResults(object):
def assert_result(self, result, class_, *objects):
result = list(result)
print(repr(result))
self.assert_list(result, class_, objects)
def assert_list(self, result, class_, list_):
self.assert_(
len(result) == len(list_),
"result list is not the same size as test list, "
+ "for class "
+ class_.__name__,
)
for i in range(0, len(list_)):
self.assert_row(class_, result[i], list_[i])
def assert_row(self, class_, rowobj, desc):
self.assert_(
rowobj.__class__ is class_, "item class is not " + repr(class_)
)
for key, value in desc.items():
if isinstance(value, tuple):
if isinstance(value[1], list):
self.assert_list(getattr(rowobj, key), value[0], value[1])
else:
self.assert_row(value[0], getattr(rowobj, key), value[1])
else:
self.assert_(
getattr(rowobj, key) == value,
"attribute %s value %s does not match %s"
% (key, getattr(rowobj, key), value),
)
def assert_unordered_result(self, result, cls, *expected):
"""As assert_result, but the order of objects is not considered.
The algorithm is very expensive but not a big deal for the small
numbers of rows that the test suite manipulates.
"""
class immutabledict(dict):
def __hash__(self):
return id(self)
found = util.IdentitySet(result)
expected = {immutabledict(e) for e in expected}
for wrong in util.itertools_filterfalse(
lambda o: isinstance(o, cls), found
):
fail(
'Unexpected type "%s", expected "%s"'
% (type(wrong).__name__, cls.__name__)
)
if len(found) != len(expected):
fail(
'Unexpected object count "%s", expected "%s"'
% (len(found), len(expected))
)
NOVALUE = object()
def _compare_item(obj, spec):
for key, value in spec.items():
if isinstance(value, tuple):
try:
self.assert_unordered_result(
getattr(obj, key), value[0], *value[1]
)
except AssertionError:
return False
else:
if getattr(obj, key, NOVALUE) != value:
return False
return True
for expected_item in expected:
for found_item in found:
if _compare_item(found_item, expected_item):
found.remove(found_item)
break
else:
fail(
"Expected %s instance with attributes %s not found."
% (cls.__name__, repr(expected_item))
)
return True
def sql_execution_asserter(self, db=None):
if db is None:
from . import db as db
return assertsql.assert_engine(db)
def assert_sql_execution(self, db, callable_, *rules):
with self.sql_execution_asserter(db) as asserter:
result = callable_()
asserter.assert_(*rules)
return result
def assert_sql(self, db, callable_, rules):
newrules = []
for rule in rules:
if isinstance(rule, dict):
newrule = assertsql.AllOf(
*[assertsql.CompiledSQL(k, v) for k, v in rule.items()]
)
else:
newrule = assertsql.CompiledSQL(*rule)
newrules.append(newrule)
return self.assert_sql_execution(db, callable_, *newrules)
def assert_sql_count(self, db, callable_, count):
self.assert_sql_execution(
db, callable_, assertsql.CountStatements(count)
)
def assert_multiple_sql_count(self, dbs, callable_, counts):
recs = [
(self.sql_execution_asserter(db), db, count)
for (db, count) in zip(dbs, counts)
]
asserters = []
for ctx, db, count in recs:
asserters.append(ctx.__enter__())
try:
return callable_()
finally:
for asserter, (ctx, db, count) in zip(asserters, recs):
ctx.__exit__(None, None, None)
asserter.assert_(assertsql.CountStatements(count))
@contextlib.contextmanager
def assert_execution(self, db, *rules):
with self.sql_execution_asserter(db) as asserter:
yield
asserter.assert_(*rules)
def assert_statement_count(self, db, count):
return self.assert_execution(db, assertsql.CountStatements(count))
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/replay_fixture.py | from collections import deque
import contextlib
import types
from . import config
from . import fixtures
from . import profiling
from .. import create_engine
from .. import MetaData
from .. import util
from ..orm import Session
class ReplayFixtureTest(fixtures.TestBase):
@contextlib.contextmanager
def _dummy_ctx(self, *arg, **kw):
yield
def test_invocation(self):
dbapi_session = ReplayableSession()
creator = config.db.pool._creator
def recorder():
return dbapi_session.recorder(creator())
engine = create_engine(
config.db.url, creator=recorder, use_native_hstore=False
)
self.metadata = MetaData(engine)
self.engine = engine
self.session = Session(engine)
self.setup_engine()
try:
self._run_steps(ctx=self._dummy_ctx)
finally:
self.teardown_engine()
engine.dispose()
def player():
return dbapi_session.player()
engine = create_engine(
config.db.url, creator=player, use_native_hstore=False
)
self.metadata = MetaData(engine)
self.engine = engine
self.session = Session(engine)
self.setup_engine()
try:
self._run_steps(ctx=profiling.count_functions)
finally:
self.session.close()
engine.dispose()
def setup_engine(self):
pass
def teardown_engine(self):
pass
def _run_steps(self, ctx):
raise NotImplementedError()
class ReplayableSession(object):
"""A simple record/playback tool.
This is *not* a mock testing class. It only records a session for later
playback and makes no assertions on call consistency whatsoever. It's
unlikely to be suitable for anything other than DB-API recording.
"""
Callable = object()
NoAttribute = object()
if util.py2k:
Natives = set(
[getattr(types, t) for t in dir(types) if not t.startswith("_")]
).difference(
[
getattr(types, t)
for t in (
"FunctionType",
"BuiltinFunctionType",
"MethodType",
"BuiltinMethodType",
"LambdaType",
"UnboundMethodType",
)
]
)
else:
Natives = (
set(
[
getattr(types, t)
for t in dir(types)
if not t.startswith("_")
]
)
.union(
[
type(t) if not isinstance(t, type) else t
for t in __builtins__.values()
]
)
.difference(
[
getattr(types, t)
for t in (
"FunctionType",
"BuiltinFunctionType",
"MethodType",
"BuiltinMethodType",
"LambdaType",
)
]
)
)
def __init__(self):
self.buffer = deque()
def recorder(self, base):
return self.Recorder(self.buffer, base)
def player(self):
return self.Player(self.buffer)
class Recorder(object):
def __init__(self, buffer, subject):
self._buffer = buffer
self._subject = subject
def __call__(self, *args, **kw):
subject, buffer = [
object.__getattribute__(self, x)
for x in ("_subject", "_buffer")
]
result = subject(*args, **kw)
if type(result) not in ReplayableSession.Natives:
buffer.append(ReplayableSession.Callable)
return type(self)(buffer, result)
else:
buffer.append(result)
return result
@property
def _sqla_unwrap(self):
return self._subject
def __getattribute__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
pass
subject, buffer = [
object.__getattribute__(self, x)
for x in ("_subject", "_buffer")
]
try:
result = type(subject).__getattribute__(subject, key)
except AttributeError:
buffer.append(ReplayableSession.NoAttribute)
raise
else:
if type(result) not in ReplayableSession.Natives:
buffer.append(ReplayableSession.Callable)
return type(self)(buffer, result)
else:
buffer.append(result)
return result
class Player(object):
def __init__(self, buffer):
self._buffer = buffer
def __call__(self, *args, **kw):
buffer = object.__getattribute__(self, "_buffer")
result = buffer.popleft()
if result is ReplayableSession.Callable:
return self
else:
return result
@property
def _sqla_unwrap(self):
return None
def __getattribute__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
pass
buffer = object.__getattribute__(self, "_buffer")
result = buffer.popleft()
if result is ReplayableSession.Callable:
return self
elif result is ReplayableSession.NoAttribute:
raise AttributeError(key)
else:
return result
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/assertsql.py | # testing/assertsql.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import collections
import contextlib
import re
from .. import event
from .. import util
from ..engine import url
from ..engine.default import DefaultDialect
from ..engine.util import _distill_params
from ..schema import _DDLCompiles
class AssertRule(object):
is_consumed = False
errormessage = None
consume_statement = True
def process_statement(self, execute_observed):
pass
def no_more_statements(self):
assert False, (
"All statements are complete, but pending "
"assertion rules remain"
)
class SQLMatchRule(AssertRule):
pass
class CursorSQL(SQLMatchRule):
consume_statement = False
def __init__(self, statement, params=None):
self.statement = statement
self.params = params
def process_statement(self, execute_observed):
stmt = execute_observed.statements[0]
if self.statement != stmt.statement or (
self.params is not None and self.params != stmt.parameters
):
self.errormessage = (
"Testing for exact SQL %s parameters %s received %s %s"
% (
self.statement,
self.params,
stmt.statement,
stmt.parameters,
)
)
else:
execute_observed.statements.pop(0)
self.is_consumed = True
if not execute_observed.statements:
self.consume_statement = True
class CompiledSQL(SQLMatchRule):
def __init__(self, statement, params=None, dialect="default"):
self.statement = statement
self.params = params
self.dialect = dialect
def _compare_sql(self, execute_observed, received_statement):
stmt = re.sub(r"[\n\t]", "", self.statement)
return received_statement == stmt
def _compile_dialect(self, execute_observed):
if self.dialect == "default":
return DefaultDialect()
else:
# ugh
if self.dialect == "postgresql":
params = {"implicit_returning": True}
else:
params = {}
return url.URL(self.dialect).get_dialect()(**params)
def _received_statement(self, execute_observed):
"""reconstruct the statement and params in terms
of a target dialect, which for CompiledSQL is just DefaultDialect."""
context = execute_observed.context
compare_dialect = self._compile_dialect(execute_observed)
if isinstance(context.compiled.statement, _DDLCompiles):
compiled = context.compiled.statement.compile(
dialect=compare_dialect,
schema_translate_map=context.execution_options.get(
"schema_translate_map"
),
)
else:
compiled = context.compiled.statement.compile(
dialect=compare_dialect,
column_keys=context.compiled.column_keys,
inline=context.compiled.inline,
schema_translate_map=context.execution_options.get(
"schema_translate_map"
),
)
_received_statement = re.sub(r"[\n\t]", "", util.text_type(compiled))
parameters = execute_observed.parameters
if not parameters:
_received_parameters = [compiled.construct_params()]
else:
_received_parameters = [
compiled.construct_params(m) for m in parameters
]
return _received_statement, _received_parameters
def process_statement(self, execute_observed):
context = execute_observed.context
_received_statement, _received_parameters = self._received_statement(
execute_observed
)
params = self._all_params(context)
equivalent = self._compare_sql(execute_observed, _received_statement)
if equivalent:
if params is not None:
all_params = list(params)
all_received = list(_received_parameters)
while all_params and all_received:
param = dict(all_params.pop(0))
for idx, received in enumerate(list(all_received)):
# do a positive compare only
for param_key in param:
# a key in param did not match current
# 'received'
if (
param_key not in received
or received[param_key] != param[param_key]
):
break
else:
# all keys in param matched 'received';
# onto next param
del all_received[idx]
break
else:
# param did not match any entry
# in all_received
equivalent = False
break
if all_params or all_received:
equivalent = False
if equivalent:
self.is_consumed = True
self.errormessage = None
else:
self.errormessage = self._failure_message(params) % {
"received_statement": _received_statement,
"received_parameters": _received_parameters,
}
def _all_params(self, context):
if self.params:
if util.callable(self.params):
params = self.params(context)
else:
params = self.params
if not isinstance(params, list):
params = [params]
return params
else:
return None
def _failure_message(self, expected_params):
return (
"Testing for compiled statement %r partial params %s, "
"received %%(received_statement)r with params "
"%%(received_parameters)r"
% (
self.statement.replace("%", "%%"),
repr(expected_params).replace("%", "%%"),
)
)
class RegexSQL(CompiledSQL):
def __init__(self, regex, params=None):
SQLMatchRule.__init__(self)
self.regex = re.compile(regex)
self.orig_regex = regex
self.params = params
self.dialect = "default"
def _failure_message(self, expected_params):
return (
"Testing for compiled statement ~%r partial params %s, "
"received %%(received_statement)r with params "
"%%(received_parameters)r"
% (
self.orig_regex.replace("%", "%%"),
repr(expected_params).replace("%", "%%"),
)
)
def _compare_sql(self, execute_observed, received_statement):
return bool(self.regex.match(received_statement))
class DialectSQL(CompiledSQL):
def _compile_dialect(self, execute_observed):
return execute_observed.context.dialect
def _compare_no_space(self, real_stmt, received_stmt):
stmt = re.sub(r"[\n\t]", "", real_stmt)
return received_stmt == stmt
def _received_statement(self, execute_observed):
received_stmt, received_params = super(
DialectSQL, self
)._received_statement(execute_observed)
# TODO: why do we need this part?
for real_stmt in execute_observed.statements:
if self._compare_no_space(real_stmt.statement, received_stmt):
break
else:
raise AssertionError(
"Can't locate compiled statement %r in list of "
"statements actually invoked" % received_stmt
)
return received_stmt, execute_observed.context.compiled_parameters
def _compare_sql(self, execute_observed, received_statement):
stmt = re.sub(r"[\n\t]", "", self.statement)
# convert our comparison statement to have the
# paramstyle of the received
paramstyle = execute_observed.context.dialect.paramstyle
if paramstyle == "pyformat":
stmt = re.sub(r":([\w_]+)", r"%(\1)s", stmt)
else:
# positional params
repl = None
if paramstyle == "qmark":
repl = "?"
elif paramstyle == "format":
repl = r"%s"
elif paramstyle == "numeric":
repl = None
stmt = re.sub(r":([\w_]+)", repl, stmt)
return received_statement == stmt
class CountStatements(AssertRule):
def __init__(self, count):
self.count = count
self._statement_count = 0
def process_statement(self, execute_observed):
self._statement_count += 1
def no_more_statements(self):
if self.count != self._statement_count:
assert False, "desired statement count %d does not match %d" % (
self.count,
self._statement_count,
)
class AllOf(AssertRule):
def __init__(self, *rules):
self.rules = set(rules)
def process_statement(self, execute_observed):
for rule in list(self.rules):
rule.errormessage = None
rule.process_statement(execute_observed)
if rule.is_consumed:
self.rules.discard(rule)
if not self.rules:
self.is_consumed = True
break
elif not rule.errormessage:
# rule is not done yet
self.errormessage = None
break
else:
self.errormessage = list(self.rules)[0].errormessage
class EachOf(AssertRule):
def __init__(self, *rules):
self.rules = list(rules)
def process_statement(self, execute_observed):
while self.rules:
rule = self.rules[0]
rule.process_statement(execute_observed)
if rule.is_consumed:
self.rules.pop(0)
elif rule.errormessage:
self.errormessage = rule.errormessage
if rule.consume_statement:
break
if not self.rules:
self.is_consumed = True
def no_more_statements(self):
if self.rules and not self.rules[0].is_consumed:
self.rules[0].no_more_statements()
elif self.rules:
super(EachOf, self).no_more_statements()
class Or(AllOf):
def process_statement(self, execute_observed):
for rule in self.rules:
rule.process_statement(execute_observed)
if rule.is_consumed:
self.is_consumed = True
break
else:
self.errormessage = list(self.rules)[0].errormessage
class SQLExecuteObserved(object):
def __init__(self, context, clauseelement, multiparams, params):
self.context = context
self.clauseelement = clauseelement
self.parameters = _distill_params(multiparams, params)
self.statements = []
class SQLCursorExecuteObserved(
collections.namedtuple(
"SQLCursorExecuteObserved",
["statement", "parameters", "context", "executemany"],
)
):
pass
class SQLAsserter(object):
def __init__(self):
self.accumulated = []
def _close(self):
self._final = self.accumulated
del self.accumulated
def assert_(self, *rules):
rule = EachOf(*rules)
observed = list(self._final)
while observed:
statement = observed.pop(0)
rule.process_statement(statement)
if rule.is_consumed:
break
elif rule.errormessage:
assert False, rule.errormessage
if observed:
assert False, "Additional SQL statements remain"
elif not rule.is_consumed:
rule.no_more_statements()
@contextlib.contextmanager
def assert_engine(engine):
asserter = SQLAsserter()
orig = []
@event.listens_for(engine, "before_execute")
def connection_execute(conn, clauseelement, multiparams, params):
# grab the original statement + params before any cursor
# execution
orig[:] = clauseelement, multiparams, params
@event.listens_for(engine, "after_cursor_execute")
def cursor_execute(
conn, cursor, statement, parameters, context, executemany
):
if not context:
return
# then grab real cursor statements and associate them all
# around a single context
if (
asserter.accumulated
and asserter.accumulated[-1].context is context
):
obs = asserter.accumulated[-1]
else:
obs = SQLExecuteObserved(context, orig[0], orig[1], orig[2])
asserter.accumulated.append(obs)
obs.statements.append(
SQLCursorExecuteObserved(
statement, parameters, context, executemany
)
)
try:
yield asserter
finally:
event.remove(engine, "after_cursor_execute", cursor_execute)
event.remove(engine, "before_execute", connection_execute)
asserter._close()
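# Illustrative usage sketch (added; table and parameter names are
# hypothetical):
#
#   with assert_engine(engine) as asserter:
#       conn.execute(t.insert(), {"x": 5})
#   asserter.assert_(
#       CompiledSQL("INSERT INTO t (x) VALUES (:x)", [{"x": 5}])
#   )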
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/provision.py | import collections
import logging
from . import config
from . import engines
from ..engine import url as sa_url
from ..util import compat
log = logging.getLogger(__name__)
FOLLOWER_IDENT = None
class register(object):
def __init__(self):
self.fns = {}
@classmethod
def init(cls, fn):
return register().for_db("*")(fn)
def for_db(self, dbname):
def decorate(fn):
self.fns[dbname] = fn
return self
return decorate
def __call__(self, cfg, *arg):
if isinstance(cfg, compat.string_types):
url = sa_url.make_url(cfg)
elif isinstance(cfg, sa_url.URL):
url = cfg
else:
url = cfg.db.url
backend = url.get_backend_name()
if backend in self.fns:
return self.fns[backend](cfg, *arg)
else:
return self.fns["*"](cfg, *arg)
def create_follower_db(follower_ident):
for cfg in _configs_for_db_operation():
log.info("CREATE database %s, URI %r", follower_ident, cfg.db.url)
create_db(cfg, cfg.db, follower_ident)
def setup_config(db_url, options, file_config, follower_ident):
# load the dialect, which should also have it set up its provision
# hooks
dialect = sa_url.make_url(db_url).get_dialect()
dialect.load_provisioning()
if follower_ident:
db_url = follower_url_from_main(db_url, follower_ident)
db_opts = {}
update_db_opts(db_url, db_opts)
eng = engines.testing_engine(db_url, db_opts)
post_configure_engine(db_url, eng, follower_ident)
eng.connect().close()
cfg = config.Config.register(eng, db_opts, options, file_config)
if follower_ident:
configure_follower(cfg, follower_ident)
return cfg
def drop_follower_db(follower_ident):
for cfg in _configs_for_db_operation():
log.info("DROP database %s, URI %r", follower_ident, cfg.db.url)
drop_db(cfg, cfg.db, follower_ident)
def _configs_for_db_operation():
hosts = set()
for cfg in config.Config.all_configs():
cfg.db.dispose()
for cfg in config.Config.all_configs():
url = cfg.db.url
backend = url.get_backend_name()
host_conf = (backend, url.username, url.host, url.database)
if host_conf not in hosts:
yield cfg
hosts.add(host_conf)
for cfg in config.Config.all_configs():
cfg.db.dispose()
@register.init
def create_db(cfg, eng, ident):
"""Dynamically create a database for testing.
Used when a test run will employ multiple processes, e.g., when run
via `tox` or `pytest -n4`.
"""
raise NotImplementedError("no DB creation routine for cfg: %s" % eng.url)
@register.init
def drop_db(cfg, eng, ident):
"""Drop a database that we dynamically created for testing."""
raise NotImplementedError("no DB drop routine for cfg: %s" % eng.url)
@register.init
def update_db_opts(db_url, db_opts):
"""Set database options (db_opts) for a test database that we created.
"""
pass
@register.init
def post_configure_engine(url, engine, follower_ident):
"""Perform extra steps after configuring an engine for testing.
(For the internal dialects, currently only used by sqlite.)
"""
pass
@register.init
def follower_url_from_main(url, ident):
"""Create a connection URL for a dynamically-created test database.
:param url: the connection URL specified when the test run was invoked
:param ident: the pytest-xdist "worker identifier" to be used as the
database name
"""
url = sa_url.make_url(url)
url.database = ident
return url
@register.init
def configure_follower(cfg, ident):
"""Create dialect-specific config settings for a follower database."""
pass
@register.init
def run_reap_dbs(url, ident):
"""Remove databases that were created during the test process, after the
process has ended.
This is an optional step that is invoked for certain backends that do not
    reliably release locks on the database as long as a process is still
    running.  For the internal dialects, this is currently only necessary for
mssql and oracle.
"""
pass
def reap_dbs(idents_file):
log.info("Reaping databases...")
urls = collections.defaultdict(set)
idents = collections.defaultdict(set)
dialects = {}
with open(idents_file) as file_:
for line in file_:
line = line.strip()
db_name, db_url = line.split(" ")
url_obj = sa_url.make_url(db_url)
if db_name not in dialects:
dialects[db_name] = url_obj.get_dialect()
dialects[db_name].load_provisioning()
url_key = (url_obj.get_backend_name(), url_obj.host)
urls[url_key].add(db_url)
idents[url_key].add(db_name)
for url_key in urls:
url = list(urls[url_key])[0]
ident = idents[url_key]
run_reap_dbs(url, ident)
@register.init
def temp_table_keyword_args(cfg, eng):
"""Specify keyword arguments for creating a temporary Table.
Dialect-specific implementations of this method will return the
kwargs that are passed to the Table method when creating a temporary
table for testing, e.g., in the define_temp_tables method of the
ComponentReflectionTest class in suite/test_reflection.py
"""
raise NotImplementedError(
"no temp table keyword args routine for cfg: %s" % eng.url
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/requirements.py | # testing/requirements.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Global database feature support policy.
Provides decorators to mark tests requiring specific feature support from the
target database.
External dialect test suites should subclass SuiteRequirements
to provide specific inclusion/exclusions.
"""
import sys
from . import exclusions
from .. import util
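# Illustrative sketch (added; the backend described is hypothetical): an
# external dialect's test suite subclasses SuiteRequirements and opens or
# closes individual requirements, e.g.:
#
#   class Requirements(SuiteRequirements):
#       @property
#       def window_functions(self):
#           # this backend supports window functions
#           return exclusions.open()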
class Requirements(object):
pass
class SuiteRequirements(Requirements):
@property
def create_table(self):
"""target platform can emit basic CreateTable DDL."""
return exclusions.open()
@property
def drop_table(self):
"""target platform can emit basic DropTable DDL."""
return exclusions.open()
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return exclusions.open()
@property
def on_update_cascade(self):
""""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.open()
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.closed()
@property
def deferrable_fks(self):
return exclusions.closed()
@property
def on_update_or_deferrable_fks(self):
# TODO: exclusions should be composable,
# somehow only_if([x, y]) isn't working here, negation/conjunctions
# getting confused.
return exclusions.only_if(
lambda: self.on_update_cascade.enabled
or self.deferrable_fks.enabled
)
@property
def self_referential_foreign_keys(self):
"""Target database must support self-referential foreign keys."""
return exclusions.open()
@property
def foreign_key_ddl(self):
"""Target database must support the DDL phrases for FOREIGN KEY."""
return exclusions.open()
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def subqueries(self):
"""Target database must support subqueries."""
return exclusions.open()
@property
def offset(self):
"""target database can render OFFSET, or an equivalent, in a
SELECT.
"""
return exclusions.open()
@property
def bound_limit_offset(self):
"""target database can render LIMIT and/or OFFSET using a bound
parameter
"""
return exclusions.open()
@property
def sql_expression_limit_offset(self):
"""target database can render LIMIT and/or OFFSET with a complete
        SQL expression, such as one that uses the addition operator.
"""
return exclusions.open()
@property
def parens_in_union_contained_select_w_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when LIMIT/OFFSET is specifically present.
E.g. (SELECT ...) UNION (SELECT ..)
This is known to fail on SQLite.
"""
return exclusions.open()
@property
def parens_in_union_contained_select_wo_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when OFFSET/LIMIT is specifically not present.
E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..)
This is known to fail on SQLite. It also fails on Oracle
because without LIMIT/OFFSET, there is currently no step that
creates an additional subquery.
"""
return exclusions.open()
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return exclusions.closed()
@property
def nullable_booleans(self):
"""Target database allows boolean columns to store NULL."""
return exclusions.open()
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return exclusions.closed()
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return exclusions.closed()
@property
def standalone_null_binds_whereclause(self):
"""target database/driver supports bound parameters with NULL in the
WHERE clause, in situations where it has to be typed.
"""
return exclusions.open()
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return exclusions.closed()
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return exclusions.closed()
@property
def window_functions(self):
"""Target database must support window functions."""
return exclusions.closed()
@property
def ctes(self):
"""Target database supports CTEs"""
return exclusions.closed()
@property
def ctes_with_update_delete(self):
"""target database supports CTES that ride on top of a normal UPDATE
or DELETE statement which refers to the CTE in a correlated subquery.
"""
return exclusions.closed()
@property
def ctes_on_dml(self):
"""target database supports CTES which consist of INSERT, UPDATE
or DELETE *within* the CTE, e.g. WITH x AS (UPDATE....)"""
return exclusions.closed()
@property
def autoincrement_insert(self):
"""target platform generates new surrogate integer primary key values
when insert() is executed, excluding the pk column."""
return exclusions.open()
@property
def fetch_rows_post_commit(self):
"""target platform will allow cursor.fetchone() to proceed after a
COMMIT.
Typically this refers to an INSERT statement with RETURNING which
is invoked within "autocommit". If the row can be returned
after the autocommit, then this rule can be open.
"""
return exclusions.open()
@property
def group_by_complex_expression(self):
"""target platform supports SQL expressions in GROUP BY
e.g.
SELECT x + y AS somelabel FROM table GROUP BY x + y
"""
return exclusions.open()
@property
def sane_rowcount(self):
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_sane_rowcount,
"driver doesn't support 'sane' rowcount",
)
@property
def sane_multi_rowcount(self):
return exclusions.fails_if(
lambda config: not config.db.dialect.supports_sane_multi_rowcount,
"driver %(driver)s %(doesnt_support)s 'sane' multi row count",
)
@property
def sane_rowcount_w_returning(self):
return exclusions.fails_if(
lambda config: not (
config.db.dialect.supports_sane_rowcount_returning
),
"driver doesn't support 'sane' rowcount when returning is on",
)
@property
def empty_inserts(self):
"""target platform supports INSERT with no values, i.e.
INSERT DEFAULT VALUES or equivalent."""
return exclusions.only_if(
lambda config: config.db.dialect.supports_empty_insert
or config.db.dialect.supports_default_values,
"empty inserts not supported",
)
@property
def insert_from_select(self):
"""target platform supports INSERT from a SELECT."""
return exclusions.open()
@property
def returning(self):
"""target platform supports RETURNING."""
return exclusions.only_if(
lambda config: config.db.dialect.implicit_returning,
"%(database)s %(does_support)s 'returning'",
)
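    # Illustrative sketch, not part of the library: suite tests typically
    # gate themselves on one of these properties via the decorator form of
    # ``testing.requires``; the test is skipped or marked failing when the
    # exclusion is not open.  The test name below is hypothetical::
    #
    #     from sqlalchemy import testing
    #
    #     @testing.requires.returning
    #     def test_insert_returning(self):
    #         ...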
@property
def tuple_in(self):
"""Target platform supports the syntax
"(x, y) IN ((x1, y1), (x2, y2), ...)"
"""
return exclusions.closed()
@property
def duplicate_names_in_cursor_description(self):
"""target platform supports a SELECT statement that has
the same name repeated more than once in the columns list."""
return exclusions.open()
@property
def denormalized_names(self):
"""Target database must have 'denormalized', i.e.
UPPERCASE as case insensitive names."""
return exclusions.skip_if(
lambda config: not config.db.dialect.requires_name_normalize,
"Backend does not require denormalized names.",
)
@property
def multivalues_inserts(self):
"""target database must support multiple VALUES clauses in an
INSERT statement."""
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_multivalues_insert,
"Backend does not support multirow inserts.",
)
@property
def implements_get_lastrowid(self):
""""target dialect implements the executioncontext.get_lastrowid()
method without reliance on RETURNING.
"""
return exclusions.open()
@property
def emulated_lastrowid(self):
""""target dialect retrieves cursor.lastrowid, or fetches
from a database-side function after an insert() construct executes,
within the get_lastrowid() method.
Only dialects that "pre-execute", or need RETURNING to get last
inserted id, would return closed/fail/skip for this.
"""
return exclusions.closed()
@property
def dbapi_lastrowid(self):
""""target platform includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return exclusions.closed()
@property
def views(self):
"""Target database must support VIEWs."""
return exclusions.closed()
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return exclusions.closed()
@property
def cross_schema_fk_reflection(self):
"""target system must support reflection of inter-schema foreign keys
"""
return exclusions.closed()
@property
def implicit_default_schema(self):
"""target system has a strong concept of 'default' schema that can
be referred to implicitly.
basically, PostgreSQL.
"""
return exclusions.closed()
@property
def server_side_cursors(self):
"""Target dialect must support server side cursors."""
return exclusions.only_if(
[lambda config: config.db.dialect.supports_server_side_cursors],
"no server side cursors support",
)
@property
def sequences(self):
"""Target database must support SEQUENCEs."""
return exclusions.only_if(
[lambda config: config.db.dialect.supports_sequences],
"no sequence support",
)
@property
def sequences_optional(self):
"""Target database supports sequences, but also optionally
as a means of generating new PK values."""
return exclusions.only_if(
[
lambda config: config.db.dialect.supports_sequences
and config.db.dialect.sequences_optional
],
"no sequence support, or sequences not optional",
)
@property
def supports_lastrowid(self):
"""target database / driver supports cursor.lastrowid as a means
of retrieving the last inserted primary key value.
note that if the target DB supports sequences also, this is still
assumed to work. This is a new use case brought on by MariaDB 10.3.
"""
return exclusions.only_if(
[lambda config: config.db.dialect.postfetch_lastrowid]
)
@property
def no_lastrowid_support(self):
"""the opposite of supports_lastrowid"""
return exclusions.NotPredicate(self.supports_lastrowid)
@property
def reflects_pk_names(self):
return exclusions.closed()
@property
def table_reflection(self):
return exclusions.open()
@property
def comment_reflection(self):
return exclusions.closed()
@property
def view_column_reflection(self):
"""target database must support retrieval of the columns in a view,
similarly to how a table is inspected.
This does not include the full CREATE VIEW definition.
"""
return self.views
@property
def view_reflection(self):
"""target database must support inspection of the full CREATE VIEW definition.
"""
return self.views
@property
def schema_reflection(self):
return self.schemas
@property
def primary_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_option_reflection_ondelete(self):
return exclusions.closed()
@property
def foreign_key_constraint_option_reflection_onupdate(self):
return exclusions.closed()
@property
def temp_table_reflection(self):
return exclusions.open()
@property
def temp_table_names(self):
"""target dialect supports listing of temporary table names"""
return exclusions.closed()
@property
def temporary_tables(self):
"""target database supports temporary tables"""
return exclusions.open()
@property
def temporary_views(self):
"""target database supports temporary views"""
return exclusions.closed()
@property
def index_reflection(self):
return exclusions.open()
@property
def indexes_with_ascdesc(self):
"""target database supports CREATE INDEX with per-column ASC/DESC."""
return exclusions.open()
@property
def indexes_with_expressions(self):
"""target database supports CREATE INDEX against SQL expressions."""
return exclusions.closed()
@property
def unique_constraint_reflection(self):
"""target dialect supports reflection of unique constraints"""
return exclusions.open()
@property
def check_constraint_reflection(self):
"""target dialect supports reflection of check constraints"""
return exclusions.closed()
@property
def duplicate_key_raises_integrity_error(self):
"""target dialect raises IntegrityError when reporting an INSERT
with a primary key violation. (hint: it should)
"""
return exclusions.open()
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return exclusions.open()
@property
def unicode_data(self):
"""Target database/dialect must support Python unicode objects with
non-ASCII characters represented, delivered as bound parameters
as well as in result rows.
"""
return exclusions.open()
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol
names.
"""
return exclusions.closed()
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return exclusions.closed()
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return exclusions.open()
@property
def timestamp_microseconds(self):
"""target dialect supports representation of Python
        datetime.datetime() objects with microseconds, but only
if TIMESTAMP is used."""
return exclusions.closed()
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
return exclusions.open()
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return exclusions.open()
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return exclusions.open()
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return exclusions.open()
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
return exclusions.open()
@property
def autocommit(self):
"""target dialect supports 'AUTOCOMMIT' as an isolation_level"""
return exclusions.closed()
@property
def isolation_level(self):
"""target dialect supports general isolation level settings.
Note that this requirement, when enabled, also requires that
the get_isolation_levels() method be implemented.
"""
return exclusions.closed()
def get_isolation_levels(self, config):
"""Return a structure of supported isolation levels for the current
testing dialect.
The structure indicates to the testing suite what the expected
"default" isolation should be, as well as the other values that
are accepted. The dictionary has two keys, "default" and "supported".
The "supported" key refers to a list of all supported levels and
it should include AUTOCOMMIT if the dialect supports it.
If the :meth:`.DefaultRequirements.isolation_level` requirement is
not open, then this method has no return value.
E.g.::
>>> testing.requirements.get_isolation_levels()
{
"default": "READ_COMMITED",
"supported": [
"SERIALIZABLE", "READ UNCOMMITTED",
"READ COMMITTED", "REPEATABLE READ",
"AUTOCOMMIT"
]
}
"""
@property
def json_type(self):
"""target platform implements a native JSON type."""
return exclusions.closed()
@property
def json_array_indexes(self):
""""target platform supports numeric array indexes
within a JSON structure"""
return self.json_type
@property
def json_index_supplementary_unicode_element(self):
return exclusions.open()
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
return exclusions.closed()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return exclusions.closed()
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
return exclusions.closed()
@property
def implicit_decimal_binds(self):
"""target backend will return a selected Decimal as a Decimal, not
a string.
e.g.::
expr = decimal.Decimal("15.7563")
value = e.scalar(
select([literal(expr)])
)
assert value == expr
See :ticket:`4036`
"""
return exclusions.open()
@property
def nested_aggregates(self):
"""target database can select an aggregate from a subquery that's
also using an aggregate
"""
return exclusions.open()
@property
def recursive_fk_cascade(self):
"""target database must support ON DELETE CASCADE on a self-referential
foreign key
"""
return exclusions.open()
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return exclusions.closed()
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type.
"""
return exclusions.open()
@property
def floats_to_four_decimals(self):
"""target backend can return a floating-point number with four
significant digits (such as 15.7563) accurately
(i.e. without FP inaccuracies, such as 15.75629997253418).
"""
return exclusions.open()
@property
def fetch_null_from_numeric(self):
"""target backend doesn't crash when you try to select a NUMERIC
value that has a value of NULL.
Added to support Pyodbc bug #351.
"""
return exclusions.open()
@property
def text_type(self):
"""Target database must support an unbounded Text() "
"type such as TEXT or CLOB"""
return exclusions.open()
@property
def empty_strings_varchar(self):
"""target database can persist/return an empty string with a
varchar.
"""
return exclusions.open()
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return exclusions.open()
@property
def expressions_against_unbounded_text(self):
"""target database supports use of an unbounded textual field in a
WHERE clause."""
return exclusions.open()
@property
def selectone(self):
"""target driver must support the literal statement 'select 1'"""
return exclusions.open()
@property
def savepoints(self):
"""Target database must support savepoints."""
return exclusions.closed()
@property
def two_phase_transactions(self):
"""Target database must support two-phase transactions."""
return exclusions.closed()
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return exclusions.closed()
@property
def delete_from(self):
"""Target must support DELETE FROM..FROM or DELETE..USING syntax"""
return exclusions.closed()
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE (or DELETE) where the same table is
present in a subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as::
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return exclusions.open()
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return exclusions.closed()
@property
def percent_schema_names(self):
"""target backend supports weird identifiers with percent signs
in them, e.g. 'some % column'.
        This is an unusual use case, but it often has problems because of
        DBAPIs that use Python string formatting for parameters. It's not a
        critical use case either.
"""
return exclusions.closed()
@property
def order_by_col_from_union(self):
"""target database supports ordering by a column from a SELECT
inside of a UNION
E.g. (SELECT id, ...) UNION (SELECT id, ...) ORDER BY id
"""
return exclusions.open()
@property
def order_by_label_with_expression(self):
"""target backend supports ORDER BY a column label within an
expression.
Basically this::
select data as foo from test order by foo || 'bar'
Lots of databases including PostgreSQL don't support this,
so this is off by default.
"""
return exclusions.closed()
@property
def order_by_collation(self):
def check(config):
try:
self.get_order_by_collation(config)
return False
except NotImplementedError:
return True
return exclusions.skip_if(check)
def get_order_by_collation(self, config):
raise NotImplementedError()
@property
def unicode_connections(self):
"""Target driver must support non-ASCII characters being passed at
all.
"""
return exclusions.open()
@property
def graceful_disconnects(self):
"""Target driver must raise a DBAPI-level exception, such as
InterfaceError, when the underlying connection has been closed
and the execute() method is called.
"""
return exclusions.open()
@property
def independent_connections(self):
"""
Target must support simultaneous, independent database connections.
"""
return exclusions.open()
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return exclusions.open()
@property
def ad_hoc_engines(self):
"""Test environment must allow ad-hoc engine/connection creation.
        DBs that scale poorly for many connections, even when closed, e.g.
Oracle, may use the "--low-connections" option which flags this
requirement as not present.
"""
return exclusions.skip_if(
lambda config: config.options.low_connections
)
@property
def timing_intensive(self):
return exclusions.requires_tag("timing_intensive")
@property
def memory_intensive(self):
return exclusions.requires_tag("memory_intensive")
@property
def threading_with_mock(self):
"""Mark tests that use threading and mock at the same time - stability
issues have been observed with coverage + python 3.3
"""
return exclusions.skip_if(
lambda config: util.py3k and config.options.has_coverage,
"Stability issues with coverage + py3k",
)
@property
def python2(self):
return exclusions.skip_if(
lambda: sys.version_info >= (3,),
"Python version 2.xx is required.",
)
@property
def python3(self):
return exclusions.skip_if(
lambda: sys.version_info < (3,), "Python version 3.xx is required."
)
@property
def python37(self):
return exclusions.skip_if(
lambda: sys.version_info < (3, 7),
"Python version 3.7 or greater is required.",
)
@property
def cpython(self):
return exclusions.only_if(
lambda: util.cpython, "cPython interpreter needed"
)
@property
def non_broken_pickle(self):
from sqlalchemy.util import pickle
return exclusions.only_if(
            lambda: (not util.pypy and pickle.__name__ == "cPickle")
            or sys.version_info >= (3, 2),
"Needs cPickle+cPython or newer Python 3 pickle",
)
@property
def predictable_gc(self):
"""target platform must remove all cycles unconditionally when
gc.collect() is called, as well as clean out unreferenced subclasses.
"""
return self.cpython
@property
def no_coverage(self):
"""Test should be skipped if coverage is enabled.
This is to block tests that exercise libraries that seem to be
sensitive to coverage, such as PostgreSQL notice logging.
"""
return exclusions.skip_if(
lambda config: config.options.has_coverage,
"Issues observed when coverage is enabled",
)
def _has_mysql_on_windows(self, config):
return False
def _has_mysql_fully_case_sensitive(self, config):
return False
@property
def sqlite(self):
return exclusions.skip_if(lambda: not self._has_sqlite())
@property
def cextensions(self):
return exclusions.skip_if(
lambda: not self._has_cextensions(), "C extensions not installed"
)
def _has_sqlite(self):
from sqlalchemy import create_engine
try:
create_engine("sqlite://")
return True
except ImportError:
return False
def _has_cextensions(self):
try:
from sqlalchemy import cresultproxy, cprocessors # noqa
return True
except ImportError:
return False
@property
def computed_columns(self):
"Supports computed columns"
return exclusions.closed()
@property
def computed_columns_stored(self):
"Supports computed columns with `persisted=True`"
return exclusions.closed()
@property
def computed_columns_virtual(self):
"Supports computed columns with `persisted=False`"
return exclusions.closed()
@property
def computed_columns_default_persisted(self):
"""If the default persistence is virtual or stored when `persisted`
is omitted"""
return exclusions.closed()
@property
def computed_columns_reflect_persisted(self):
"""If persistence information is returned by the reflection of
computed columns"""
return exclusions.closed()
@property
def supports_is_distinct_from(self):
"""Supports some form of "x IS [NOT] DISTINCT FROM y" construct.
Different dialects will implement their own flavour, e.g.,
sqlite will emit "x IS NOT y" instead of "x IS DISTINCT FROM y".
.. seealso::
:meth:`.ColumnOperators.is_distinct_from`
"""
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_is_distinct_from,
"driver doesn't support an IS DISTINCT FROM construct",
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/entities.py | # testing/entities.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sqlalchemy as sa
from .. import exc as sa_exc
from ..util import compat
_repr_stack = set()
class BasicEntity(object):
def __init__(self, **kw):
for key, value in kw.items():
setattr(self, key, value)
def __repr__(self):
if id(self) in _repr_stack:
return object.__repr__(self)
_repr_stack.add(id(self))
try:
return "%s(%s)" % (
(self.__class__.__name__),
", ".join(
[
"%s=%r" % (key, getattr(self, key))
for key in sorted(self.__dict__.keys())
if not key.startswith("_")
]
),
)
finally:
_repr_stack.remove(id(self))
_recursion_stack = set()
class ComparableEntity(BasicEntity):
def __hash__(self):
return hash(self.__class__)
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
"""'Deep, sparse compare.
Deeply compare two entities, following the non-None attributes of the
non-persisted object, if possible.
"""
if other is self:
return True
elif not self.__class__ == other.__class__:
return False
if id(self) in _recursion_stack:
return True
_recursion_stack.add(id(self))
try:
# pick the entity that's not SA persisted as the source
try:
self_key = sa.orm.attributes.instance_state(self).key
except sa.orm.exc.NO_STATE:
self_key = None
if other is None:
a = self
b = other
elif self_key is not None:
a = other
b = self
else:
a = self
b = other
for attr in list(a.__dict__):
if attr.startswith("_"):
continue
value = getattr(a, attr)
try:
# handle lazy loader errors
battr = getattr(b, attr)
except (AttributeError, sa_exc.UnboundExecutionError):
return False
if hasattr(value, "__iter__") and not isinstance(
value, compat.string_types
):
if hasattr(value, "__getitem__") and not hasattr(
value, "keys"
):
if list(value) != list(battr):
return False
else:
if set(value) != set(battr):
return False
else:
if value is not None and value != battr:
return False
return True
finally:
_recursion_stack.remove(id(self))
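# Illustrative sketch, not part of the library: ComparableEntity lets a test
# compare an ORM-loaded object against a plain in-memory "expected" object
# attribute by attribute.  ``User`` and the session here are hypothetical::
#
#     expected = User(id=1, name="ed")
#     loaded = session.query(User).get(1)
#     assert loaded == expected  # deep, sparse comparison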
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/profiling.py | # testing/profiling.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Profiling support for unit and performance tests.
These are special purpose profiling methods which operate
in a more fine-grained way than nose's profiling plugin.
"""
import collections
import contextlib
import os
import pstats
import sys
from . import config
from .util import gc_collect
from ..util import jython
from ..util import pypy
from ..util import update_wrapper
from ..util import win32
try:
import cProfile
except ImportError:
cProfile = None
_current_test = None
# ProfileStatsFile instance, set up in plugin_base
_profile_stats = None
class ProfileStatsFile(object):
""""Store per-platform/fn profiling results in a file.
We're still targeting Py2.5, 2.4 on 0.7 with no dependencies,
so no json lib :( need to roll something silly
"""
def __init__(self, filename):
self.force_write = (
config.options is not None and config.options.force_write_profiles
)
self.write = self.force_write or (
config.options is not None and config.options.write_profiles
)
self.fname = os.path.abspath(filename)
self.short_fname = os.path.split(self.fname)[-1]
self.data = collections.defaultdict(
lambda: collections.defaultdict(dict)
)
self._read()
if self.write:
# rewrite for the case where features changed,
# etc.
self._write()
@property
def platform_key(self):
dbapi_key = config.db.name + "_" + config.db.driver
if config.db.name == "sqlite" and config.db.dialect._is_url_file_db(
config.db.url
):
dbapi_key += "_file"
# keep it at 2.7, 3.1, 3.2, etc. for now.
py_version = ".".join([str(v) for v in sys.version_info[0:2]])
platform_tokens = [py_version]
platform_tokens.append(dbapi_key)
if jython:
platform_tokens.append("jython")
if pypy:
platform_tokens.append("pypy")
if win32:
platform_tokens.append("win")
platform_tokens.append(
"nativeunicode"
if config.db.dialect.convert_unicode
else "dbapiunicode"
)
_has_cext = config.requirements._has_cextensions()
platform_tokens.append(_has_cext and "cextensions" or "nocextensions")
return "_".join(platform_tokens)
def has_stats(self):
test_key = _current_test
return (
test_key in self.data and self.platform_key in self.data[test_key]
)
def result(self, callcount):
test_key = _current_test
per_fn = self.data[test_key]
per_platform = per_fn[self.platform_key]
if "counts" not in per_platform:
per_platform["counts"] = counts = []
else:
counts = per_platform["counts"]
if "current_count" not in per_platform:
per_platform["current_count"] = current_count = 0
else:
current_count = per_platform["current_count"]
has_count = len(counts) > current_count
if not has_count:
counts.append(callcount)
if self.write:
self._write()
result = None
else:
result = per_platform["lineno"], counts[current_count]
per_platform["current_count"] += 1
return result
def replace(self, callcount):
test_key = _current_test
per_fn = self.data[test_key]
per_platform = per_fn[self.platform_key]
counts = per_platform["counts"]
current_count = per_platform["current_count"]
if current_count < len(counts):
counts[current_count - 1] = callcount
else:
counts[-1] = callcount
if self.write:
self._write()
def _header(self):
return (
"# %s\n"
"# This file is written out on a per-environment basis.\n"
"# For each test in aaa_profiling, the corresponding "
"function and \n"
"# environment is located within this file. "
"If it doesn't exist,\n"
"# the test is skipped.\n"
"# If a callcount does exist, it is compared "
"to what we received. \n"
"# assertions are raised if the counts do not match.\n"
"# \n"
"# To add a new callcount test, apply the function_call_count \n"
"# decorator and re-run the tests using the --write-profiles \n"
"# option - this file will be rewritten including the new count.\n"
"# \n"
) % (self.fname)
def _read(self):
try:
profile_f = open(self.fname)
except IOError:
return
for lineno, line in enumerate(profile_f):
line = line.strip()
if not line or line.startswith("#"):
continue
test_key, platform_key, counts = line.split()
per_fn = self.data[test_key]
per_platform = per_fn[platform_key]
c = [int(count) for count in counts.split(",")]
per_platform["counts"] = c
per_platform["lineno"] = lineno + 1
per_platform["current_count"] = 0
profile_f.close()
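    # Illustrative sketch, not part of the library: each non-comment line in
    # the stats file has the form "<test_key> <platform_key> <counts>", with
    # counts comma-separated; the values below are hypothetical::
    #
    #     test.aaa_profiling.test_x.FooTest.test_bar 3.8_sqlite_pysqlite_dbapiunicode_nocextensions 6015,6025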
def _write(self):
print(("Writing profile file %s" % self.fname))
profile_f = open(self.fname, "w")
profile_f.write(self._header())
for test_key in sorted(self.data):
per_fn = self.data[test_key]
profile_f.write("\n# TEST: %s\n\n" % test_key)
for platform_key in sorted(per_fn):
per_platform = per_fn[platform_key]
c = ",".join(str(count) for count in per_platform["counts"])
profile_f.write("%s %s %s\n" % (test_key, platform_key, c))
profile_f.close()
def function_call_count(variance=0.05, times=1, warmup=0):
"""Assert a target for a test case's function call count.
The main purpose of this assertion is to detect changes in
callcounts for various functions - the actual number is not as important.
Callcounts are stored in a file keyed to Python version and OS platform
information. This file is generated automatically for new tests,
and versioned so that unexpected changes in callcounts will be detected.
"""
def decorate(fn):
def wrap(*args, **kw):
for warm in range(warmup):
fn(*args, **kw)
timerange = range(times)
with count_functions(variance=variance):
for time in timerange:
rv = fn(*args, **kw)
return rv
return update_wrapper(wrap, fn)
return decorate
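# Illustrative sketch, not part of the library: a profiling test applies the
# decorator and runs normally; the expected callcount is read from (or, with
# --write-profiles, written to) the profile file.  The test body below is
# hypothetical::
#
#     @function_call_count(variance=0.1)
#     def test_profiled_operation(self):
#         run_the_operation_under_test()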
@contextlib.contextmanager
def count_functions(variance=0.05):
if cProfile is None:
raise config._skip_test_exception("cProfile is not installed")
if not _profile_stats.has_stats() and not _profile_stats.write:
config.skip_test(
"No profiling stats available on this "
"platform for this function. Run tests with "
"--write-profiles to add statistics to %s for "
"this platform." % _profile_stats.short_fname
)
gc_collect()
pr = cProfile.Profile()
pr.enable()
# began = time.time()
yield
# ended = time.time()
pr.disable()
# s = compat.StringIO()
stats = pstats.Stats(pr, stream=sys.stdout)
# timespent = ended - began
callcount = stats.total_calls
expected = _profile_stats.result(callcount)
if expected is None:
expected_count = None
else:
line_no, expected_count = expected
print(("Pstats calls: %d Expected %s" % (callcount, expected_count)))
stats.sort_stats("cumulative")
stats.print_stats()
if expected_count:
deviance = int(callcount * variance)
failed = abs(callcount - expected_count) > deviance
if failed or _profile_stats.force_write:
if _profile_stats.write:
_profile_stats.replace(callcount)
else:
raise AssertionError(
"Adjusted function call count %s not within %s%% "
"of expected %s, platform %s. Rerun with "
"--write-profiles to "
"regenerate this callcount."
% (
callcount,
(variance * 100),
expected_count,
_profile_stats.platform_key,
)
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/fixtures.py | # testing/fixtures.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
import sys
import sqlalchemy as sa
from . import assertions
from . import config
from . import schema
from .engines import drop_all_tables
from .entities import BasicEntity
from .entities import ComparableEntity
from .util import adict
from .. import event
from .. import util
from ..ext.declarative import declarative_base
from ..ext.declarative import DeclarativeMeta
from ..schema import sort_tables_and_constraints
# whether or not we use unittest changes things dramatically,
# as far as how pytest collection works.
class TestBase(object):
# A sequence of database names to always run, regardless of the
# constraints below.
__whitelist__ = ()
# A sequence of requirement names matching testing.requires decorators
__requires__ = ()
# A sequence of dialect names to exclude from the test class.
__unsupported_on__ = ()
# If present, test class is only runnable for the *single* specified
# dialect. If you need multiple, use __unsupported_on__ and invert.
__only_on__ = None
# A sequence of no-arg callables. If any are True, the entire testcase is
# skipped.
__skip_if__ = None
def assert_(self, val, msg=None):
assert val, msg
# apparently a handful of tests are doing this....OK
def setup(self):
if hasattr(self, "setUp"):
self.setUp()
def teardown(self):
if hasattr(self, "tearDown"):
self.tearDown()
@config.fixture()
def connection(self):
conn = config.db.connect()
trans = conn.begin()
try:
yield conn
finally:
trans.rollback()
conn.close()
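    # Illustrative sketch, not part of the library: a test method receives
    # the fixture by accepting an argument of the same name; the enclosing
    # transaction is rolled back when the test returns.  The test name is
    # hypothetical::
    #
    #     def test_something(self, connection):
    #         connection.execute(...)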
# propose a replacement for @testing.provide_metadata.
# the problem with this is that TablesTest below has a ".metadata"
# attribute already which is accessed directly as part of the
# @testing.provide_metadata pattern. Might need to call this _metadata
# for it to be useful.
# @config.fixture()
# def metadata(self):
# """Provide bound MetaData for a single test, dropping afterwards."""
#
# from . import engines
# metadata = schema.MetaData(config.db)
# try:
# yield metadata
# finally:
# engines.drop_all_tables(metadata, config.db)
class TablesTest(TestBase):
# 'once', None
run_setup_bind = "once"
# 'once', 'each', None
run_define_tables = "once"
# 'once', 'each', None
run_create_tables = "once"
# 'once', 'each', None
run_inserts = "each"
# 'each', None
run_deletes = "each"
# 'once', None
run_dispose_bind = None
bind = None
metadata = None
tables = None
other = None
@classmethod
def setup_class(cls):
cls._init_class()
cls._setup_once_tables()
cls._setup_once_inserts()
@classmethod
def _init_class(cls):
if cls.run_define_tables == "each":
if cls.run_create_tables == "once":
cls.run_create_tables = "each"
assert cls.run_inserts in ("each", None)
cls.other = adict()
cls.tables = adict()
cls.bind = cls.setup_bind()
cls.metadata = sa.MetaData()
cls.metadata.bind = cls.bind
@classmethod
def _setup_once_inserts(cls):
if cls.run_inserts == "once":
cls._load_fixtures()
with cls.bind.begin() as conn:
cls.insert_data(conn)
@classmethod
def _setup_once_tables(cls):
if cls.run_define_tables == "once":
cls.define_tables(cls.metadata)
if cls.run_create_tables == "once":
cls.metadata.create_all(cls.bind)
cls.tables.update(cls.metadata.tables)
def _setup_each_tables(self):
if self.run_define_tables == "each":
self.define_tables(self.metadata)
if self.run_create_tables == "each":
self.metadata.create_all(self.bind)
self.tables.update(self.metadata.tables)
elif self.run_create_tables == "each":
self.metadata.create_all(self.bind)
def _setup_each_inserts(self):
if self.run_inserts == "each":
self._load_fixtures()
with self.bind.begin() as conn:
self.insert_data(conn)
def _teardown_each_tables(self):
if self.run_define_tables == "each":
self.tables.clear()
if self.run_create_tables == "each":
drop_all_tables(self.metadata, self.bind)
self.metadata.clear()
elif self.run_create_tables == "each":
drop_all_tables(self.metadata, self.bind)
# no need to run deletes if tables are recreated on setup
if self.run_define_tables != "each" and self.run_deletes == "each":
with self.bind.connect() as conn:
for table in reversed(
[
t
for (t, fks) in sort_tables_and_constraints(
self.metadata.tables.values()
)
if t is not None
]
):
try:
conn.execute(table.delete())
except sa.exc.DBAPIError as ex:
util.print_(
("Error emptying table %s: %r" % (table, ex)),
file=sys.stderr,
)
def setup(self):
self._setup_each_tables()
self._setup_each_inserts()
def teardown(self):
self._teardown_each_tables()
@classmethod
def _teardown_once_metadata_bind(cls):
if cls.run_create_tables:
drop_all_tables(cls.metadata, cls.bind)
if cls.run_dispose_bind == "once":
cls.dispose_bind(cls.bind)
cls.metadata.bind = None
if cls.run_setup_bind is not None:
cls.bind = None
@classmethod
def teardown_class(cls):
cls._teardown_once_metadata_bind()
@classmethod
def setup_bind(cls):
return config.db
@classmethod
def dispose_bind(cls, bind):
if hasattr(bind, "dispose"):
bind.dispose()
elif hasattr(bind, "close"):
bind.close()
@classmethod
def define_tables(cls, metadata):
pass
@classmethod
def fixtures(cls):
return {}
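    # Illustrative sketch, not part of the library: fixtures() maps a table
    # (or its string name within cls.tables) to a tuple whose first element
    # is the header row of column names, followed by one tuple per row to
    # INSERT; names and values below are hypothetical::
    #
    #     return {
    #         "users": (
    #             ("id", "name"),
    #             (1, "ed"),
    #             (2, "wendy"),
    #         )
    #     }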
@classmethod
def insert_data(cls, connection):
pass
def sql_count_(self, count, fn):
self.assert_sql_count(self.bind, fn, count)
def sql_eq_(self, callable_, statements):
self.assert_sql(self.bind, callable_, statements)
@classmethod
def _load_fixtures(cls):
"""Insert rows as represented by the fixtures() method."""
headers, rows = {}, {}
for table, data in cls.fixtures().items():
if len(data) < 2:
continue
if isinstance(table, util.string_types):
table = cls.tables[table]
headers[table] = data[0]
rows[table] = data[1:]
for table, fks in sort_tables_and_constraints(
cls.metadata.tables.values()
):
if table is None:
continue
if table not in headers:
continue
cls.bind.execute(
table.insert(),
[
dict(zip(headers[table], column_values))
for column_values in rows[table]
],
)
class RemovesEvents(object):
@util.memoized_property
def _event_fns(self):
return set()
def event_listen(self, target, name, fn, **kw):
self._event_fns.add((target, name, fn))
event.listen(target, name, fn, **kw)
def teardown(self):
for key in self._event_fns:
event.remove(*key)
super_ = super(RemovesEvents, self)
if hasattr(super_, "teardown"):
super_.teardown()
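# Illustrative sketch, not part of the library: a test class mixes
# RemovesEvents in and registers listeners through event_listen(), so that
# they are removed automatically on teardown.  Names are hypothetical::
#
#     class MyTest(RemovesEvents, TestBase):
#         def test_listener(self):
#             self.event_listen(config.db, "engine_connect", my_listener)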
class _ORMTest(object):
@classmethod
def teardown_class(cls):
sa.orm.session.close_all_sessions()
sa.orm.clear_mappers()
class ORMTest(_ORMTest, TestBase):
pass
class MappedTest(_ORMTest, TablesTest, assertions.AssertsExecutionResults):
# 'once', 'each', None
run_setup_classes = "once"
# 'once', 'each', None
run_setup_mappers = "each"
classes = None
@classmethod
def setup_class(cls):
cls._init_class()
if cls.classes is None:
cls.classes = adict()
cls._setup_once_tables()
cls._setup_once_classes()
cls._setup_once_mappers()
cls._setup_once_inserts()
@classmethod
def teardown_class(cls):
cls._teardown_once_class()
cls._teardown_once_metadata_bind()
def setup(self):
self._setup_each_tables()
self._setup_each_classes()
self._setup_each_mappers()
self._setup_each_inserts()
def teardown(self):
sa.orm.session.close_all_sessions()
self._teardown_each_mappers()
self._teardown_each_classes()
self._teardown_each_tables()
@classmethod
def _teardown_once_class(cls):
cls.classes.clear()
_ORMTest.teardown_class()
@classmethod
def _setup_once_classes(cls):
if cls.run_setup_classes == "once":
cls._with_register_classes(cls.setup_classes)
@classmethod
def _setup_once_mappers(cls):
if cls.run_setup_mappers == "once":
cls._with_register_classes(cls.setup_mappers)
def _setup_each_mappers(self):
if self.run_setup_mappers == "each":
self._with_register_classes(self.setup_mappers)
def _setup_each_classes(self):
if self.run_setup_classes == "each":
self._with_register_classes(self.setup_classes)
@classmethod
def _with_register_classes(cls, fn):
"""Run a setup method, framing the operation with a Base class
that will catch new subclasses to be established within
the "classes" registry.
"""
cls_registry = cls.classes
class FindFixture(type):
def __init__(cls, classname, bases, dict_):
cls_registry[classname] = cls
type.__init__(cls, classname, bases, dict_)
class _Base(util.with_metaclass(FindFixture, object)):
pass
class Basic(BasicEntity, _Base):
pass
class Comparable(ComparableEntity, _Base):
pass
cls.Basic = Basic
cls.Comparable = Comparable
fn()
def _teardown_each_mappers(self):
# some tests create mappers in the test bodies
# and will define setup_mappers as None -
# clear mappers in any case
if self.run_setup_mappers != "once":
sa.orm.clear_mappers()
def _teardown_each_classes(self):
if self.run_setup_classes != "once":
self.classes.clear()
@classmethod
def setup_classes(cls):
pass
@classmethod
def setup_mappers(cls):
pass
class DeclarativeMappedTest(MappedTest):
run_setup_classes = "once"
run_setup_mappers = "once"
@classmethod
def _setup_once_tables(cls):
pass
@classmethod
def _with_register_classes(cls, fn):
cls_registry = cls.classes
class FindFixtureDeclarative(DeclarativeMeta):
def __init__(cls, classname, bases, dict_):
cls_registry[classname] = cls
DeclarativeMeta.__init__(cls, classname, bases, dict_)
class DeclarativeBasic(object):
__table_cls__ = schema.Table
_DeclBase = declarative_base(
metadata=cls.metadata,
metaclass=FindFixtureDeclarative,
cls=DeclarativeBasic,
)
cls.DeclarativeBasic = _DeclBase
# sets up cls.Basic which is helpful for things like composite
# classes
super(DeclarativeMappedTest, cls)._with_register_classes(fn)
if cls.metadata.tables and cls.run_create_tables:
cls.metadata.create_all(config.db)
class ComputedReflectionFixtureTest(TablesTest):
run_inserts = run_deletes = None
__backend__ = True
__requires__ = ("computed_columns", "table_reflection")
regexp = re.compile(r"[\[\]\(\)\s`'\"]*")
def normalize(self, text):
return self.regexp.sub("", text).lower()
@classmethod
def define_tables(cls, metadata):
from .. import Integer
from .. import testing
from ..schema import Column
from ..schema import Computed
from ..schema import Table
Table(
"computed_default_table",
metadata,
Column("id", Integer, primary_key=True),
Column("normal", Integer),
Column("computed_col", Integer, Computed("normal + 42")),
Column("with_default", Integer, server_default="42"),
)
t = Table(
"computed_column_table",
metadata,
Column("id", Integer, primary_key=True),
Column("normal", Integer),
Column("computed_no_flag", Integer, Computed("normal + 42")),
)
if testing.requires.schemas.enabled:
t2 = Table(
"computed_column_table",
metadata,
Column("id", Integer, primary_key=True),
Column("normal", Integer),
Column("computed_no_flag", Integer, Computed("normal / 42")),
schema=config.test_schema,
)
if testing.requires.computed_columns_virtual.enabled:
t.append_column(
Column(
"computed_virtual",
Integer,
Computed("normal + 2", persisted=False),
)
)
if testing.requires.schemas.enabled:
t2.append_column(
Column(
"computed_virtual",
Integer,
Computed("normal / 2", persisted=False),
)
)
if testing.requires.computed_columns_stored.enabled:
t.append_column(
Column(
"computed_stored",
Integer,
Computed("normal - 42", persisted=True),
)
)
if testing.requires.schemas.enabled:
t2.append_column(
Column(
"computed_stored",
Integer,
Computed("normal * 42", persisted=True),
)
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/schema.py | # testing/schema.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import config
from . import exclusions
from .. import event
from .. import schema
__all__ = ["Table", "Column"]
table_options = {}
def Table(*args, **kw):
"""A schema.Table wrapper/hook for dialect-specific tweaks."""
test_opts = {k: kw.pop(k) for k in list(kw) if k.startswith("test_")}
kw.update(table_options)
if exclusions.against(config._current, "mysql"):
if (
"mysql_engine" not in kw
and "mysql_type" not in kw
and "autoload_with" not in kw
):
if "test_needs_fk" in test_opts or "test_needs_acid" in test_opts:
kw["mysql_engine"] = "InnoDB"
else:
kw["mysql_engine"] = "MyISAM"
# Apply some default cascading rules for self-referential foreign keys.
# MySQL InnoDB has some issues around selecting self-refs too.
if exclusions.against(config._current, "firebird"):
table_name = args[0]
unpack = config.db.dialect.identifier_preparer.unformat_identifiers
# Only going after ForeignKeys in Columns. May need to
# expand to ForeignKeyConstraint too.
fks = [
fk
for col in args
if isinstance(col, schema.Column)
for fk in col.foreign_keys
]
for fk in fks:
# root around in raw spec
ref = fk._colspec
if isinstance(ref, schema.Column):
name = ref.table.name
else:
# take just the table name: on FB there cannot be
# a schema, so the first element is always the
# table name, possibly followed by the field name
name = unpack(ref)[0]
if name == table_name:
if fk.ondelete is None:
fk.ondelete = "CASCADE"
if fk.onupdate is None:
fk.onupdate = "CASCADE"
return schema.Table(*args, **kw)
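# Illustrative sketch, not part of the library: suite table definitions pass
# "test_*" options through this wrapper, e.g. forcing InnoDB on MySQL when a
# test exercises foreign keys; names below are hypothetical::
#
#     t = Table(
#         "mytable",
#         metadata,
#         Column("id", Integer, primary_key=True),
#         test_needs_fk=True,
#     )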
def Column(*args, **kw):
"""A schema.Column wrapper/hook for dialect-specific tweaks."""
test_opts = {k: kw.pop(k) for k in list(kw) if k.startswith("test_")}
if not config.requirements.foreign_key_ddl.enabled_for_config(config):
args = [arg for arg in args if not isinstance(arg, schema.ForeignKey)]
col = schema.Column(*args, **kw)
if test_opts.get("test_needs_autoincrement", False) and kw.get(
"primary_key", False
):
if col.default is None and col.server_default is None:
col.autoincrement = True
# allow any test suite to pick up on this
col.info["test_needs_autoincrement"] = True
# hardcoded rule for firebird, oracle; this should
# be moved out
if exclusions.against(config._current, "firebird", "oracle"):
def add_seq(c, tbl):
c._init_items(
schema.Sequence(
_truncate_name(
config.db.dialect, tbl.name + "_" + c.name + "_seq"
),
optional=True,
)
)
event.listen(col, "after_parent_attach", add_seq, propagate=True)
return col
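# Illustrative sketch, not part of the library: an integer primary key that
# must generate values on every backend is declared with
# test_needs_autoincrement; on firebird/oracle an optional Sequence is then
# attached automatically::
#
#     Column("id", Integer, primary_key=True, test_needs_autoincrement=True)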
def _truncate_name(dialect, name):
if len(name) > dialect.max_identifier_length:
return (
name[0 : max(dialect.max_identifier_length - 6, 0)]
+ "_"
+ hex(hash(name) % 64)[2:]
)
else:
return name
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/mock.py | # testing/mock.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Import stub for mock library.
"""
from __future__ import absolute_import
from ..util import py33
if py33:
from unittest.mock import MagicMock
from unittest.mock import Mock
from unittest.mock import call
from unittest.mock import patch
from unittest.mock import ANY
else:
try:
from mock import MagicMock # noqa
from mock import Mock # noqa
from mock import call # noqa
from mock import patch # noqa
from mock import ANY # noqa
except ImportError:
raise ImportError(
"SQLAlchemy's test suite requires the "
"'mock' library as of 0.8.2."
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/plugin/bootstrap.py | """
Bootstrapper for test framework plugins.
The entire rationale for this system is to get the modules in plugin/
imported without importing all of the supporting library, so that we can
set up things for testing before coverage starts.
The rationale for all of plugin/ being *in* the supporting library in the
first place is so that the testing and plugin suite is available to other
libraries, mainly external SQLAlchemy and Alembic dialects, to make use
of the same test environment and standard suites available to
SQLAlchemy/Alembic themselves without the need to ship/install a separate
package outside of SQLAlchemy.
NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
this should be removable when Alembic targets SQLAlchemy 1.0.0.
"""
import os
import sys
bootstrap_file = locals()["bootstrap_file"]
to_bootstrap = locals()["to_bootstrap"]
def load_file_as_module(name):
path = os.path.join(os.path.dirname(bootstrap_file), "%s.py" % name)
if sys.version_info >= (3, 3):
from importlib import machinery
mod = machinery.SourceFileLoader(name, path).load_module()
else:
import imp
mod = imp.load_source(name, path)
return mod
if to_bootstrap == "pytest":
sys.modules["sqla_plugin_base"] = load_file_as_module("plugin_base")
sys.modules["sqla_pytestplugin"] = load_file_as_module("pytestplugin")
else:
raise Exception("unknown bootstrap: %s" % to_bootstrap) # noqa
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/plugin/plugin_base.py | # plugin/plugin_base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Testing extensions.
this module is designed to work as a testing-framework-agnostic library,
created so that multiple test frameworks can be supported at once
(mostly so that we can migrate to new ones). The current target
is pytest.
"""
from __future__ import absolute_import
import abc
import re
import sys
py3k = sys.version_info >= (3, 0)
if py3k:
import configparser
ABC = abc.ABC
else:
import ConfigParser as configparser
import collections as collections_abc # noqa
class ABC(object):
__metaclass__ = abc.ABCMeta
# late imports
fixtures = None
engines = None
exclusions = None
warnings = None
profiling = None
assertions = None
requirements = None
config = None
testing = None
util = None
file_config = None
logging = None
include_tags = set()
exclude_tags = set()
options = None
def setup_options(make_option):
make_option(
"--log-info",
action="callback",
type="string",
callback=_log,
help="turn on info logging for <LOG> (multiple OK)",
)
make_option(
"--log-debug",
action="callback",
type="string",
callback=_log,
help="turn on debug logging for <LOG> (multiple OK)",
)
make_option(
"--db",
action="append",
type="string",
dest="db",
help="Use prefab database uri. Multiple OK, "
"first one is run by default.",
)
make_option(
"--dbs",
action="callback",
zeroarg_callback=_list_dbs,
help="List available prefab dbs",
)
make_option(
"--dburi",
action="append",
type="string",
dest="dburi",
help="Database uri. Multiple OK, " "first one is run by default.",
)
make_option(
"--dropfirst",
action="store_true",
dest="dropfirst",
help="Drop all tables in the target database first",
)
make_option(
"--backend-only",
action="store_true",
dest="backend_only",
help="Run only tests marked with __backend__ or __sparse_backend__",
)
make_option(
"--nomemory",
action="store_true",
dest="nomemory",
help="Don't run memory profiling tests",
)
make_option(
"--postgresql-templatedb",
type="string",
help="name of template database to use for PostgreSQL "
"CREATE DATABASE (defaults to current database)",
)
make_option(
"--low-connections",
action="store_true",
dest="low_connections",
help="Use a low number of distinct connections - "
"i.e. for Oracle TNS",
)
make_option(
"--write-idents",
type="string",
dest="write_idents",
help="write out generated follower idents to <file>, "
"when -n<num> is used",
)
make_option(
"--reversetop",
action="store_true",
dest="reversetop",
default=False,
help="Use a random-ordering set implementation in the ORM "
"(helps reveal dependency issues)",
)
make_option(
"--requirements",
action="callback",
type="string",
callback=_requirements_opt,
help="requirements class for testing, overrides setup.cfg",
)
make_option(
"--with-cdecimal",
action="store_true",
dest="cdecimal",
default=False,
help="Monkeypatch the cdecimal library into Python 'decimal' "
"for all tests",
)
make_option(
"--include-tag",
action="callback",
callback=_include_tag,
type="string",
help="Include tests with tag <tag>",
)
make_option(
"--exclude-tag",
action="callback",
callback=_exclude_tag,
type="string",
help="Exclude tests with tag <tag>",
)
make_option(
"--write-profiles",
action="store_true",
dest="write_profiles",
default=False,
help="Write/update failing profiling data.",
)
make_option(
"--force-write-profiles",
action="store_true",
dest="force_write_profiles",
default=False,
help="Unconditionally write/update profiling data.",
)
def configure_follower(follower_ident):
"""Configure required state for a follower.
This invokes in the parent process and typically includes
database creation.
"""
from sqlalchemy.testing import provision
provision.FOLLOWER_IDENT = follower_ident
def memoize_important_follower_config(dict_):
"""Store important configuration we will need to send to a follower.
This invokes in the parent process after normal config is set up.
This is necessary as pytest seems to not be using forking, so we
start with nothing in memory, *but* it isn't running our argparse
callables, so we have to just copy all of that over.
"""
dict_["memoized_config"] = {
"include_tags": include_tags,
"exclude_tags": exclude_tags,
}
def restore_important_follower_config(dict_):
"""Restore important configuration needed by a follower.
This invokes in the follower process.
"""
global include_tags, exclude_tags
include_tags.update(dict_["memoized_config"]["include_tags"])
exclude_tags.update(dict_["memoized_config"]["exclude_tags"])
def read_config():
global file_config
file_config = configparser.ConfigParser()
file_config.read(["setup.cfg", "test.cfg"])
def pre_begin(opt):
"""things to set up early, before coverage might be setup."""
global options
options = opt
for fn in pre_configure:
fn(options, file_config)
def set_coverage_flag(value):
options.has_coverage = value
def post_begin():
"""things to set up later, once we know coverage is running."""
# Lazy setup of other options (post coverage)
for fn in post_configure:
fn(options, file_config)
# late imports, has to happen after config.
global util, fixtures, engines, exclusions, assertions
global warnings, profiling, config, testing
from sqlalchemy import testing # noqa
from sqlalchemy.testing import fixtures, engines, exclusions # noqa
from sqlalchemy.testing import assertions, warnings, profiling # noqa
from sqlalchemy.testing import config # noqa
from sqlalchemy import util # noqa
warnings.setup_filters()
def _log(opt_str, value, parser):
global logging
if not logging:
import logging
logging.basicConfig()
if opt_str.endswith("-info"):
logging.getLogger(value).setLevel(logging.INFO)
elif opt_str.endswith("-debug"):
logging.getLogger(value).setLevel(logging.DEBUG)
def _list_dbs(*args):
print("Available --db options (use --dburi to override)")
for macro in sorted(file_config.options("db")):
print("%20s\t%s" % (macro, file_config.get("db", macro)))
sys.exit(0)
def _requirements_opt(opt_str, value, parser):
_setup_requirements(value)
def _exclude_tag(opt_str, value, parser):
exclude_tags.add(value.replace("-", "_"))
def _include_tag(opt_str, value, parser):
include_tags.add(value.replace("-", "_"))
pre_configure = []
post_configure = []
def pre(fn):
pre_configure.append(fn)
return fn
def post(fn):
post_configure.append(fn)
return fn
@pre
def _setup_options(opt, file_config):
global options
options = opt
@pre
def _set_nomemory(opt, file_config):
if opt.nomemory:
exclude_tags.add("memory_intensive")
@pre
def _monkeypatch_cdecimal(options, file_config):
if options.cdecimal:
import cdecimal
sys.modules["decimal"] = cdecimal
@post
def _init_symbols(options, file_config):
from sqlalchemy.testing import config
config._fixture_functions = _fixture_fn_class()
@post
def _engine_uri(options, file_config):
from sqlalchemy.testing import config
from sqlalchemy import testing
from sqlalchemy.testing import provision
if options.dburi:
db_urls = list(options.dburi)
else:
db_urls = []
if options.db:
for db_token in options.db:
for db in re.split(r"[,\s]+", db_token):
if db not in file_config.options("db"):
raise RuntimeError(
"Unknown URI specifier '%s'. "
"Specify --dbs for known uris." % db
)
else:
db_urls.append(file_config.get("db", db))
if not db_urls:
db_urls.append(file_config.get("db", "default"))
config._current = None
for db_url in db_urls:
if options.write_idents and provision.FOLLOWER_IDENT: # != 'master':
with open(options.write_idents, "a") as file_:
file_.write(provision.FOLLOWER_IDENT + " " + db_url + "\n")
cfg = provision.setup_config(
db_url, options, file_config, provision.FOLLOWER_IDENT
)
if not config._current:
cfg.set_as_current(cfg, testing)
@post
def _requirements(options, file_config):
requirement_cls = file_config.get("sqla_testing", "requirement_cls")
_setup_requirements(requirement_cls)
def _setup_requirements(argument):
from sqlalchemy.testing import config
from sqlalchemy import testing
if config.requirements is not None:
return
modname, clsname = argument.split(":")
# importlib.import_module() only introduced in 2.7, a little
# late
mod = __import__(modname)
for component in modname.split(".")[1:]:
mod = getattr(mod, component)
req_cls = getattr(mod, clsname)
config.requirements = testing.requires = req_cls()
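# Illustrative sketch, not part of the library: the requirements argument is
# a "module:class" specifier, as it would appear under [sqla_testing] in
# setup.cfg; the path below is hypothetical::
#
#     [sqla_testing]
#     requirement_cls = test.requirements:DefaultRequirements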
@post
def _prep_testing_database(options, file_config):
from sqlalchemy.testing import config, util
from sqlalchemy.testing.exclusions import against
from sqlalchemy import schema, inspect
if options.dropfirst:
for cfg in config.Config.all_configs():
e = cfg.db
inspector = inspect(e)
try:
view_names = inspector.get_view_names()
except NotImplementedError:
pass
else:
for vname in view_names:
e.execute(
schema._DropView(
schema.Table(vname, schema.MetaData())
)
)
if config.requirements.schemas.enabled_for_config(cfg):
try:
view_names = inspector.get_view_names(schema="test_schema")
except NotImplementedError:
pass
else:
for vname in view_names:
e.execute(
schema._DropView(
schema.Table(
vname,
schema.MetaData(),
schema="test_schema",
)
)
)
util.drop_all_tables(e, inspector)
if config.requirements.schemas.enabled_for_config(cfg):
util.drop_all_tables(e, inspector, schema=cfg.test_schema)
if against(cfg, "postgresql"):
from sqlalchemy.dialects import postgresql
for enum in inspector.get_enums("*"):
e.execute(
postgresql.DropEnumType(
postgresql.ENUM(
name=enum["name"], schema=enum["schema"]
)
)
)
@post
def _reverse_topological(options, file_config):
if options.reversetop:
from sqlalchemy.orm.util import randomize_unitofwork
randomize_unitofwork()
@post
def _post_setup_options(opt, file_config):
from sqlalchemy.testing import config
config.options = options
config.file_config = file_config
@post
def _setup_profiling(options, file_config):
from sqlalchemy.testing import profiling
profiling._profile_stats = profiling.ProfileStatsFile(
file_config.get("sqla_testing", "profile_file")
)
def want_class(name, cls):
if not issubclass(cls, fixtures.TestBase):
return False
elif name.startswith("_"):
return False
elif (
config.options.backend_only
and not getattr(cls, "__backend__", False)
and not getattr(cls, "__sparse_backend__", False)
):
return False
else:
return True
def want_method(cls, fn):
if not fn.__name__.startswith("test_"):
return False
elif fn.__module__ is None:
return False
elif include_tags:
return (
hasattr(cls, "__tags__")
and exclusions.tags(cls.__tags__).include_test(
include_tags, exclude_tags
)
) or (
hasattr(fn, "_sa_exclusion_extend")
and fn._sa_exclusion_extend.include_test(
include_tags, exclude_tags
)
)
elif exclude_tags and hasattr(cls, "__tags__"):
return exclusions.tags(cls.__tags__).include_test(
include_tags, exclude_tags
)
elif exclude_tags and hasattr(fn, "_sa_exclusion_extend"):
return fn._sa_exclusion_extend.include_test(include_tags, exclude_tags)
else:
return True
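# --- illustrative note (not part of the original module) ---
# e.g. with --include-tag backend --exclude-tag memory-intensive on the
# command line, the hyphenated values are normalized by _include_tag /
# _exclude_tag above into "backend" and "memory_intensive", and
# want_method() then admits a test only if its class-level __tags__ or
# its per-function _sa_exclusion_extend matches that include/exclude set.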
def generate_sub_tests(cls, module):
if getattr(cls, "__backend__", False) or getattr(
cls, "__sparse_backend__", False
):
sparse = getattr(cls, "__sparse_backend__", False)
for cfg in _possible_configs_for_cls(cls, sparse=sparse):
orig_name = cls.__name__
# we can have special chars in these names except for the
# pytest junit plugin, which is tripped up by the brackets
# and periods, so sanitize
alpha_name = re.sub(r"[_\[\]\.]+", "_", cfg.name)
alpha_name = re.sub(r"_+$", "", alpha_name)
name = "%s_%s" % (cls.__name__, alpha_name)
subcls = type(
name,
(cls,),
{"_sa_orig_cls_name": orig_name, "__only_on_config__": cfg},
)
setattr(module, name, subcls)
yield subcls
else:
yield cls
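# --- illustrative sketch (not part of the original module): the same two
# substitutions generate_sub_tests() uses to build junit-safe per-config
# class name suffixes ---
def _junit_safe_demo(cfg_name):
    # collapse underscores, brackets and dots, then trim the tail
    cleaned = re.sub(r"[_\[\]\.]+", "_", cfg_name)
    return re.sub(r"_+$", "", cleaned)
# _junit_safe_demo("postgresql+psycopg2_[xdist1]")
# -> "postgresql+psycopg2_xdist1"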
def start_test_class(cls):
_do_skips(cls)
_setup_engine(cls)
def stop_test_class(cls):
# from sqlalchemy import inspect
# assert not inspect(testing.db).get_table_names()
engines.testing_reaper._stop_test_ctx()
try:
if not options.low_connections:
assertions.global_cleanup_assertions()
finally:
_restore_engine()
def _restore_engine():
config._current.reset(testing)
def final_process_cleanup():
engines.testing_reaper._stop_test_ctx_aggressive()
assertions.global_cleanup_assertions()
_restore_engine()
def _setup_engine(cls):
if getattr(cls, "__engine_options__", None):
eng = engines.testing_engine(options=cls.__engine_options__)
config._current.push_engine(eng, testing)
def before_test(test, test_module_name, test_class, test_name):
# format looks like:
# "test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause"
name = getattr(test_class, "_sa_orig_cls_name", test_class.__name__)
id_ = "%s.%s.%s" % (test_module_name, name, test_name)
profiling._current_test = id_
def after_test(test):
engines.testing_reaper._after_test_ctx()
def _possible_configs_for_cls(cls, reasons=None, sparse=False):
all_configs = set(config.Config.all_configs())
if cls.__unsupported_on__:
spec = exclusions.db_spec(*cls.__unsupported_on__)
for config_obj in list(all_configs):
if spec(config_obj):
all_configs.remove(config_obj)
if getattr(cls, "__only_on__", None):
spec = exclusions.db_spec(*util.to_list(cls.__only_on__))
for config_obj in list(all_configs):
if not spec(config_obj):
all_configs.remove(config_obj)
if getattr(cls, "__only_on_config__", None):
all_configs.intersection_update([cls.__only_on_config__])
if hasattr(cls, "__requires__"):
requirements = config.requirements
for config_obj in list(all_configs):
for requirement in cls.__requires__:
check = getattr(requirements, requirement)
skip_reasons = check.matching_config_reasons(config_obj)
if skip_reasons:
all_configs.remove(config_obj)
if reasons is not None:
reasons.extend(skip_reasons)
break
if hasattr(cls, "__prefer_requires__"):
non_preferred = set()
requirements = config.requirements
for config_obj in list(all_configs):
for requirement in cls.__prefer_requires__:
check = getattr(requirements, requirement)
if not check.enabled_for_config(config_obj):
non_preferred.add(config_obj)
if all_configs.difference(non_preferred):
all_configs.difference_update(non_preferred)
if sparse:
# pick only one config from each base dialect
# sorted so we get the same backend each time selecting the highest
# server version info.
per_dialect = {}
for cfg in reversed(
sorted(
all_configs,
key=lambda cfg: (
cfg.db.name,
cfg.db.dialect.server_version_info,
),
)
):
db = cfg.db.name
if db not in per_dialect:
per_dialect[db] = cfg
return per_dialect.values()
return all_configs
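# --- illustrative sketch (not part of the original module): the "sparse"
# selection above, restated with plain (dialect, version) pairs ---
def _sparse_demo(configs):
    per_dialect = {}
    # highest version sorts last, so iterate in reverse and keep the
    # first config seen for each dialect name
    for name, version in reversed(sorted(configs)):
        if name not in per_dialect:
            per_dialect[name] = (name, version)
    return sorted(per_dialect.values())
# _sparse_demo([("postgresql", (9, 6)), ("postgresql", (12, 3)),
#               ("sqlite", (3, 30))])
# -> [("postgresql", (12, 3)), ("sqlite", (3, 30))]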
def _do_skips(cls):
reasons = []
all_configs = _possible_configs_for_cls(cls, reasons)
if getattr(cls, "__skip_if__", False):
for c in getattr(cls, "__skip_if__"):
if c():
config.skip_test(
"'%s' skipped by %s" % (cls.__name__, c.__name__)
)
if not all_configs:
msg = "'%s' unsupported on any DB implementation %s%s" % (
cls.__name__,
", ".join(
"'%s(%s)+%s'"
% (
config_obj.db.name,
".".join(
str(dig)
for dig in exclusions._server_version(config_obj.db)
),
config_obj.db.driver,
)
for config_obj in config.Config.all_configs()
),
", ".join(reasons),
)
config.skip_test(msg)
elif hasattr(cls, "__prefer_backends__"):
non_preferred = set()
spec = exclusions.db_spec(*util.to_list(cls.__prefer_backends__))
for config_obj in all_configs:
if not spec(config_obj):
non_preferred.add(config_obj)
if all_configs.difference(non_preferred):
all_configs.difference_update(non_preferred)
if config._current not in all_configs:
_setup_config(all_configs.pop(), cls)
def _setup_config(config_obj, ctx):
config._current.push(config_obj, testing)
class FixtureFunctions(ABC):
@abc.abstractmethod
def skip_test_exception(self, *arg, **kw):
raise NotImplementedError()
@abc.abstractmethod
def combinations(self, *args, **kw):
raise NotImplementedError()
@abc.abstractmethod
def param_ident(self, *args, **kw):
raise NotImplementedError()
@abc.abstractmethod
def fixture(self, *arg, **kw):
raise NotImplementedError()
def get_current_test_name(self):
raise NotImplementedError()
_fixture_fn_class = None
def set_fixture_functions(fixture_fn_class):
global _fixture_fn_class
_fixture_fn_class = fixture_fn_class
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/plugin/pytestplugin.py | try:
# installed by bootstrap.py
import sqla_plugin_base as plugin_base
except ImportError:
# assume we're a package, use traditional import
from . import plugin_base
import argparse
import collections
from functools import update_wrapper
import inspect
import itertools
import operator
import os
import re
import sys
import pytest
try:
import typing
except ImportError:
pass
else:
if typing.TYPE_CHECKING:
from typing import Sequence
try:
import xdist # noqa
has_xdist = True
except ImportError:
has_xdist = False
def pytest_addoption(parser):
group = parser.getgroup("sqlalchemy")
def make_option(name, **kw):
callback_ = kw.pop("callback", None)
if callback_:
class CallableAction(argparse.Action):
def __call__(
self, parser, namespace, values, option_string=None
):
callback_(option_string, values, parser)
kw["action"] = CallableAction
zeroarg_callback = kw.pop("zeroarg_callback", None)
if zeroarg_callback:
class CallableAction(argparse.Action):
def __init__(
self,
option_strings,
dest,
default=False,
required=False,
help=None, # noqa
):
super(CallableAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=True,
default=default,
required=required,
help=help,
)
def __call__(
self, parser, namespace, values, option_string=None
):
zeroarg_callback(option_string, values, parser)
kw["action"] = CallableAction
group.addoption(name, **kw)
plugin_base.setup_options(make_option)
plugin_base.read_config()
def pytest_configure(config):
pytest.register_assert_rewrite("sqlalchemy.testing.assertions")
if hasattr(config, "slaveinput"):
plugin_base.restore_important_follower_config(config.slaveinput)
plugin_base.configure_follower(config.slaveinput["follower_ident"])
else:
if config.option.write_idents and os.path.exists(
config.option.write_idents
):
os.remove(config.option.write_idents)
plugin_base.pre_begin(config.option)
plugin_base.set_coverage_flag(
bool(getattr(config.option, "cov_source", False))
)
plugin_base.set_fixture_functions(PytestFixtureFunctions)
def pytest_sessionstart(session):
plugin_base.post_begin()
def pytest_sessionfinish(session):
plugin_base.final_process_cleanup()
if has_xdist:
import uuid
def pytest_configure_node(node):
# the master fills each node's slaveinput dictionary,
# which pytest-xdist will transfer to the subprocess
plugin_base.memoize_important_follower_config(node.slaveinput)
node.slaveinput["follower_ident"] = "test_%s" % uuid.uuid4().hex[0:12]
from sqlalchemy.testing import provision
provision.create_follower_db(node.slaveinput["follower_ident"])
def pytest_testnodedown(node, error):
from sqlalchemy.testing import provision
provision.drop_follower_db(node.slaveinput["follower_ident"])
def pytest_collection_modifyitems(session, config, items):
# look for all those classes that specify __backend__ and
# expand them out into per-database test cases.
# this is much easier to do within pytest_pycollect_makeitem, however
# pytest is iterating through cls.__dict__ as makeitem is
# called which causes a "dictionary changed size" error on py3k.
# I'd submit a pullreq for them to turn it into a list first, but
# it's to suit the rather odd use case here which is that we are adding
# new classes to a module on the fly.
rebuilt_items = collections.defaultdict(
lambda: collections.defaultdict(list)
)
items[:] = [
item
for item in items
if isinstance(item.parent, pytest.Instance)
and not item.parent.parent.name.startswith("_")
]
test_classes = set(item.parent for item in items)
for test_class in test_classes:
for sub_cls in plugin_base.generate_sub_tests(
test_class.cls, test_class.parent.module
):
if sub_cls is not test_class.cls:
per_cls_dict = rebuilt_items[test_class.cls]
# in pytest 5.4.0
# for inst in pytest.Class.from_parent(
# test_class.parent.parent, name=sub_cls.__name__
# ).collect():
for inst in pytest.Class(
sub_cls.__name__, parent=test_class.parent.parent
).collect():
for t in inst.collect():
per_cls_dict[t.name].append(t)
newitems = []
for item in items:
if item.parent.cls in rebuilt_items:
newitems.extend(rebuilt_items[item.parent.cls][item.name])
else:
newitems.append(item)
# the test functions attached to a class don't appear to arrive in a
# deterministic order (unittest sorts them), so sort explicitly here
items[:] = sorted(
newitems,
key=lambda item: (
item.parent.parent.parent.name,
item.parent.parent.name,
item.name,
),
)
def pytest_pycollect_makeitem(collector, name, obj):
if inspect.isclass(obj) and plugin_base.want_class(name, obj):
# in pytest 5.4.0
# return [
# pytest.Class.from_parent(collector,
# name=parametrize_cls.__name__)
# for parametrize_cls in _parametrize_cls(collector.module, obj)
# ]
return [
pytest.Class(parametrize_cls.__name__, parent=collector)
for parametrize_cls in _parametrize_cls(collector.module, obj)
]
elif (
inspect.isfunction(obj)
and isinstance(collector, pytest.Instance)
and plugin_base.want_method(collector.cls, obj)
):
# None means, fall back to default logic, which includes
# method-level parametrize
return None
else:
# empty list means skip this item
return []
_current_class = None
def _parametrize_cls(module, cls):
"""implement a class-based version of pytest parametrize."""
if "_sa_parametrize" not in cls.__dict__:
return [cls]
_sa_parametrize = cls._sa_parametrize
classes = []
for full_param_set in itertools.product(
*[params for argname, params in _sa_parametrize]
):
cls_variables = {}
for argname, param in zip(
[_sa_param[0] for _sa_param in _sa_parametrize], full_param_set
):
if not argname:
raise TypeError("need argnames for class-based combinations")
argname_split = re.split(r",\s*", argname)
for arg, val in zip(argname_split, param.values):
cls_variables[arg] = val
parametrized_name = "_".join(
# token is a string, but in py2k pytest is giving us a unicode,
# so call str() on it.
str(re.sub(r"\W", "", token))
for param in full_param_set
for token in param.id.split("-")
)
name = "%s_%s" % (cls.__name__, parametrized_name)
newcls = type.__new__(type, name, (cls,), cls_variables)
setattr(module, name, newcls)
classes.append(newcls)
return classes
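# --- illustrative sketch (not part of the original module): what
# _parametrize_cls() produces for a single class-level parameter set ---
def _parametrize_cls_demo():
    import types
    Demo = type("Demo", (object,), {})
    Demo._sa_parametrize = [("x, y", [pytest.param(1, 2, id="one-two")])]
    mod = types.ModuleType("parametrize_demo")
    # one subclass per parameter combination, named after the param ids
    return [c.__name__ for c in _parametrize_cls(mod, Demo)]
# _parametrize_cls_demo() -> ["Demo_one_two"], where Demo_one_two has the
# class variables x == 1 and y == 2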
def pytest_runtest_setup(item):
# this hook is only invoked for the items produced in
# pytest_collection_modifyitems, so class-based setup and teardown
# have to be reconstructed from the per-item calls.
global _current_class
if not isinstance(item, pytest.Function):
return
# ... so we're doing a little dance here to figure it out...
if _current_class is None:
class_setup(item.parent.parent)
_current_class = item.parent.parent
# this is needed for the class-level, to ensure that the
# teardown runs after the class is completed with its own
# class-level teardown...
def finalize():
global _current_class
class_teardown(item.parent.parent)
_current_class = None
item.parent.parent.addfinalizer(finalize)
test_setup(item)
def pytest_runtest_teardown(item):
# ...but this works better as the hook here rather than
# using a finalizer, as the finalizer seems to get in the way
# of the test reporting failures correctly (you get a bunch of
# pytest assertion stuff instead)
test_teardown(item)
def test_setup(item):
plugin_base.before_test(
item, item.parent.module.__name__, item.parent.cls, item.name
)
def test_teardown(item):
plugin_base.after_test(item)
def class_setup(item):
plugin_base.start_test_class(item.cls)
def class_teardown(item):
plugin_base.stop_test_class(item.cls)
def getargspec(fn):
if sys.version_info.major == 3:
return inspect.getfullargspec(fn)
else:
return inspect.getargspec(fn)
def _pytest_fn_decorator(target):
"""Port of langhelpers.decorator with pytest-specific tricks."""
from sqlalchemy.util.langhelpers import format_argspec_plus
from sqlalchemy.util.compat import inspect_getfullargspec
def _exec_code_in_env(code, env, fn_name):
exec(code, env)
return env[fn_name]
def decorate(fn, add_positional_parameters=()):
spec = inspect_getfullargspec(fn)
if add_positional_parameters:
spec.args.extend(add_positional_parameters)
metadata = dict(target="target", fn="fn", name=fn.__name__)
metadata.update(format_argspec_plus(spec, grouped=False))
code = (
"""\
def %(name)s(%(args)s):
return %(target)s(%(fn)s, %(apply_kw)s)
"""
% metadata
)
decorated = _exec_code_in_env(
code, {"target": target, "fn": fn}, fn.__name__
)
if not add_positional_parameters:
decorated.__defaults__ = getattr(fn, "im_func", fn).__defaults__
decorated.__wrapped__ = fn
return update_wrapper(decorated, fn)
else:
# this is the pytest-specific hack: skip the full update_wrapper(),
# because pytest goes out of its way to introspect the wrapper's own
# signature when discovering arguments for the wrapped function
decorated.__module__ = fn.__module__
decorated.__name__ = fn.__name__
return decorated
return decorate
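# --- illustrative sketch (not part of the original module): for a target
# t and a function def f(a, b), decorate() execs a wrapper of roughly the
# shape
#
#     def f(a, b):
#         return t(f, a, b)
#
# so that pytest's signature introspection still sees the original (and
# possibly extended) parameter names.  A quick self-check:
def _decorator_demo():
    def tracer(fn, *args, **kw):
        return ("traced", fn(*args, **kw))
    @_pytest_fn_decorator(tracer)
    def add(a, b):
        return a + b
    # the wrapper's own signature still reports ["a", "b"]
    return add(2, 3), getargspec(add).args
# _decorator_demo() -> (("traced", 5), ["a", "b"])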
class PytestFixtureFunctions(plugin_base.FixtureFunctions):
def skip_test_exception(self, *arg, **kw):
return pytest.skip.Exception(*arg, **kw)
_combination_id_fns = {
"i": lambda obj: obj,
"r": repr,
"s": str,
"n": operator.attrgetter("__name__"),
}
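# --- illustrative note (not part of the original module) ---
# each character of an id_ template maps positionally onto one element of
# a parameter tuple: "i" renders the element as-is into the test id,
# "r"/"s" apply repr()/str(), "n" substitutes __name__, and "a" marks an
# element that is passed to the test but kept out of the id.  e.g.
# id_="iaaa" with ("global_string", True, "select 1", True) (see
# ServerSideCursorsTest) yields the id "global_string".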
def combinations(self, *arg_sets, **kw):
"""facade for pytest.mark.paramtrize.
Automatically derives argument names from the callable which in our
case is always a method on a class with positional arguments.
ids for parameter sets are derived using an optional template.
"""
from sqlalchemy.testing import exclusions
if sys.version_info.major == 3:
if len(arg_sets) == 1 and hasattr(arg_sets[0], "__next__"):
arg_sets = list(arg_sets[0])
else:
if len(arg_sets) == 1 and hasattr(arg_sets[0], "next"):
arg_sets = list(arg_sets[0])
argnames = kw.pop("argnames", None)
def _filter_exclusions(args):
result = []
gathered_exclusions = []
for a in args:
if isinstance(a, exclusions.compound):
gathered_exclusions.append(a)
else:
result.append(a)
return result, gathered_exclusions
id_ = kw.pop("id_", None)
tobuild_pytest_params = []
has_exclusions = False
if id_:
_combination_id_fns = self._combination_id_fns
# because itemgetter is not consistent for one argument vs.
# multiple, make it multiple in all cases and use a slice
# to omit the first argument
_arg_getter = operator.itemgetter(
0,
*[
idx
for idx, char in enumerate(id_)
if char in ("n", "r", "s", "a")
]
)
fns = [
(operator.itemgetter(idx), _combination_id_fns[char])
for idx, char in enumerate(id_)
if char in _combination_id_fns
]
for arg in arg_sets:
if not isinstance(arg, tuple):
arg = (arg,)
fn_params, param_exclusions = _filter_exclusions(arg)
parameters = _arg_getter(fn_params)[1:]
if param_exclusions:
has_exclusions = True
tobuild_pytest_params.append(
(
parameters,
param_exclusions,
"-".join(
comb_fn(getter(arg)) for getter, comb_fn in fns
),
)
)
else:
for arg in arg_sets:
if not isinstance(arg, tuple):
arg = (arg,)
fn_params, param_exclusions = _filter_exclusions(arg)
if param_exclusions:
has_exclusions = True
tobuild_pytest_params.append(
(fn_params, param_exclusions, None)
)
pytest_params = []
for parameters, param_exclusions, id_ in tobuild_pytest_params:
if has_exclusions:
parameters += (param_exclusions,)
param = pytest.param(*parameters, id=id_)
pytest_params.append(param)
def decorate(fn):
if inspect.isclass(fn):
if has_exclusions:
raise NotImplementedError(
"exclusions not supported for class level combinations"
)
if "_sa_parametrize" not in fn.__dict__:
fn._sa_parametrize = []
fn._sa_parametrize.append((argnames, pytest_params))
return fn
else:
if argnames is None:
_argnames = getargspec(fn).args[1:] # type: Sequence(str)
else:
_argnames = re.split(
r", *", argnames
) # type: Sequence(str)
if has_exclusions:
_argnames += ["_exclusions"]
@_pytest_fn_decorator
def check_exclusions(fn, *args, **kw):
_exclusions = args[-1]
if _exclusions:
exlu = exclusions.compound().add(*_exclusions)
fn = exlu(fn)
return fn(*args[0:-1], **kw)
def process_metadata(spec):
spec.args.append("_exclusions")
fn = check_exclusions(
fn, add_positional_parameters=("_exclusions",)
)
return pytest.mark.parametrize(_argnames, pytest_params)(fn)
return decorate
def param_ident(self, *parameters):
ident = parameters[0]
return pytest.param(*parameters[1:], id=ident)
def fixture(self, *arg, **kw):
return pytest.fixture(*arg, **kw)
def get_current_test_name(self):
return os.environ.get("PYTEST_CURRENT_TEST")
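# --- illustrative note (not part of the original module) ---
# PYTEST_CURRENT_TEST is maintained by pytest itself and looks like
# "test/sql/test_select.py::SelectTest::test_foo (call)", the trailing
# phase cycling through setup / call / teardown.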
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/suite/test_sequence.py | from .. import config
from .. import fixtures
from ..assertions import eq_
from ..config import requirements
from ..schema import Column
from ..schema import Table
from ... import Integer
from ... import MetaData
from ... import schema
from ... import Sequence
from ... import String
from ... import testing
class SequenceTest(fixtures.TablesTest):
__requires__ = ("sequences",)
__backend__ = True
run_create_tables = "each"
@classmethod
def define_tables(cls, metadata):
Table(
"seq_pk",
metadata,
Column("id", Integer, Sequence("tab_id_seq"), primary_key=True),
Column("data", String(50)),
)
Table(
"seq_opt_pk",
metadata,
Column(
"id",
Integer,
Sequence("tab_id_seq", optional=True),
primary_key=True,
),
Column("data", String(50)),
)
def test_insert_roundtrip(self):
config.db.execute(self.tables.seq_pk.insert(), data="some data")
self._assert_round_trip(self.tables.seq_pk, config.db)
def test_insert_lastrowid(self):
r = config.db.execute(self.tables.seq_pk.insert(), data="some data")
eq_(r.inserted_primary_key, [1])
def test_nextval_direct(self):
r = config.db.execute(self.tables.seq_pk.c.id.default)
eq_(r, 1)
@requirements.sequences_optional
def test_optional_seq(self):
r = config.db.execute(
self.tables.seq_opt_pk.insert(), data="some data"
)
eq_(r.inserted_primary_key, [1])
def _assert_round_trip(self, table, conn):
row = conn.execute(table.select()).first()
eq_(row, (1, "some data"))
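# --- illustrative note (not part of the original module) ---
# Sequence("tab_id_seq", optional=True), used by seq_opt_pk above, means
# "use this sequence only on backends that have no other way to generate
# autoincrementing integer primary keys"; on databases with native
# autoincrement the sequence is neither created nor fired, yet the
# inserted_primary_key assertion holds either way.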
class SequenceCompilerTest(testing.AssertsCompiledSQL, fixtures.TestBase):
__requires__ = ("sequences",)
__backend__ = True
def test_literal_binds_inline_compile(self):
table = Table(
"x",
MetaData(),
Column("y", Integer, Sequence("y_seq")),
Column("q", Integer),
)
stmt = table.insert().values(q=5)
seq_nextval = testing.db.dialect.statement_compiler(
statement=None, dialect=testing.db.dialect
).visit_sequence(Sequence("y_seq"))
self.assert_compile(
stmt,
"INSERT INTO x (y, q) VALUES (%s, 5)" % (seq_nextval,),
literal_binds=True,
dialect=testing.db.dialect,
)
class HasSequenceTest(fixtures.TestBase):
__requires__ = ("sequences",)
__backend__ = True
def test_has_sequence(self):
s1 = Sequence("user_id_seq")
testing.db.execute(schema.CreateSequence(s1))
try:
eq_(
testing.db.dialect.has_sequence(testing.db, "user_id_seq"),
True,
)
finally:
testing.db.execute(schema.DropSequence(s1))
@testing.requires.schemas
def test_has_sequence_schema(self):
s1 = Sequence("user_id_seq", schema=config.test_schema)
testing.db.execute(schema.CreateSequence(s1))
try:
eq_(
testing.db.dialect.has_sequence(
testing.db, "user_id_seq", schema=config.test_schema
),
True,
)
finally:
testing.db.execute(schema.DropSequence(s1))
def test_has_sequence_neg(self):
eq_(testing.db.dialect.has_sequence(testing.db, "user_id_seq"), False)
@testing.requires.schemas
def test_has_sequence_schemas_neg(self):
eq_(
testing.db.dialect.has_sequence(
testing.db, "user_id_seq", schema=config.test_schema
),
False,
)
@testing.requires.schemas
def test_has_sequence_default_not_in_remote(self):
s1 = Sequence("user_id_seq")
testing.db.execute(schema.CreateSequence(s1))
try:
eq_(
testing.db.dialect.has_sequence(
testing.db, "user_id_seq", schema=config.test_schema
),
False,
)
finally:
testing.db.execute(schema.DropSequence(s1))
@testing.requires.schemas
def test_has_sequence_remote_not_in_default(self):
s1 = Sequence("user_id_seq", schema=config.test_schema)
testing.db.execute(schema.CreateSequence(s1))
try:
eq_(
testing.db.dialect.has_sequence(testing.db, "user_id_seq"),
False,
)
finally:
testing.db.execute(schema.DropSequence(s1))
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/suite/test_results.py | import datetime
from .. import config
from .. import engines
from .. import fixtures
from ..assertions import eq_
from ..config import requirements
from ..schema import Column
from ..schema import Table
from ... import DateTime
from ... import func
from ... import Integer
from ... import select
from ... import sql
from ... import String
from ... import testing
from ... import text
class RowFetchTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"plain_pk",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
Table(
"has_dates",
metadata,
Column("id", Integer, primary_key=True),
Column("today", DateTime),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.plain_pk.insert(),
[
{"id": 1, "data": "d1"},
{"id": 2, "data": "d2"},
{"id": 3, "data": "d3"},
],
)
connection.execute(
cls.tables.has_dates.insert(),
[{"id": 1, "today": datetime.datetime(2006, 5, 12, 12, 0, 0)}],
)
def test_via_string(self):
row = config.db.execute(
self.tables.plain_pk.select().order_by(self.tables.plain_pk.c.id)
).first()
eq_(row["id"], 1)
eq_(row["data"], "d1")
def test_via_int(self):
row = config.db.execute(
self.tables.plain_pk.select().order_by(self.tables.plain_pk.c.id)
).first()
eq_(row[0], 1)
eq_(row[1], "d1")
def test_via_col_object(self):
row = config.db.execute(
self.tables.plain_pk.select().order_by(self.tables.plain_pk.c.id)
).first()
eq_(row[self.tables.plain_pk.c.id], 1)
eq_(row[self.tables.plain_pk.c.data], "d1")
@requirements.duplicate_names_in_cursor_description
def test_row_with_dupe_names(self):
result = config.db.execute(
select(
[
self.tables.plain_pk.c.data,
self.tables.plain_pk.c.data.label("data"),
]
).order_by(self.tables.plain_pk.c.id)
)
row = result.first()
eq_(result.keys(), ["data", "data"])
eq_(row, ("d1", "d1"))
def test_row_w_scalar_select(self):
"""test that a scalar select as a column is returned as such
and that type conversion works OK.
(this is half a SQLAlchemy Core test and half to catch database
backends that may have unusual behavior with scalar selects.)
"""
datetable = self.tables.has_dates
s = select([datetable.alias("x").c.today]).as_scalar()
s2 = select([datetable.c.id, s.label("somelabel")])
row = config.db.execute(s2).first()
eq_(row["somelabel"], datetime.datetime(2006, 5, 12, 12, 0, 0))
class PercentSchemaNamesTest(fixtures.TablesTest):
"""tests using percent signs, spaces in table and column names.
This is a very fringe use case, doesn't work for MySQL
or PostgreSQL. the requirement, "percent_schema_names",
is marked "skip" by default.
"""
__requires__ = ("percent_schema_names",)
__backend__ = True
@classmethod
def define_tables(cls, metadata):
cls.tables.percent_table = Table(
"percent%table",
metadata,
Column("percent%", Integer),
Column("spaces % more spaces", Integer),
)
cls.tables.lightweight_percent_table = sql.table(
"percent%table",
sql.column("percent%"),
sql.column("spaces % more spaces"),
)
def test_single_roundtrip(self):
percent_table = self.tables.percent_table
for params in [
{"percent%": 5, "spaces % more spaces": 12},
{"percent%": 7, "spaces % more spaces": 11},
{"percent%": 9, "spaces % more spaces": 10},
{"percent%": 11, "spaces % more spaces": 9},
]:
config.db.execute(percent_table.insert(), params)
self._assert_table()
def test_executemany_roundtrip(self):
percent_table = self.tables.percent_table
config.db.execute(
percent_table.insert(), {"percent%": 5, "spaces % more spaces": 12}
)
config.db.execute(
percent_table.insert(),
[
{"percent%": 7, "spaces % more spaces": 11},
{"percent%": 9, "spaces % more spaces": 10},
{"percent%": 11, "spaces % more spaces": 9},
],
)
self._assert_table()
def _assert_table(self):
percent_table = self.tables.percent_table
lightweight_percent_table = self.tables.lightweight_percent_table
for table in (
percent_table,
percent_table.alias(),
lightweight_percent_table,
lightweight_percent_table.alias(),
):
eq_(
list(
config.db.execute(
table.select().order_by(table.c["percent%"])
)
),
[(5, 12), (7, 11), (9, 10), (11, 9)],
)
eq_(
list(
config.db.execute(
table.select()
.where(table.c["spaces % more spaces"].in_([9, 10]))
.order_by(table.c["percent%"])
)
),
[(9, 10), (11, 9)],
)
row = config.db.execute(
table.select().order_by(table.c["percent%"])
).first()
eq_(row["percent%"], 5)
eq_(row["spaces % more spaces"], 12)
eq_(row[table.c["percent%"]], 5)
eq_(row[table.c["spaces % more spaces"]], 12)
config.db.execute(
percent_table.update().values(
{percent_table.c["spaces % more spaces"]: 15}
)
)
eq_(
list(
config.db.execute(
percent_table.select().order_by(
percent_table.c["percent%"]
)
)
),
[(5, 15), (7, 15), (9, 15), (11, 15)],
)
class ServerSideCursorsTest(
fixtures.TestBase, testing.AssertsExecutionResults
):
__requires__ = ("server_side_cursors",)
__backend__ = True
def _is_server_side(self, cursor):
if self.engine.dialect.driver == "psycopg2":
return bool(cursor.name)
elif self.engine.dialect.driver == "pymysql":
sscursor = __import__("pymysql.cursors").cursors.SSCursor
return isinstance(cursor, sscursor)
elif self.engine.dialect.driver == "mysqldb":
sscursor = __import__("MySQLdb.cursors").cursors.SSCursor
return isinstance(cursor, sscursor)
else:
return False
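# --- illustrative note (not part of the original module) ---
# psycopg2 allocates a *named* cursor for server-side result sets, so a
# non-empty cursor.name is the marker there; the MySQL drivers instead
# expose a dedicated SSCursor class, detected above via isinstance().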
def _fixture(self, server_side_cursors):
self.engine = engines.testing_engine(
options={"server_side_cursors": server_side_cursors}
)
return self.engine
def tearDown(self):
engines.testing_reaper.close_all()
self.engine.dispose()
@testing.combinations(
("global_string", True, "select 1", True),
("global_text", True, text("select 1"), True),
("global_expr", True, select([1]), True),
("global_off_explicit", False, text("select 1"), False),
(
"stmt_option",
False,
select([1]).execution_options(stream_results=True),
True,
),
(
"stmt_option_disabled",
True,
select([1]).execution_options(stream_results=False),
False,
),
("for_update_expr", True, select([1]).with_for_update(), True),
("for_update_string", True, "SELECT 1 FOR UPDATE", True),
("text_no_ss", False, text("select 42"), False),
(
"text_ss_option",
False,
text("select 42").execution_options(stream_results=True),
True,
),
id_="iaaa",
argnames="engine_ss_arg, statement, cursor_ss_status",
)
def test_ss_cursor_status(
self, engine_ss_arg, statement, cursor_ss_status
):
engine = self._fixture(engine_ss_arg)
with engine.begin() as conn:
result = conn.execute(statement)
eq_(self._is_server_side(result.cursor), cursor_ss_status)
result.close()
def test_conn_option(self):
engine = self._fixture(False)
# should be enabled for this one
result = (
engine.connect()
.execution_options(stream_results=True)
.execute("select 1")
)
assert self._is_server_side(result.cursor)
def test_stmt_enabled_conn_option_disabled(self):
engine = self._fixture(False)
s = select([1]).execution_options(stream_results=True)
# not this one
result = (
engine.connect().execution_options(stream_results=False).execute(s)
)
assert not self._is_server_side(result.cursor)
def test_aliases_and_ss(self):
engine = self._fixture(False)
s1 = select([1]).execution_options(stream_results=True).alias()
with engine.begin() as conn:
result = conn.execute(s1)
assert self._is_server_side(result.cursor)
result.close()
# s1's options shouldn't affect s2 when s2 is used as a
# from_obj.
s2 = select([1], from_obj=s1)
with engine.begin() as conn:
result = conn.execute(s2)
assert not self._is_server_side(result.cursor)
result.close()
@testing.provide_metadata
def test_roundtrip(self):
md = self.metadata
self._fixture(True)
test_table = Table(
"test_table",
md,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
test_table.create(checkfirst=True)
test_table.insert().execute(data="data1")
test_table.insert().execute(data="data2")
eq_(
test_table.select().order_by(test_table.c.id).execute().fetchall(),
[(1, "data1"), (2, "data2")],
)
test_table.update().where(test_table.c.id == 2).values(
data=test_table.c.data + " updated"
).execute()
eq_(
test_table.select().order_by(test_table.c.id).execute().fetchall(),
[(1, "data1"), (2, "data2 updated")],
)
test_table.delete().execute()
eq_(select([func.count("*")]).select_from(test_table).scalar(), 0)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/suite/test_insert.py | from .. import config
from .. import engines
from .. import fixtures
from ..assertions import eq_
from ..config import requirements
from ..schema import Column
from ..schema import Table
from ... import Integer
from ... import literal
from ... import literal_column
from ... import select
from ... import String
class LastrowidTest(fixtures.TablesTest):
run_deletes = "each"
__backend__ = True
__requires__ = "implements_get_lastrowid", "autoincrement_insert"
__engine_options__ = {"implicit_returning": False}
@classmethod
def define_tables(cls, metadata):
Table(
"autoinc_pk",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(50)),
)
Table(
"manual_pk",
metadata,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("data", String(50)),
)
def _assert_round_trip(self, table, conn):
row = conn.execute(table.select()).first()
eq_(row, (config.db.dialect.default_sequence_base, "some data"))
def test_autoincrement_on_insert(self):
config.db.execute(self.tables.autoinc_pk.insert(), data="some data")
self._assert_round_trip(self.tables.autoinc_pk, config.db)
def test_last_inserted_id(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(), data="some data"
)
pk = config.db.scalar(select([self.tables.autoinc_pk.c.id]))
eq_(r.inserted_primary_key, [pk])
# failed on pypy1.9 but seems to be OK on pypy 2.1
# @exclusions.fails_if(lambda: util.pypy,
# "lastrowid not maintained after "
# "connection close")
@requirements.dbapi_lastrowid
def test_native_lastrowid_autoinc(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(), data="some data"
)
lastrowid = r.lastrowid
pk = config.db.scalar(select([self.tables.autoinc_pk.c.id]))
eq_(lastrowid, pk)
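# --- illustrative note (not part of the original module) ---
# __engine_options__ = {"implicit_returning": False} above pins the engine
# to the cursor.lastrowid / post-insert fetch path even on backends that
# could use RETURNING, which is exactly the code path this suite is meant
# to exercise.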
class InsertBehaviorTest(fixtures.TablesTest):
run_deletes = "each"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"autoinc_pk",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(50)),
)
Table(
"manual_pk",
metadata,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("data", String(50)),
)
Table(
"includes_defaults",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(50)),
Column("x", Integer, default=5),
Column(
"y",
Integer,
default=literal_column("2", type_=Integer) + literal(2),
),
)
def test_autoclose_on_insert(self):
if requirements.returning.enabled:
engine = engines.testing_engine(
options={"implicit_returning": False}
)
else:
engine = config.db
with engine.begin() as conn:
r = conn.execute(self.tables.autoinc_pk.insert(), data="some data")
assert r._soft_closed
assert not r.closed
assert r.is_insert
assert not r.returns_rows
@requirements.returning
def test_autoclose_on_insert_implicit_returning(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(), data="some data"
)
assert r._soft_closed
assert not r.closed
assert r.is_insert
assert not r.returns_rows
@requirements.empty_inserts
def test_empty_insert(self):
r = config.db.execute(self.tables.autoinc_pk.insert())
assert r._soft_closed
assert not r.closed
r = config.db.execute(
self.tables.autoinc_pk.select().where(
self.tables.autoinc_pk.c.id != None
)
)
assert len(r.fetchall())
@requirements.insert_from_select
def test_insert_from_select_autoinc(self):
src_table = self.tables.manual_pk
dest_table = self.tables.autoinc_pk
config.db.execute(
src_table.insert(),
[
dict(id=1, data="data1"),
dict(id=2, data="data2"),
dict(id=3, data="data3"),
],
)
result = config.db.execute(
dest_table.insert().from_select(
("data",),
select([src_table.c.data]).where(
src_table.c.data.in_(["data2", "data3"])
),
)
)
eq_(result.inserted_primary_key, [None])
result = config.db.execute(
select([dest_table.c.data]).order_by(dest_table.c.data)
)
eq_(result.fetchall(), [("data2",), ("data3",)])
@requirements.insert_from_select
def test_insert_from_select_autoinc_no_rows(self):
src_table = self.tables.manual_pk
dest_table = self.tables.autoinc_pk
result = config.db.execute(
dest_table.insert().from_select(
("data",),
select([src_table.c.data]).where(
src_table.c.data.in_(["data2", "data3"])
),
)
)
eq_(result.inserted_primary_key, [None])
result = config.db.execute(
select([dest_table.c.data]).order_by(dest_table.c.data)
)
eq_(result.fetchall(), [])
@requirements.insert_from_select
def test_insert_from_select(self):
table = self.tables.manual_pk
config.db.execute(
table.insert(),
[
dict(id=1, data="data1"),
dict(id=2, data="data2"),
dict(id=3, data="data3"),
],
)
config.db.execute(
table.insert(inline=True).from_select(
("id", "data"),
select([table.c.id + 5, table.c.data]).where(
table.c.data.in_(["data2", "data3"])
),
)
)
eq_(
config.db.execute(
select([table.c.data]).order_by(table.c.data)
).fetchall(),
[("data1",), ("data2",), ("data2",), ("data3",), ("data3",)],
)
@requirements.insert_from_select
def test_insert_from_select_with_defaults(self):
table = self.tables.includes_defaults
config.db.execute(
table.insert(),
[
dict(id=1, data="data1"),
dict(id=2, data="data2"),
dict(id=3, data="data3"),
],
)
config.db.execute(
table.insert(inline=True).from_select(
("id", "data"),
select([table.c.id + 5, table.c.data]).where(
table.c.data.in_(["data2", "data3"])
),
)
)
eq_(
config.db.execute(
select([table]).order_by(table.c.data, table.c.id)
).fetchall(),
[
(1, "data1", 5, 4),
(2, "data2", 5, 4),
(7, "data2", 5, 4),
(3, "data3", 5, 4),
(8, "data3", 5, 4),
],
)
class ReturningTest(fixtures.TablesTest):
run_create_tables = "each"
__requires__ = "returning", "autoincrement_insert"
__backend__ = True
__engine_options__ = {"implicit_returning": True}
def _assert_round_trip(self, table, conn):
row = conn.execute(table.select()).first()
eq_(row, (config.db.dialect.default_sequence_base, "some data"))
@classmethod
def define_tables(cls, metadata):
Table(
"autoinc_pk",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(50)),
)
@requirements.fetch_rows_post_commit
def test_explicit_returning_pk_autocommit(self):
engine = config.db
table = self.tables.autoinc_pk
with engine.begin() as conn:
r = conn.execute(
table.insert().returning(table.c.id), data="some data"
)
pk = r.first()[0]
fetched_pk = config.db.scalar(select([table.c.id]))
eq_(fetched_pk, pk)
def test_explicit_returning_pk_no_autocommit(self):
engine = config.db
table = self.tables.autoinc_pk
with engine.begin() as conn:
r = conn.execute(
table.insert().returning(table.c.id), data="some data"
)
pk = r.first()[0]
fetched_pk = config.db.scalar(select([table.c.id]))
eq_(fetched_pk, pk)
def test_autoincrement_on_insert_implicit_returning(self):
config.db.execute(self.tables.autoinc_pk.insert(), data="some data")
self._assert_round_trip(self.tables.autoinc_pk, config.db)
def test_last_inserted_id_implicit_returning(self):
r = config.db.execute(
self.tables.autoinc_pk.insert(), data="some data"
)
pk = config.db.scalar(select([self.tables.autoinc_pk.c.id]))
eq_(r.inserted_primary_key, [pk])
__all__ = ("LastrowidTest", "InsertBehaviorTest", "ReturningTest")
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/suite/test_select.py | from .. import config
from .. import fixtures
from ..assertions import eq_
from ..assertions import in_
from ..schema import Column
from ..schema import Table
from ... import bindparam
from ... import case
from ... import Computed
from ... import false
from ... import func
from ... import Integer
from ... import literal_column
from ... import null
from ... import select
from ... import String
from ... import testing
from ... import text
from ... import true
from ... import tuple_
from ... import union
from ... import util
class CollateTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(100)),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "data": "collate data1"},
{"id": 2, "data": "collate data2"},
],
)
def _assert_result(self, select, result):
eq_(config.db.execute(select).fetchall(), result)
@testing.requires.order_by_collation
def test_collate_order_by(self):
collation = testing.requires.get_order_by_collation(testing.config)
self._assert_result(
select([self.tables.some_table]).order_by(
self.tables.some_table.c.data.collate(collation).asc()
),
[(1, "collate data1"), (2, "collate data2")],
)
class OrderByLabelTest(fixtures.TablesTest):
"""Test the dialect sends appropriate ORDER BY expressions when
labels are used.
This essentially exercises the "supports_simple_order_by_label"
setting.
"""
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
Column("q", String(50)),
Column("p", String(50)),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "x": 1, "y": 2, "q": "q1", "p": "p3"},
{"id": 2, "x": 2, "y": 3, "q": "q2", "p": "p2"},
{"id": 3, "x": 3, "y": 4, "q": "q3", "p": "p1"},
],
)
def _assert_result(self, select, result):
eq_(config.db.execute(select).fetchall(), result)
def test_plain(self):
table = self.tables.some_table
lx = table.c.x.label("lx")
self._assert_result(select([lx]).order_by(lx), [(1,), (2,), (3,)])
def test_composed_int(self):
table = self.tables.some_table
lx = (table.c.x + table.c.y).label("lx")
self._assert_result(select([lx]).order_by(lx), [(3,), (5,), (7,)])
def test_composed_multiple(self):
table = self.tables.some_table
lx = (table.c.x + table.c.y).label("lx")
ly = (func.lower(table.c.q) + table.c.p).label("ly")
self._assert_result(
select([lx, ly]).order_by(lx, ly.desc()),
[(3, util.u("q1p3")), (5, util.u("q2p2")), (7, util.u("q3p1"))],
)
def test_plain_desc(self):
table = self.tables.some_table
lx = table.c.x.label("lx")
self._assert_result(
select([lx]).order_by(lx.desc()), [(3,), (2,), (1,)]
)
def test_composed_int_desc(self):
table = self.tables.some_table
lx = (table.c.x + table.c.y).label("lx")
self._assert_result(
select([lx]).order_by(lx.desc()), [(7,), (5,), (3,)]
)
@testing.requires.group_by_complex_expression
def test_group_by_composed(self):
table = self.tables.some_table
expr = (table.c.x + table.c.y).label("lx")
stmt = (
select([func.count(table.c.id), expr])
.group_by(expr)
.order_by(expr)
)
self._assert_result(stmt, [(1, 3), (1, 5), (1, 7)])
class LimitOffsetTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "x": 1, "y": 2},
{"id": 2, "x": 2, "y": 3},
{"id": 3, "x": 3, "y": 4},
{"id": 4, "x": 4, "y": 5},
],
)
def _assert_result(self, select, result, params=()):
eq_(config.db.execute(select, params).fetchall(), result)
def test_simple_limit(self):
table = self.tables.some_table
self._assert_result(
select([table]).order_by(table.c.id).limit(2),
[(1, 1, 2), (2, 2, 3)],
)
@testing.requires.offset
def test_simple_offset(self):
table = self.tables.some_table
self._assert_result(
select([table]).order_by(table.c.id).offset(2),
[(3, 3, 4), (4, 4, 5)],
)
@testing.requires.offset
def test_simple_limit_offset(self):
table = self.tables.some_table
self._assert_result(
select([table]).order_by(table.c.id).limit(2).offset(1),
[(2, 2, 3), (3, 3, 4)],
)
@testing.requires.offset
def test_limit_offset_nobinds(self):
"""test that 'literal binds' mode works - no bound params."""
table = self.tables.some_table
stmt = select([table]).order_by(table.c.id).limit(2).offset(1)
sql = stmt.compile(
dialect=config.db.dialect, compile_kwargs={"literal_binds": True}
)
sql = str(sql)
self._assert_result(sql, [(2, 2, 3), (3, 3, 4)])
@testing.requires.bound_limit_offset
def test_bound_limit(self):
table = self.tables.some_table
self._assert_result(
select([table]).order_by(table.c.id).limit(bindparam("l")),
[(1, 1, 2), (2, 2, 3)],
params={"l": 2},
)
@testing.requires.bound_limit_offset
def test_bound_offset(self):
table = self.tables.some_table
self._assert_result(
select([table]).order_by(table.c.id).offset(bindparam("o")),
[(3, 3, 4), (4, 4, 5)],
params={"o": 2},
)
@testing.requires.bound_limit_offset
def test_bound_limit_offset(self):
table = self.tables.some_table
self._assert_result(
select([table])
.order_by(table.c.id)
.limit(bindparam("l"))
.offset(bindparam("o")),
[(2, 2, 3), (3, 3, 4)],
params={"l": 2, "o": 1},
)
class CompoundSelectTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "x": 1, "y": 2},
{"id": 2, "x": 2, "y": 3},
{"id": 3, "x": 3, "y": 4},
{"id": 4, "x": 4, "y": 5},
],
)
def _assert_result(self, select, result, params=()):
eq_(config.db.execute(select, params).fetchall(), result)
def test_plain_union(self):
table = self.tables.some_table
s1 = select([table]).where(table.c.id == 2)
s2 = select([table]).where(table.c.id == 3)
u1 = union(s1, s2)
self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])
def test_select_from_plain_union(self):
table = self.tables.some_table
s1 = select([table]).where(table.c.id == 2)
s2 = select([table]).where(table.c.id == 3)
u1 = union(s1, s2).alias().select()
self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])
@testing.requires.order_by_col_from_union
@testing.requires.parens_in_union_contained_select_w_limit_offset
def test_limit_offset_selectable_in_unions(self):
table = self.tables.some_table
s1 = (
select([table])
.where(table.c.id == 2)
.limit(1)
.order_by(table.c.id)
)
s2 = (
select([table])
.where(table.c.id == 3)
.limit(1)
.order_by(table.c.id)
)
u1 = union(s1, s2).limit(2)
self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])
@testing.requires.parens_in_union_contained_select_wo_limit_offset
def test_order_by_selectable_in_unions(self):
table = self.tables.some_table
s1 = select([table]).where(table.c.id == 2).order_by(table.c.id)
s2 = select([table]).where(table.c.id == 3).order_by(table.c.id)
u1 = union(s1, s2).limit(2)
self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])
def test_distinct_selectable_in_unions(self):
table = self.tables.some_table
s1 = select([table]).where(table.c.id == 2).distinct()
s2 = select([table]).where(table.c.id == 3).distinct()
u1 = union(s1, s2).limit(2)
self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])
@testing.requires.parens_in_union_contained_select_w_limit_offset
def test_limit_offset_in_unions_from_alias(self):
table = self.tables.some_table
s1 = (
select([table])
.where(table.c.id == 2)
.limit(1)
.order_by(table.c.id)
)
s2 = (
select([table])
.where(table.c.id == 3)
.limit(1)
.order_by(table.c.id)
)
# this necessarily has double parens
u1 = union(s1, s2).alias()
self._assert_result(
u1.select().limit(2).order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)]
)
def test_limit_offset_aliased_selectable_in_unions(self):
table = self.tables.some_table
s1 = (
select([table])
.where(table.c.id == 2)
.limit(1)
.order_by(table.c.id)
.alias()
.select()
)
s2 = (
select([table])
.where(table.c.id == 3)
.limit(1)
.order_by(table.c.id)
.alias()
.select()
)
u1 = union(s1, s2).limit(2)
self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])
class ExpandingBoundInTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
Column("z", String(50)),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "x": 1, "y": 2, "z": "z1"},
{"id": 2, "x": 2, "y": 3, "z": "z2"},
{"id": 3, "x": 3, "y": 4, "z": "z3"},
{"id": 4, "x": 4, "y": 5, "z": "z4"},
],
)
def _assert_result(self, select, result, params=()):
eq_(config.db.execute(select, params).fetchall(), result)
def test_multiple_empty_sets(self):
# test that any anonymous aliasing used by the dialect
# is fine with duplicates
table = self.tables.some_table
stmt = (
select([table.c.id])
.where(table.c.x.in_(bindparam("q", expanding=True)))
.where(table.c.y.in_(bindparam("p", expanding=True)))
.order_by(table.c.id)
)
self._assert_result(stmt, [], params={"q": [], "p": []})
@testing.requires.tuple_in
def test_empty_heterogeneous_tuples(self):
table = self.tables.some_table
stmt = (
select([table.c.id])
.where(
tuple_(table.c.x, table.c.z).in_(
bindparam("q", expanding=True)
)
)
.order_by(table.c.id)
)
self._assert_result(stmt, [], params={"q": []})
@testing.requires.tuple_in
def test_empty_homogeneous_tuples(self):
table = self.tables.some_table
stmt = (
select([table.c.id])
.where(
tuple_(table.c.x, table.c.y).in_(
bindparam("q", expanding=True)
)
)
.order_by(table.c.id)
)
self._assert_result(stmt, [], params={"q": []})
def test_bound_in_scalar(self):
table = self.tables.some_table
stmt = (
select([table.c.id])
.where(table.c.x.in_(bindparam("q", expanding=True)))
.order_by(table.c.id)
)
self._assert_result(stmt, [(2,), (3,), (4,)], params={"q": [2, 3, 4]})
@testing.requires.tuple_in
def test_bound_in_two_tuple(self):
table = self.tables.some_table
stmt = (
select([table.c.id])
.where(
tuple_(table.c.x, table.c.y).in_(
bindparam("q", expanding=True)
)
)
.order_by(table.c.id)
)
self._assert_result(
stmt, [(2,), (3,), (4,)], params={"q": [(2, 3), (3, 4), (4, 5)]}
)
@testing.requires.tuple_in
def test_bound_in_heterogeneous_two_tuple(self):
table = self.tables.some_table
stmt = (
select([table.c.id])
.where(
tuple_(table.c.x, table.c.z).in_(
bindparam("q", expanding=True)
)
)
.order_by(table.c.id)
)
self._assert_result(
stmt,
[(2,), (3,), (4,)],
params={"q": [(2, "z2"), (3, "z3"), (4, "z4")]},
)
def test_empty_set_against_integer(self):
table = self.tables.some_table
stmt = (
select([table.c.id])
.where(table.c.x.in_(bindparam("q", expanding=True)))
.order_by(table.c.id)
)
self._assert_result(stmt, [], params={"q": []})
def test_empty_set_against_integer_negation(self):
table = self.tables.some_table
stmt = (
select([table.c.id])
.where(table.c.x.notin_(bindparam("q", expanding=True)))
.order_by(table.c.id)
)
self._assert_result(stmt, [(1,), (2,), (3,), (4,)], params={"q": []})
def test_empty_set_against_string(self):
table = self.tables.some_table
stmt = (
select([table.c.id])
.where(table.c.z.in_(bindparam("q", expanding=True)))
.order_by(table.c.id)
)
self._assert_result(stmt, [], params={"q": []})
def test_empty_set_against_string_negation(self):
table = self.tables.some_table
stmt = (
select([table.c.id])
.where(table.c.z.notin_(bindparam("q", expanding=True)))
.order_by(table.c.id)
)
self._assert_result(stmt, [(1,), (2,), (3,), (4,)], params={"q": []})
def test_null_in_empty_set_is_false(self):
stmt = select(
[
case(
[
(
null().in_(
bindparam("foo", value=(), expanding=True)
),
true(),
)
],
else_=false(),
)
]
)
in_(config.db.execute(stmt).fetchone()[0], (False, 0))
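# --- illustrative note (not part of the original module) ---
# an "expanding" bindparam defers the IN list to execution time: the
# statement compiles once with a placeholder, and each execution expands
# the bound list into individual parameters, e.g.
#
#     stmt = select([table.c.id]).where(
#         table.c.x.in_(bindparam("q", expanding=True))
#     )
#     conn.execute(stmt, {"q": [2, 3, 4]})   # renders IN (?, ?, ?)
#
# an empty list renders a backend-appropriate "empty set" expression
# rather than invalid SQL, which is what the empty-set tests above rely on.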
class LikeFunctionsTest(fixtures.TablesTest):
__backend__ = True
run_inserts = "once"
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "data": "abcdefg"},
{"id": 2, "data": "ab/cdefg"},
{"id": 3, "data": "ab%cdefg"},
{"id": 4, "data": "ab_cdefg"},
{"id": 5, "data": "abcde/fg"},
{"id": 6, "data": "abcde%fg"},
{"id": 7, "data": "ab#cdefg"},
{"id": 8, "data": "ab9cdefg"},
{"id": 9, "data": "abcde#fg"},
{"id": 10, "data": "abcd9fg"},
],
)
def _test(self, expr, expected):
some_table = self.tables.some_table
with config.db.connect() as conn:
rows = {
value
for value, in conn.execute(
select([some_table.c.id]).where(expr)
)
}
eq_(rows, expected)
def test_startswith_unescaped(self):
col = self.tables.some_table.c.data
self._test(col.startswith("ab%c"), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
def test_startswith_autoescape(self):
col = self.tables.some_table.c.data
self._test(col.startswith("ab%c", autoescape=True), {3})
def test_startswith_sqlexpr(self):
col = self.tables.some_table.c.data
self._test(
col.startswith(literal_column("'ab%c'")),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
)
def test_startswith_escape(self):
col = self.tables.some_table.c.data
self._test(col.startswith("ab##c", escape="#"), {7})
def test_startswith_autoescape_escape(self):
col = self.tables.some_table.c.data
self._test(col.startswith("ab%c", autoescape=True, escape="#"), {3})
self._test(col.startswith("ab#c", autoescape=True, escape="#"), {7})
def test_endswith_unescaped(self):
col = self.tables.some_table.c.data
self._test(col.endswith("e%fg"), {1, 2, 3, 4, 5, 6, 7, 8, 9})
def test_endswith_sqlexpr(self):
col = self.tables.some_table.c.data
self._test(
col.endswith(literal_column("'e%fg'")), {1, 2, 3, 4, 5, 6, 7, 8, 9}
)
def test_endswith_autoescape(self):
col = self.tables.some_table.c.data
self._test(col.endswith("e%fg", autoescape=True), {6})
def test_endswith_escape(self):
col = self.tables.some_table.c.data
self._test(col.endswith("e##fg", escape="#"), {9})
def test_endswith_autoescape_escape(self):
col = self.tables.some_table.c.data
self._test(col.endswith("e%fg", autoescape=True, escape="#"), {6})
self._test(col.endswith("e#fg", autoescape=True, escape="#"), {9})
def test_contains_unescaped(self):
col = self.tables.some_table.c.data
self._test(col.contains("b%cde"), {1, 2, 3, 4, 5, 6, 7, 8, 9})
def test_contains_autoescape(self):
col = self.tables.some_table.c.data
self._test(col.contains("b%cde", autoescape=True), {3})
def test_contains_escape(self):
col = self.tables.some_table.c.data
self._test(col.contains("b##cde", escape="#"), {7})
def test_contains_autoescape_escape(self):
col = self.tables.some_table.c.data
self._test(col.contains("b%cd", autoescape=True, escape="#"), {3})
self._test(col.contains("b#cd", autoescape=True, escape="#"), {7})
class ComputedColumnTest(fixtures.TablesTest):
__backend__ = True
__requires__ = ("computed_columns",)
@classmethod
def define_tables(cls, metadata):
Table(
"square",
metadata,
Column("id", Integer, primary_key=True),
Column("side", Integer),
Column("area", Integer, Computed("side * side")),
Column("perimeter", Integer, Computed("4 * side")),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.square.insert(),
[{"id": 1, "side": 10}, {"id": 10, "side": 42}],
)
def test_select_all(self):
with config.db.connect() as conn:
res = conn.execute(
select([text("*")])
.select_from(self.tables.square)
.order_by(self.tables.square.c.id)
).fetchall()
eq_(res, [(1, 10, 100, 40), (10, 42, 1764, 168)])
def test_select_columns(self):
with config.db.connect() as conn:
res = conn.execute(
select(
[self.tables.square.c.area, self.tables.square.c.perimeter]
)
.select_from(self.tables.square)
.order_by(self.tables.square.c.id)
).fetchall()
eq_(res, [(100, 40), (1764, 168)])
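# --- illustrative note (not part of the original module) ---
# Computed("side * side") declares a server-side computed column,
# typically rendered as GENERATED ALWAYS AS (side * side) with
# backend-specific variations, so the area/perimeter values asserted
# above are produced by the database itself rather than by the INSERT.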
class IsOrIsNotDistinctFromTest(fixtures.TablesTest):
__backend__ = True
__requires__ = ("supports_is_distinct_from",)
@classmethod
def define_tables(cls, metadata):
Table(
"is_distinct_test",
metadata,
Column("id", Integer, primary_key=True),
Column("col_a", Integer, nullable=True),
Column("col_b", Integer, nullable=True),
)
@testing.combinations(
("both_int_different", 0, 1, 1),
("both_int_same", 1, 1, 0),
("one_null_first", None, 1, 1),
("one_null_second", 0, None, 1),
("both_null", None, None, 0),
id_="iaaa",
argnames="col_a_value, col_b_value, expected_row_count_for_is",
)
def test_is_or_isnot_distinct_from(
self, col_a_value, col_b_value, expected_row_count_for_is, connection
):
tbl = self.tables.is_distinct_test
connection.execute(
tbl.insert(),
[{"id": 1, "col_a": col_a_value, "col_b": col_b_value}],
)
result = connection.execute(
tbl.select(tbl.c.col_a.is_distinct_from(tbl.c.col_b))
).fetchall()
eq_(
len(result), expected_row_count_for_is,
)
expected_row_count_for_isnot = (
1 if expected_row_count_for_is == 0 else 0
)
result = connection.execute(
tbl.select(tbl.c.col_a.isnot_distinct_from(tbl.c.col_b))
).fetchall()
eq_(
len(result), expected_row_count_for_isnot,
)
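# --- illustrative note (not part of the original module) ---
# IS DISTINCT FROM is null-safe inequality: it is true when exactly one
# side is NULL and false when both are NULL, unlike plain a != b which
# yields NULL in either case; hence the expected row counts in the
# combinations above.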
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/suite/test_ddl.py | from .. import config
from .. import fixtures
from .. import util
from ..assertions import eq_
from ..config import requirements
from ... import Column
from ... import inspect
from ... import Integer
from ... import schema
from ... import String
from ... import Table
class TableDDLTest(fixtures.TestBase):
__backend__ = True
def _simple_fixture(self, schema=None):
return Table(
"test_table",
self.metadata,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("data", String(50)),
schema=schema,
)
def _underscore_fixture(self):
return Table(
"_test_table",
self.metadata,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("_data", String(50)),
)
def _simple_roundtrip(self, table):
with config.db.begin() as conn:
conn.execute(table.insert().values((1, "some data")))
result = conn.execute(table.select())
eq_(result.first(), (1, "some data"))
@requirements.create_table
@util.provide_metadata
def test_create_table(self):
table = self._simple_fixture()
table.create(config.db, checkfirst=False)
self._simple_roundtrip(table)
@requirements.create_table
@requirements.schemas
@util.provide_metadata
def test_create_table_schema(self):
table = self._simple_fixture(schema=config.test_schema)
table.create(config.db, checkfirst=False)
self._simple_roundtrip(table)
@requirements.drop_table
@util.provide_metadata
def test_drop_table(self):
table = self._simple_fixture()
table.create(config.db, checkfirst=False)
table.drop(config.db, checkfirst=False)
@requirements.create_table
@util.provide_metadata
def test_underscore_names(self):
table = self._underscore_fixture()
table.create(config.db, checkfirst=False)
self._simple_roundtrip(table)
@requirements.comment_reflection
@util.provide_metadata
def test_add_table_comment(self):
table = self._simple_fixture()
table.create(config.db, checkfirst=False)
table.comment = "a comment"
config.db.execute(schema.SetTableComment(table))
eq_(
inspect(config.db).get_table_comment("test_table"),
{"text": "a comment"},
)
@requirements.comment_reflection
@util.provide_metadata
def test_drop_table_comment(self):
table = self._simple_fixture()
table.create(config.db, checkfirst=False)
table.comment = "a comment"
config.db.execute(schema.SetTableComment(table))
config.db.execute(schema.DropTableComment(table))
        eq_(
            inspect(config.db).get_table_comment("test_table"),
            {"text": None},
        )
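# Illustrative aside (not part of the upstream suite): a minimal sketch of
# the statements SetTableComment / DropTableComment compile to, assuming
# the generic DDL compiler (dialects without COMMENT support override it).
def _demo_comment_ddl():
    from sqlalchemy import Column, Integer, MetaData, Table, schema
    md = MetaData()
    t = Table("comment_demo", md, Column("id", Integer, primary_key=True))
    t.comment = "a comment"
    print(schema.SetTableComment(t))
    # COMMENT ON TABLE comment_demo IS 'a comment'
    print(schema.DropTableComment(t))
    # COMMENT ON TABLE comment_demo IS NULL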
__all__ = ("TableDDLTest",)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/suite/test_dialect.py | #! coding: utf-8
from .. import assert_raises
from .. import config
from .. import eq_
from .. import fixtures
from .. import ne_
from .. import provide_metadata
from ..config import requirements
from ..schema import Column
from ..schema import Table
from ... import exc
from ... import Integer
from ... import literal_column
from ... import select
from ... import String
from ...util import compat
class ExceptionTest(fixtures.TablesTest):
"""Test basic exception wrapping.
    DBAPIs vary a lot in exception behavior, so to actually anticipate
    specific exceptions from real round trips we need to be conservative.
"""
run_deletes = "each"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"manual_pk",
metadata,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("data", String(50)),
)
@requirements.duplicate_key_raises_integrity_error
def test_integrity_error(self):
with config.db.connect() as conn:
trans = conn.begin()
conn.execute(
self.tables.manual_pk.insert(), {"id": 1, "data": "d1"}
)
assert_raises(
exc.IntegrityError,
conn.execute,
self.tables.manual_pk.insert(),
{"id": 1, "data": "d1"},
)
trans.rollback()
def test_exception_with_non_ascii(self):
with config.db.connect() as conn:
try:
# try to create an error message that likely has non-ascii
# characters in the DBAPI's message string. unfortunately
# there's no way to make this happen with some drivers like
# mysqlclient, pymysql. this at least does produce a non-
# ascii error message for cx_oracle, psycopg2
conn.execute(select([literal_column(u"méil")]))
assert False
except exc.DBAPIError as err:
err_str = str(err)
assert str(err.orig) in str(err)
                # test that we are actually getting string on Py2k, unicode
                # on Py3k (``str`` is bytes on Py2k and unicode on Py3k,
                # so the same assertion covers both)
                if compat.py2k:
                    assert isinstance(err_str, str)
                else:
                    assert isinstance(err_str, str)
class IsolationLevelTest(fixtures.TestBase):
__backend__ = True
__requires__ = ("isolation_level",)
def _get_non_default_isolation_level(self):
levels = requirements.get_isolation_levels(config)
default = levels["default"]
supported = levels["supported"]
s = set(supported).difference(["AUTOCOMMIT", default])
if s:
return s.pop()
else:
config.skip_test("no non-default isolation level available")
def test_default_isolation_level(self):
eq_(
config.db.dialect.default_isolation_level,
requirements.get_isolation_levels(config)["default"],
)
def test_non_default_isolation_level(self):
non_default = self._get_non_default_isolation_level()
with config.db.connect() as conn:
existing = conn.get_isolation_level()
ne_(existing, non_default)
conn.execution_options(isolation_level=non_default)
eq_(conn.get_isolation_level(), non_default)
conn.dialect.reset_isolation_level(conn.connection)
eq_(conn.get_isolation_level(), existing)
class AutocommitTest(fixtures.TablesTest):
run_deletes = "each"
__requires__ = ("autocommit",)
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("data", String(50)),
test_needs_acid=True,
)
def _test_conn_autocommits(self, conn, autocommit):
trans = conn.begin()
conn.execute(
self.tables.some_table.insert(), {"id": 1, "data": "some data"}
)
trans.rollback()
eq_(
conn.scalar(select([self.tables.some_table.c.id])),
1 if autocommit else None,
)
conn.execute(self.tables.some_table.delete())
def test_autocommit_on(self):
conn = config.db.connect()
c2 = conn.execution_options(isolation_level="AUTOCOMMIT")
self._test_conn_autocommits(c2, True)
c2.dialect.reset_isolation_level(c2.connection)
self._test_conn_autocommits(conn, False)
def test_autocommit_off(self):
conn = config.db.connect()
self._test_conn_autocommits(conn, False)
def test_turn_autocommit_off_via_default_iso_level(self):
conn = config.db.connect()
conn.execution_options(isolation_level="AUTOCOMMIT")
self._test_conn_autocommits(conn, True)
conn.execution_options(
isolation_level=requirements.get_isolation_levels(config)[
"default"
]
)
self._test_conn_autocommits(conn, False)
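# Illustrative aside (not part of the upstream suite): the execution-option
# pattern exercised above, condensed into a sketch. ``engine`` is assumed
# to be any configured Engine whose dialect supports the AUTOCOMMIT level.
def _demo_autocommit_option(engine):
    conn = engine.connect()
    try:
        # a connection-level option: statements commit immediately rather
        # than running inside an implicit transaction
        autocommit_conn = conn.execution_options(isolation_level="AUTOCOMMIT")
        autocommit_conn.execute("SELECT 1")
    finally:
        conn.close()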
class EscapingTest(fixtures.TestBase):
@provide_metadata
def test_percent_sign_round_trip(self):
"""test that the DBAPI accommodates for escaped / nonescaped
percent signs in a way that matches the compiler
"""
m = self.metadata
t = Table("t", m, Column("data", String(50)))
t.create(config.db)
with config.db.begin() as conn:
conn.execute(t.insert(), dict(data="some % value"))
conn.execute(t.insert(), dict(data="some %% other value"))
eq_(
conn.scalar(
select([t.c.data]).where(
t.c.data == literal_column("'some % value'")
)
),
"some % value",
)
eq_(
conn.scalar(
select([t.c.data]).where(
t.c.data == literal_column("'some %% other value'")
)
),
"some %% other value",
)
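# Illustrative aside (not part of the upstream suite): why percent signs
# are interesting at all. DBAPIs with ``pyformat``/``format`` paramstyles
# use ``%`` as their parameter marker, while e.g. qmark dialects do not; a
# sketch of the marker difference using two stock dialects:
def _demo_paramstyle_markers():
    from sqlalchemy import column, select
    from sqlalchemy.dialects import postgresql, sqlite
    stmt = select([column("data")]).where(column("data") == "x")
    print(stmt.compile(dialect=postgresql.dialect()))
    # SELECT data WHERE data = %(data_1)s   (pyformat: "%" is significant)
    print(stmt.compile(dialect=sqlite.dialect()))
    # SELECT data WHERE data = ?            (qmark: "%" passes through)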
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/suite/__init__.py | from .test_cte import * # noqa
from .test_ddl import * # noqa
from .test_dialect import * # noqa
from .test_insert import * # noqa
from .test_reflection import * # noqa
from .test_results import * # noqa
from .test_select import * # noqa
from .test_sequence import * # noqa
from .test_types import * # noqa
from .test_update_delete import * # noqa
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/suite/test_update_delete.py | from .. import config
from .. import fixtures
from ..assertions import eq_
from ..schema import Column
from ..schema import Table
from ... import Integer
from ... import String
class SimpleUpdateDeleteTest(fixtures.TablesTest):
run_deletes = "each"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"plain_pk",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.plain_pk.insert(),
[
{"id": 1, "data": "d1"},
{"id": 2, "data": "d2"},
{"id": 3, "data": "d3"},
],
)
def test_update(self):
t = self.tables.plain_pk
r = config.db.execute(t.update().where(t.c.id == 2), data="d2_new")
assert not r.is_insert
assert not r.returns_rows
eq_(
config.db.execute(t.select().order_by(t.c.id)).fetchall(),
[(1, "d1"), (2, "d2_new"), (3, "d3")],
)
def test_delete(self):
t = self.tables.plain_pk
r = config.db.execute(t.delete().where(t.c.id == 2))
assert not r.is_insert
assert not r.returns_rows
eq_(
config.db.execute(t.select().order_by(t.c.id)).fetchall(),
[(1, "d1"), (3, "d3")],
)
__all__ = ("SimpleUpdateDeleteTest",)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/suite/test_reflection.py | import operator
import re
import sqlalchemy as sa
from .. import assert_raises_message
from .. import config
from .. import engines
from .. import eq_
from .. import expect_warnings
from .. import fixtures
from .. import is_
from ..provision import temp_table_keyword_args
from ..schema import Column
from ..schema import Table
from ... import event
from ... import exc as sa_exc
from ... import ForeignKey
from ... import inspect
from ... import Integer
from ... import MetaData
from ... import String
from ... import testing
from ... import types as sql_types
from ...engine.reflection import Inspector
from ...schema import DDL
from ...schema import Index
from ...sql.elements import quoted_name
from ...testing import is_false
from ...testing import is_true
metadata, users = None, None
class HasTableTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"test_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
if testing.requires.schemas.enabled:
Table(
"test_table_s",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
schema=config.test_schema,
)
def test_has_table(self):
with config.db.begin() as conn:
is_true(config.db.dialect.has_table(conn, "test_table"))
is_false(config.db.dialect.has_table(conn, "test_table_s"))
is_false(config.db.dialect.has_table(conn, "nonexistent_table"))
@testing.requires.schemas
def test_has_table_schema(self):
with config.db.begin() as conn:
is_false(
config.db.dialect.has_table(
conn, "test_table", schema=config.test_schema
)
)
is_true(
config.db.dialect.has_table(
conn, "test_table_s", schema=config.test_schema
)
)
is_false(
config.db.dialect.has_table(
conn, "nonexistent_table", schema=config.test_schema
)
)
class ComponentReflectionTest(fixtures.TablesTest):
run_inserts = run_deletes = None
__backend__ = True
@classmethod
def setup_bind(cls):
if config.requirements.independent_connections.enabled:
from sqlalchemy import pool
return engines.testing_engine(
options=dict(poolclass=pool.StaticPool)
)
else:
return config.db
@classmethod
def define_tables(cls, metadata):
cls.define_reflected_tables(metadata, None)
if testing.requires.schemas.enabled:
cls.define_reflected_tables(metadata, testing.config.test_schema)
@classmethod
def define_reflected_tables(cls, metadata, schema):
if schema:
schema_prefix = schema + "."
else:
schema_prefix = ""
if testing.requires.self_referential_foreign_keys.enabled:
users = Table(
"users",
metadata,
Column("user_id", sa.INT, primary_key=True),
Column("test1", sa.CHAR(5), nullable=False),
Column("test2", sa.Float(5), nullable=False),
Column(
"parent_user_id",
sa.Integer,
sa.ForeignKey(
"%susers.user_id" % schema_prefix, name="user_id_fk"
),
),
schema=schema,
test_needs_fk=True,
)
else:
users = Table(
"users",
metadata,
Column("user_id", sa.INT, primary_key=True),
Column("test1", sa.CHAR(5), nullable=False),
Column("test2", sa.Float(5), nullable=False),
schema=schema,
test_needs_fk=True,
)
Table(
"dingalings",
metadata,
Column("dingaling_id", sa.Integer, primary_key=True),
Column(
"address_id",
sa.Integer,
sa.ForeignKey("%semail_addresses.address_id" % schema_prefix),
),
Column("data", sa.String(30)),
schema=schema,
test_needs_fk=True,
)
Table(
"email_addresses",
metadata,
Column("address_id", sa.Integer),
Column(
"remote_user_id", sa.Integer, sa.ForeignKey(users.c.user_id)
),
Column("email_address", sa.String(20)),
sa.PrimaryKeyConstraint("address_id", name="email_ad_pk"),
schema=schema,
test_needs_fk=True,
)
Table(
"comment_test",
metadata,
Column("id", sa.Integer, primary_key=True, comment="id comment"),
Column("data", sa.String(20), comment="data % comment"),
Column(
"d2",
sa.String(20),
comment=r"""Comment types type speedily ' " \ '' Fun!""",
),
schema=schema,
comment=r"""the test % ' " \ table comment""",
)
if testing.requires.cross_schema_fk_reflection.enabled:
if schema is None:
Table(
"local_table",
metadata,
Column("id", sa.Integer, primary_key=True),
Column("data", sa.String(20)),
Column(
"remote_id",
ForeignKey(
"%s.remote_table_2.id" % testing.config.test_schema
),
),
test_needs_fk=True,
schema=config.db.dialect.default_schema_name,
)
else:
Table(
"remote_table",
metadata,
Column("id", sa.Integer, primary_key=True),
Column(
"local_id",
ForeignKey(
"%s.local_table.id"
% config.db.dialect.default_schema_name
),
),
Column("data", sa.String(20)),
schema=schema,
test_needs_fk=True,
)
Table(
"remote_table_2",
metadata,
Column("id", sa.Integer, primary_key=True),
Column("data", sa.String(20)),
schema=schema,
test_needs_fk=True,
)
if testing.requires.index_reflection.enabled:
cls.define_index(metadata, users)
if not schema:
            # test_needs_fk is used at the moment to force MySQL InnoDB
noncol_idx_test_nopk = Table(
"noncol_idx_test_nopk",
metadata,
Column("q", sa.String(5)),
test_needs_fk=True,
)
noncol_idx_test_pk = Table(
"noncol_idx_test_pk",
metadata,
Column("id", sa.Integer, primary_key=True),
Column("q", sa.String(5)),
test_needs_fk=True,
)
if testing.requires.indexes_with_ascdesc.enabled:
Index("noncol_idx_nopk", noncol_idx_test_nopk.c.q.desc())
Index("noncol_idx_pk", noncol_idx_test_pk.c.q.desc())
if testing.requires.view_column_reflection.enabled:
cls.define_views(metadata, schema)
if not schema and testing.requires.temp_table_reflection.enabled:
cls.define_temp_tables(metadata)
@classmethod
def define_temp_tables(cls, metadata):
kw = temp_table_keyword_args(config, config.db)
user_tmp = Table(
"user_tmp",
metadata,
Column("id", sa.INT, primary_key=True),
Column("name", sa.VARCHAR(50)),
Column("foo", sa.INT),
sa.UniqueConstraint("name", name="user_tmp_uq"),
sa.Index("user_tmp_ix", "foo"),
**kw
)
if (
testing.requires.view_reflection.enabled
and testing.requires.temporary_views.enabled
):
event.listen(
user_tmp,
"after_create",
DDL(
"create temporary view user_tmp_v as "
"select * from user_tmp"
),
)
event.listen(user_tmp, "before_drop", DDL("drop view user_tmp_v"))
@classmethod
def define_index(cls, metadata, users):
Index("users_t_idx", users.c.test1, users.c.test2)
Index("users_all_idx", users.c.user_id, users.c.test2, users.c.test1)
@classmethod
def define_views(cls, metadata, schema):
for table_name in ("users", "email_addresses"):
fullname = table_name
if schema:
fullname = "%s.%s" % (schema, table_name)
view_name = fullname + "_v"
query = "CREATE VIEW %s AS SELECT * FROM %s" % (
view_name,
fullname,
)
event.listen(metadata, "after_create", DDL(query))
event.listen(
metadata, "before_drop", DDL("DROP VIEW %s" % view_name)
)
@testing.requires.schema_reflection
def test_get_schema_names(self):
insp = inspect(testing.db)
self.assert_(testing.config.test_schema in insp.get_schema_names())
@testing.requires.schema_reflection
def test_dialect_initialize(self):
engine = engines.testing_engine()
assert not hasattr(engine.dialect, "default_schema_name")
inspect(engine)
assert hasattr(engine.dialect, "default_schema_name")
@testing.requires.schema_reflection
def test_get_default_schema_name(self):
insp = inspect(testing.db)
eq_(insp.default_schema_name, testing.db.dialect.default_schema_name)
@testing.provide_metadata
def _test_get_table_names(
self, schema=None, table_type="table", order_by=None
):
_ignore_tables = [
"comment_test",
"noncol_idx_test_pk",
"noncol_idx_test_nopk",
"local_table",
"remote_table",
"remote_table_2",
]
meta = self.metadata
insp = inspect(meta.bind)
if table_type == "view":
table_names = insp.get_view_names(schema)
table_names.sort()
answer = ["email_addresses_v", "users_v"]
eq_(sorted(table_names), answer)
else:
if order_by:
tables = [
rec[0]
for rec in insp.get_sorted_table_and_fkc_names(schema)
if rec[0]
]
else:
tables = insp.get_table_names(schema)
table_names = [t for t in tables if t not in _ignore_tables]
if order_by == "foreign_key":
answer = ["users", "email_addresses", "dingalings"]
eq_(table_names, answer)
else:
answer = ["dingalings", "email_addresses", "users"]
eq_(sorted(table_names), answer)
@testing.requires.temp_table_names
def test_get_temp_table_names(self):
insp = inspect(self.bind)
temp_table_names = insp.get_temp_table_names()
eq_(sorted(temp_table_names), ["user_tmp"])
@testing.requires.view_reflection
@testing.requires.temp_table_names
@testing.requires.temporary_views
def test_get_temp_view_names(self):
insp = inspect(self.bind)
temp_table_names = insp.get_temp_view_names()
eq_(sorted(temp_table_names), ["user_tmp_v"])
@testing.requires.table_reflection
def test_get_table_names(self):
self._test_get_table_names()
@testing.requires.table_reflection
@testing.requires.foreign_key_constraint_reflection
def test_get_table_names_fks(self):
self._test_get_table_names(order_by="foreign_key")
@testing.requires.comment_reflection
def test_get_comments(self):
self._test_get_comments()
@testing.requires.comment_reflection
@testing.requires.schemas
def test_get_comments_with_schema(self):
self._test_get_comments(testing.config.test_schema)
def _test_get_comments(self, schema=None):
insp = inspect(testing.db)
eq_(
insp.get_table_comment("comment_test", schema=schema),
{"text": r"""the test % ' " \ table comment"""},
)
eq_(insp.get_table_comment("users", schema=schema), {"text": None})
eq_(
[
{"name": rec["name"], "comment": rec["comment"]}
for rec in insp.get_columns("comment_test", schema=schema)
],
[
{"comment": "id comment", "name": "id"},
{"comment": "data % comment", "name": "data"},
{
"comment": (
r"""Comment types type speedily ' " \ '' Fun!"""
),
"name": "d2",
},
],
)
@testing.requires.table_reflection
@testing.requires.schemas
def test_get_table_names_with_schema(self):
self._test_get_table_names(testing.config.test_schema)
@testing.requires.view_column_reflection
def test_get_view_names(self):
self._test_get_table_names(table_type="view")
@testing.requires.view_column_reflection
@testing.requires.schemas
def test_get_view_names_with_schema(self):
self._test_get_table_names(
testing.config.test_schema, table_type="view"
)
@testing.requires.table_reflection
@testing.requires.view_column_reflection
def test_get_tables_and_views(self):
self._test_get_table_names()
self._test_get_table_names(table_type="view")
def _test_get_columns(self, schema=None, table_type="table"):
meta = MetaData(testing.db)
users, addresses = (self.tables.users, self.tables.email_addresses)
table_names = ["users", "email_addresses"]
if table_type == "view":
table_names = ["users_v", "email_addresses_v"]
insp = inspect(meta.bind)
for table_name, table in zip(table_names, (users, addresses)):
schema_name = schema
cols = insp.get_columns(table_name, schema=schema_name)
self.assert_(len(cols) > 0, len(cols))
# should be in order
for i, col in enumerate(table.columns):
eq_(col.name, cols[i]["name"])
ctype = cols[i]["type"].__class__
ctype_def = col.type
if isinstance(ctype_def, sa.types.TypeEngine):
ctype_def = ctype_def.__class__
# Oracle returns Date for DateTime.
if testing.against("oracle") and ctype_def in (
sql_types.Date,
sql_types.DateTime,
):
ctype_def = sql_types.Date
# assert that the desired type and return type share
# a base within one of the generic types.
self.assert_(
len(
set(ctype.__mro__)
.intersection(ctype_def.__mro__)
.intersection(
[
sql_types.Integer,
sql_types.Numeric,
sql_types.DateTime,
sql_types.Date,
sql_types.Time,
sql_types.String,
sql_types._Binary,
]
)
)
> 0,
"%s(%s), %s(%s)"
% (col.name, col.type, cols[i]["name"], ctype),
)
if not col.primary_key:
assert cols[i]["default"] is None
@testing.requires.table_reflection
def test_get_columns(self):
self._test_get_columns()
@testing.provide_metadata
def _type_round_trip(self, *types):
t = Table(
"t",
self.metadata,
*[Column("t%d" % i, type_) for i, type_ in enumerate(types)]
)
t.create()
return [
c["type"] for c in inspect(self.metadata.bind).get_columns("t")
]
@testing.requires.table_reflection
def test_numeric_reflection(self):
for typ in self._type_round_trip(sql_types.Numeric(18, 5)):
assert isinstance(typ, sql_types.Numeric)
eq_(typ.precision, 18)
eq_(typ.scale, 5)
@testing.requires.table_reflection
def test_varchar_reflection(self):
typ = self._type_round_trip(sql_types.String(52))[0]
assert isinstance(typ, sql_types.String)
eq_(typ.length, 52)
@testing.requires.table_reflection
@testing.provide_metadata
def test_nullable_reflection(self):
t = Table(
"t",
self.metadata,
Column("a", Integer, nullable=True),
Column("b", Integer, nullable=False),
)
t.create()
eq_(
dict(
(col["name"], col["nullable"])
for col in inspect(self.metadata.bind).get_columns("t")
),
{"a": True, "b": False},
)
@testing.requires.table_reflection
@testing.requires.schemas
def test_get_columns_with_schema(self):
self._test_get_columns(schema=testing.config.test_schema)
@testing.requires.temp_table_reflection
def test_get_temp_table_columns(self):
meta = MetaData(self.bind)
user_tmp = self.tables.user_tmp
insp = inspect(meta.bind)
cols = insp.get_columns("user_tmp")
self.assert_(len(cols) > 0, len(cols))
for i, col in enumerate(user_tmp.columns):
eq_(col.name, cols[i]["name"])
@testing.requires.temp_table_reflection
@testing.requires.view_column_reflection
@testing.requires.temporary_views
def test_get_temp_view_columns(self):
insp = inspect(self.bind)
cols = insp.get_columns("user_tmp_v")
eq_([col["name"] for col in cols], ["id", "name", "foo"])
@testing.requires.view_column_reflection
def test_get_view_columns(self):
self._test_get_columns(table_type="view")
@testing.requires.view_column_reflection
@testing.requires.schemas
def test_get_view_columns_with_schema(self):
self._test_get_columns(
schema=testing.config.test_schema, table_type="view"
)
@testing.provide_metadata
def _test_get_pk_constraint(self, schema=None):
meta = self.metadata
users, addresses = self.tables.users, self.tables.email_addresses
insp = inspect(meta.bind)
users_cons = insp.get_pk_constraint(users.name, schema=schema)
users_pkeys = users_cons["constrained_columns"]
eq_(users_pkeys, ["user_id"])
addr_cons = insp.get_pk_constraint(addresses.name, schema=schema)
addr_pkeys = addr_cons["constrained_columns"]
eq_(addr_pkeys, ["address_id"])
with testing.requires.reflects_pk_names.fail_if():
eq_(addr_cons["name"], "email_ad_pk")
@testing.requires.primary_key_constraint_reflection
def test_get_pk_constraint(self):
self._test_get_pk_constraint()
@testing.requires.table_reflection
@testing.requires.primary_key_constraint_reflection
@testing.requires.schemas
def test_get_pk_constraint_with_schema(self):
self._test_get_pk_constraint(schema=testing.config.test_schema)
@testing.requires.table_reflection
@testing.provide_metadata
def test_deprecated_get_primary_keys(self):
meta = self.metadata
users = self.tables.users
insp = Inspector(meta.bind)
assert_raises_message(
sa_exc.SADeprecationWarning,
r".*get_primary_keys\(\) method is deprecated",
insp.get_primary_keys,
users.name,
)
@testing.provide_metadata
def _test_get_foreign_keys(self, schema=None):
meta = self.metadata
users, addresses = (self.tables.users, self.tables.email_addresses)
insp = inspect(meta.bind)
expected_schema = schema
# users
if testing.requires.self_referential_foreign_keys.enabled:
users_fkeys = insp.get_foreign_keys(users.name, schema=schema)
fkey1 = users_fkeys[0]
with testing.requires.named_constraints.fail_if():
eq_(fkey1["name"], "user_id_fk")
eq_(fkey1["referred_schema"], expected_schema)
eq_(fkey1["referred_table"], users.name)
eq_(fkey1["referred_columns"], ["user_id"])
if testing.requires.self_referential_foreign_keys.enabled:
eq_(fkey1["constrained_columns"], ["parent_user_id"])
# addresses
addr_fkeys = insp.get_foreign_keys(addresses.name, schema=schema)
fkey1 = addr_fkeys[0]
with testing.requires.implicitly_named_constraints.fail_if():
self.assert_(fkey1["name"] is not None)
eq_(fkey1["referred_schema"], expected_schema)
eq_(fkey1["referred_table"], users.name)
eq_(fkey1["referred_columns"], ["user_id"])
eq_(fkey1["constrained_columns"], ["remote_user_id"])
@testing.requires.foreign_key_constraint_reflection
def test_get_foreign_keys(self):
self._test_get_foreign_keys()
@testing.requires.foreign_key_constraint_reflection
@testing.requires.schemas
def test_get_foreign_keys_with_schema(self):
self._test_get_foreign_keys(schema=testing.config.test_schema)
@testing.requires.cross_schema_fk_reflection
@testing.requires.schemas
def test_get_inter_schema_foreign_keys(self):
local_table, remote_table, remote_table_2 = self.tables(
"%s.local_table" % testing.db.dialect.default_schema_name,
"%s.remote_table" % testing.config.test_schema,
"%s.remote_table_2" % testing.config.test_schema,
)
insp = inspect(config.db)
local_fkeys = insp.get_foreign_keys(local_table.name)
eq_(len(local_fkeys), 1)
fkey1 = local_fkeys[0]
eq_(fkey1["referred_schema"], testing.config.test_schema)
eq_(fkey1["referred_table"], remote_table_2.name)
eq_(fkey1["referred_columns"], ["id"])
eq_(fkey1["constrained_columns"], ["remote_id"])
remote_fkeys = insp.get_foreign_keys(
remote_table.name, schema=testing.config.test_schema
)
eq_(len(remote_fkeys), 1)
fkey2 = remote_fkeys[0]
assert fkey2["referred_schema"] in (
None,
testing.db.dialect.default_schema_name,
)
eq_(fkey2["referred_table"], local_table.name)
eq_(fkey2["referred_columns"], ["id"])
eq_(fkey2["constrained_columns"], ["local_id"])
@testing.requires.foreign_key_constraint_option_reflection_ondelete
def test_get_foreign_key_options_ondelete(self):
self._test_get_foreign_key_options(ondelete="CASCADE")
@testing.requires.foreign_key_constraint_option_reflection_onupdate
def test_get_foreign_key_options_onupdate(self):
self._test_get_foreign_key_options(onupdate="SET NULL")
@testing.provide_metadata
def _test_get_foreign_key_options(self, **options):
meta = self.metadata
Table(
"x",
meta,
Column("id", Integer, primary_key=True),
test_needs_fk=True,
)
Table(
"table",
meta,
Column("id", Integer, primary_key=True),
Column("x_id", Integer, sa.ForeignKey("x.id", name="xid")),
Column("test", String(10)),
test_needs_fk=True,
)
Table(
"user",
meta,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("tid", Integer),
sa.ForeignKeyConstraint(
["tid"], ["table.id"], name="myfk", **options
),
test_needs_fk=True,
)
meta.create_all()
insp = inspect(meta.bind)
# test 'options' is always present for a backend
# that can reflect these, since alembic looks for this
opts = insp.get_foreign_keys("table")[0]["options"]
eq_(dict((k, opts[k]) for k in opts if opts[k]), {})
opts = insp.get_foreign_keys("user")[0]["options"]
eq_(dict((k, opts[k]) for k in opts if opts[k]), options)
def _assert_insp_indexes(self, indexes, expected_indexes):
index_names = [d["name"] for d in indexes]
for e_index in expected_indexes:
assert e_index["name"] in index_names
index = indexes[index_names.index(e_index["name"])]
for key in e_index:
eq_(e_index[key], index[key])
@testing.provide_metadata
def _test_get_indexes(self, schema=None):
meta = self.metadata
# The database may decide to create indexes for foreign keys, etc.
# so there may be more indexes than expected.
insp = inspect(meta.bind)
indexes = insp.get_indexes("users", schema=schema)
expected_indexes = [
{
"unique": False,
"column_names": ["test1", "test2"],
"name": "users_t_idx",
},
{
"unique": False,
"column_names": ["user_id", "test2", "test1"],
"name": "users_all_idx",
},
]
self._assert_insp_indexes(indexes, expected_indexes)
@testing.requires.index_reflection
def test_get_indexes(self):
self._test_get_indexes()
@testing.requires.index_reflection
@testing.requires.schemas
def test_get_indexes_with_schema(self):
self._test_get_indexes(schema=testing.config.test_schema)
@testing.provide_metadata
def _test_get_noncol_index(self, tname, ixname):
meta = self.metadata
insp = inspect(meta.bind)
indexes = insp.get_indexes(tname)
        # reflecting an index that has "x DESC" in it as the column.
        # the DB may or may not give us "x", but make sure we get the index
        # back: it has a name, and it's connected to the table.
expected_indexes = [{"unique": False, "name": ixname}]
self._assert_insp_indexes(indexes, expected_indexes)
t = Table(tname, meta, autoload_with=meta.bind)
eq_(len(t.indexes), 1)
is_(list(t.indexes)[0].table, t)
eq_(list(t.indexes)[0].name, ixname)
@testing.requires.index_reflection
@testing.requires.indexes_with_ascdesc
def test_get_noncol_index_no_pk(self):
self._test_get_noncol_index("noncol_idx_test_nopk", "noncol_idx_nopk")
@testing.requires.index_reflection
@testing.requires.indexes_with_ascdesc
def test_get_noncol_index_pk(self):
self._test_get_noncol_index("noncol_idx_test_pk", "noncol_idx_pk")
@testing.requires.indexes_with_expressions
@testing.provide_metadata
def test_reflect_expression_based_indexes(self):
Table(
"t",
self.metadata,
Column("x", String(30)),
Column("y", String(30)),
)
event.listen(
self.metadata,
"after_create",
DDL("CREATE INDEX t_idx ON t(lower(x), lower(y))"),
)
event.listen(
self.metadata, "after_create", DDL("CREATE INDEX t_idx_2 ON t(x)")
)
self.metadata.create_all()
insp = inspect(self.metadata.bind)
with expect_warnings(
"Skipped unsupported reflection of expression-based index t_idx"
):
eq_(
insp.get_indexes("t"),
[{"name": "t_idx_2", "column_names": ["x"], "unique": 0}],
)
@testing.requires.unique_constraint_reflection
def test_get_unique_constraints(self):
self._test_get_unique_constraints()
@testing.requires.temp_table_reflection
@testing.requires.unique_constraint_reflection
def test_get_temp_table_unique_constraints(self):
insp = inspect(self.bind)
reflected = insp.get_unique_constraints("user_tmp")
for refl in reflected:
# Different dialects handle duplicate index and constraints
# differently, so ignore this flag
refl.pop("duplicates_index", None)
eq_(reflected, [{"column_names": ["name"], "name": "user_tmp_uq"}])
@testing.requires.temp_table_reflection
def test_get_temp_table_indexes(self):
insp = inspect(self.bind)
indexes = insp.get_indexes("user_tmp")
for ind in indexes:
ind.pop("dialect_options", None)
eq_(
# TODO: we need to add better filtering for indexes/uq constraints
# that are doubled up
[idx for idx in indexes if idx["name"] == "user_tmp_ix"],
[
{
"unique": False,
"column_names": ["foo"],
"name": "user_tmp_ix",
}
],
)
@testing.requires.unique_constraint_reflection
@testing.requires.schemas
def test_get_unique_constraints_with_schema(self):
self._test_get_unique_constraints(schema=testing.config.test_schema)
@testing.provide_metadata
def _test_get_unique_constraints(self, schema=None):
# SQLite dialect needs to parse the names of the constraints
# separately from what it gets from PRAGMA index_list(), and
# then matches them up. so same set of column_names in two
# constraints will confuse it. Perhaps we should no longer
# bother with index_list() here since we have the whole
# CREATE TABLE?
uniques = sorted(
[
{"name": "unique_a", "column_names": ["a"]},
{"name": "unique_a_b_c", "column_names": ["a", "b", "c"]},
{"name": "unique_c_a_b", "column_names": ["c", "a", "b"]},
{"name": "unique_asc_key", "column_names": ["asc", "key"]},
{"name": "i.have.dots", "column_names": ["b"]},
{"name": "i have spaces", "column_names": ["c"]},
],
key=operator.itemgetter("name"),
)
orig_meta = self.metadata
table = Table(
"testtbl",
orig_meta,
Column("a", sa.String(20)),
Column("b", sa.String(30)),
Column("c", sa.Integer),
# reserved identifiers
Column("asc", sa.String(30)),
Column("key", sa.String(30)),
schema=schema,
)
for uc in uniques:
table.append_constraint(
sa.UniqueConstraint(*uc["column_names"], name=uc["name"])
)
orig_meta.create_all()
inspector = inspect(orig_meta.bind)
reflected = sorted(
inspector.get_unique_constraints("testtbl", schema=schema),
key=operator.itemgetter("name"),
)
names_that_duplicate_index = set()
for orig, refl in zip(uniques, reflected):
# Different dialects handle duplicate index and constraints
# differently, so ignore this flag
dupe = refl.pop("duplicates_index", None)
if dupe:
names_that_duplicate_index.add(dupe)
eq_(orig, refl)
reflected_metadata = MetaData()
reflected = Table(
"testtbl",
reflected_metadata,
autoload_with=orig_meta.bind,
schema=schema,
)
# test "deduplicates for index" logic. MySQL and Oracle
# "unique constraints" are actually unique indexes (with possible
# exception of a unique that is a dupe of another one in the case
# of Oracle). make sure # they aren't duplicated.
idx_names = set([idx.name for idx in reflected.indexes])
uq_names = set(
[
uq.name
for uq in reflected.constraints
if isinstance(uq, sa.UniqueConstraint)
]
).difference(["unique_c_a_b"])
assert not idx_names.intersection(uq_names)
if names_that_duplicate_index:
eq_(names_that_duplicate_index, idx_names)
eq_(uq_names, set())
@testing.requires.check_constraint_reflection
def test_get_check_constraints(self):
self._test_get_check_constraints()
@testing.requires.check_constraint_reflection
@testing.requires.schemas
def test_get_check_constraints_schema(self):
self._test_get_check_constraints(schema=testing.config.test_schema)
@testing.provide_metadata
def _test_get_check_constraints(self, schema=None):
orig_meta = self.metadata
Table(
"sa_cc",
orig_meta,
Column("a", Integer()),
sa.CheckConstraint("a > 1 AND a < 5", name="cc1"),
sa.CheckConstraint("a = 1 OR (a > 2 AND a < 5)", name="cc2"),
schema=schema,
)
orig_meta.create_all()
inspector = inspect(orig_meta.bind)
reflected = sorted(
inspector.get_check_constraints("sa_cc", schema=schema),
key=operator.itemgetter("name"),
)
# trying to minimize effect of quoting, parenthesis, etc.
# may need to add more to this as new dialects get CHECK
# constraint reflection support
def normalize(sqltext):
return " ".join(
re.findall(r"and|\d|=|a|or|<|>", sqltext.lower(), re.I)
)
reflected = [
{"name": item["name"], "sqltext": normalize(item["sqltext"])}
for item in reflected
]
eq_(
reflected,
[
{"name": "cc1", "sqltext": "a > 1 and a < 5"},
{"name": "cc2", "sqltext": "a = 1 or a > 2 and a < 5"},
],
)
@testing.provide_metadata
def _test_get_view_definition(self, schema=None):
meta = self.metadata
view_name1 = "users_v"
view_name2 = "email_addresses_v"
insp = inspect(meta.bind)
v1 = insp.get_view_definition(view_name1, schema=schema)
self.assert_(v1)
v2 = insp.get_view_definition(view_name2, schema=schema)
self.assert_(v2)
@testing.requires.view_reflection
def test_get_view_definition(self):
self._test_get_view_definition()
@testing.requires.view_reflection
@testing.requires.schemas
def test_get_view_definition_with_schema(self):
self._test_get_view_definition(schema=testing.config.test_schema)
@testing.only_on("postgresql", "PG specific feature")
@testing.provide_metadata
def _test_get_table_oid(self, table_name, schema=None):
meta = self.metadata
insp = inspect(meta.bind)
oid = insp.get_table_oid(table_name, schema)
self.assert_(isinstance(oid, int))
def test_get_table_oid(self):
self._test_get_table_oid("users")
@testing.requires.schemas
def test_get_table_oid_with_schema(self):
self._test_get_table_oid("users", schema=testing.config.test_schema)
@testing.requires.table_reflection
@testing.provide_metadata
def test_autoincrement_col(self):
"""test that 'autoincrement' is reflected according to sqla's policy.
        Don't mark this test as unsupported for any backend!
(technically it fails with MySQL InnoDB since "id" comes before "id2")
A backend is better off not returning "autoincrement" at all,
instead of potentially returning "False" for an auto-incrementing
primary key column.
"""
meta = self.metadata
insp = inspect(meta.bind)
for tname, cname in [
("users", "user_id"),
("email_addresses", "address_id"),
("dingalings", "dingaling_id"),
]:
cols = insp.get_columns(tname)
id_ = {c["name"]: c for c in cols}[cname]
assert id_.get("autoincrement", True)
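# Illustrative aside (not part of the upstream suite): the Inspector calls
# exercised throughout the class above, condensed into one sketch.
# ``engine`` is assumed to be any configured Engine with a "users" table.
def _demo_inspector(engine):
    from sqlalchemy import inspect
    insp = inspect(engine)
    print(insp.get_table_names())
    print(insp.get_columns("users"))  # list of dicts: name, type, nullable...
    print(insp.get_pk_constraint("users"))
    print(insp.get_foreign_keys("users"))
    print(insp.get_indexes("users"))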
class NormalizedNameTest(fixtures.TablesTest):
__requires__ = ("denormalized_names",)
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
quoted_name("t1", quote=True),
metadata,
Column("id", Integer, primary_key=True),
)
Table(
quoted_name("t2", quote=True),
metadata,
Column("id", Integer, primary_key=True),
Column("t1id", ForeignKey("t1.id")),
)
def test_reflect_lowercase_forced_tables(self):
m2 = MetaData(testing.db)
t2_ref = Table(quoted_name("t2", quote=True), m2, autoload=True)
t1_ref = m2.tables["t1"]
assert t2_ref.c.t1id.references(t1_ref.c.id)
m3 = MetaData(testing.db)
m3.reflect(only=lambda name, m: name.lower() in ("t1", "t2"))
assert m3.tables["t2"].c.t1id.references(m3.tables["t1"].c.id)
def test_get_table_names(self):
tablenames = [
t
for t in inspect(testing.db).get_table_names()
if t.lower() in ("t1", "t2")
]
eq_(tablenames[0].upper(), tablenames[0].lower())
eq_(tablenames[1].upper(), tablenames[1].lower())
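# Illustrative aside (not part of the upstream suite): what quoted_name is
# doing above. On "denormalizing" backends such as Oracle, unquoted names
# are folded to upper case; quote=True pins the lowercase spelling by
# forcing quoting.
def _demo_quoted_name():
    from sqlalchemy.sql.elements import quoted_name
    name = quoted_name("t1", quote=True)
    print(name, name.quote)  # t1 True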
class ComputedReflectionTest(fixtures.ComputedReflectionFixtureTest):
def test_computed_col_default_not_set(self):
insp = inspect(config.db)
cols = insp.get_columns("computed_column_table")
for col in cols:
if col["name"] == "with_default":
is_true("42" in col["default"])
elif not col["autoincrement"]:
is_(col["default"], None)
def test_get_column_returns_computed(self):
insp = inspect(config.db)
cols = insp.get_columns("computed_default_table")
data = {c["name"]: c for c in cols}
for key in ("id", "normal", "with_default"):
is_true("computed" not in data[key])
compData = data["computed_col"]
is_true("computed" in compData)
is_true("sqltext" in compData["computed"])
eq_(self.normalize(compData["computed"]["sqltext"]), "normal+42")
eq_(
"persisted" in compData["computed"],
testing.requires.computed_columns_reflect_persisted.enabled,
)
if testing.requires.computed_columns_reflect_persisted.enabled:
eq_(
compData["computed"]["persisted"],
testing.requires.computed_columns_default_persisted.enabled,
)
def check_column(self, data, column, sqltext, persisted):
is_true("computed" in data[column])
compData = data[column]["computed"]
eq_(self.normalize(compData["sqltext"]), sqltext)
if testing.requires.computed_columns_reflect_persisted.enabled:
is_true("persisted" in compData)
is_(compData["persisted"], persisted)
def test_get_column_returns_persisted(self):
insp = inspect(config.db)
cols = insp.get_columns("computed_column_table")
data = {c["name"]: c for c in cols}
self.check_column(
data,
"computed_no_flag",
"normal+42",
testing.requires.computed_columns_default_persisted.enabled,
)
if testing.requires.computed_columns_virtual.enabled:
self.check_column(
data, "computed_virtual", "normal+2", False,
)
if testing.requires.computed_columns_stored.enabled:
self.check_column(
data, "computed_stored", "normal-42", True,
)
@testing.requires.schemas
def test_get_column_returns_persisted_with_schema(self):
insp = inspect(config.db)
cols = insp.get_columns(
"computed_column_table", schema=config.test_schema
)
data = {c["name"]: c for c in cols}
self.check_column(
data,
"computed_no_flag",
"normal/42",
testing.requires.computed_columns_default_persisted.enabled,
)
if testing.requires.computed_columns_virtual.enabled:
self.check_column(
data, "computed_virtual", "normal/2", False,
)
if testing.requires.computed_columns_stored.enabled:
self.check_column(
data, "computed_stored", "normal*42", True,
)
__all__ = (
"ComponentReflectionTest",
"HasTableTest",
"NormalizedNameTest",
"ComputedReflectionTest",
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/suite/test_cte.py | from .. import config
from .. import fixtures
from ..assertions import eq_
from ..schema import Column
from ..schema import Table
from ... import ForeignKey
from ... import Integer
from ... import select
from ... import String
from ... import testing
class CTETest(fixtures.TablesTest):
__backend__ = True
__requires__ = ("ctes",)
run_inserts = "each"
run_deletes = "each"
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
Column("parent_id", ForeignKey("some_table.id")),
)
Table(
"some_other_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
Column("parent_id", Integer),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "data": "d1", "parent_id": None},
{"id": 2, "data": "d2", "parent_id": 1},
{"id": 3, "data": "d3", "parent_id": 1},
{"id": 4, "data": "d4", "parent_id": 3},
{"id": 5, "data": "d5", "parent_id": 3},
],
)
def test_select_nonrecursive_round_trip(self):
some_table = self.tables.some_table
with config.db.connect() as conn:
cte = (
select([some_table])
.where(some_table.c.data.in_(["d2", "d3", "d4"]))
.cte("some_cte")
)
result = conn.execute(
select([cte.c.data]).where(cte.c.data.in_(["d4", "d5"]))
)
eq_(result.fetchall(), [("d4",)])
def test_select_recursive_round_trip(self):
some_table = self.tables.some_table
with config.db.connect() as conn:
cte = (
select([some_table])
.where(some_table.c.data.in_(["d2", "d3", "d4"]))
.cte("some_cte", recursive=True)
)
cte_alias = cte.alias("c1")
st1 = some_table.alias()
# note that SQL Server requires this to be UNION ALL,
# can't be UNION
cte = cte.union_all(
select([st1]).where(st1.c.id == cte_alias.c.parent_id)
)
result = conn.execute(
select([cte.c.data])
.where(cte.c.data != "d2")
.order_by(cte.c.data.desc())
)
eq_(
result.fetchall(),
[("d4",), ("d3",), ("d3",), ("d1",), ("d1",), ("d1",)],
)
def test_insert_from_select_round_trip(self):
some_table = self.tables.some_table
some_other_table = self.tables.some_other_table
with config.db.connect() as conn:
cte = (
select([some_table])
.where(some_table.c.data.in_(["d2", "d3", "d4"]))
.cte("some_cte")
)
conn.execute(
some_other_table.insert().from_select(
["id", "data", "parent_id"], select([cte])
)
)
eq_(
conn.execute(
select([some_other_table]).order_by(some_other_table.c.id)
).fetchall(),
[(2, "d2", 1), (3, "d3", 1), (4, "d4", 3)],
)
@testing.requires.ctes_with_update_delete
@testing.requires.update_from
def test_update_from_round_trip(self):
some_table = self.tables.some_table
some_other_table = self.tables.some_other_table
with config.db.connect() as conn:
conn.execute(
some_other_table.insert().from_select(
["id", "data", "parent_id"], select([some_table])
)
)
cte = (
select([some_table])
.where(some_table.c.data.in_(["d2", "d3", "d4"]))
.cte("some_cte")
)
conn.execute(
some_other_table.update()
.values(parent_id=5)
.where(some_other_table.c.data == cte.c.data)
)
eq_(
conn.execute(
select([some_other_table]).order_by(some_other_table.c.id)
).fetchall(),
[
(1, "d1", None),
(2, "d2", 5),
(3, "d3", 5),
(4, "d4", 5),
(5, "d5", 3),
],
)
@testing.requires.ctes_with_update_delete
@testing.requires.delete_from
def test_delete_from_round_trip(self):
some_table = self.tables.some_table
some_other_table = self.tables.some_other_table
with config.db.connect() as conn:
conn.execute(
some_other_table.insert().from_select(
["id", "data", "parent_id"], select([some_table])
)
)
cte = (
select([some_table])
.where(some_table.c.data.in_(["d2", "d3", "d4"]))
.cte("some_cte")
)
conn.execute(
some_other_table.delete().where(
some_other_table.c.data == cte.c.data
)
)
eq_(
conn.execute(
select([some_other_table]).order_by(some_other_table.c.id)
).fetchall(),
[(1, "d1", None), (5, "d5", 3)],
)
@testing.requires.ctes_with_update_delete
def test_delete_scalar_subq_round_trip(self):
some_table = self.tables.some_table
some_other_table = self.tables.some_other_table
with config.db.connect() as conn:
conn.execute(
some_other_table.insert().from_select(
["id", "data", "parent_id"], select([some_table])
)
)
cte = (
select([some_table])
.where(some_table.c.data.in_(["d2", "d3", "d4"]))
.cte("some_cte")
)
conn.execute(
some_other_table.delete().where(
some_other_table.c.data
== select([cte.c.data]).where(
cte.c.id == some_other_table.c.id
)
)
)
eq_(
conn.execute(
select([some_other_table]).order_by(some_other_table.c.id)
).fetchall(),
[(1, "d1", None), (5, "d5", 3)],
)
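# Illustrative aside (not part of the upstream suite): the recursive CTE
# shape from test_select_recursive_round_trip, compiled with the default
# dialect so the WITH RECURSIVE form is visible; names are invented.
def _demo_recursive_cte_sql():
    from sqlalchemy import Column, Integer, MetaData, Table, select
    md = MetaData()
    t = Table(
        "some_table_demo",
        md,
        Column("id", Integer),
        Column("parent_id", Integer),
    )
    cte = select([t]).where(t.c.parent_id.is_(None)).cte(
        "tree", recursive=True
    )
    cte_alias = cte.alias("c1")
    t2 = t.alias()
    # the recursive member; some backends (SQL Server) require UNION ALL
    cte = cte.union_all(select([t2]).where(t2.c.parent_id == cte_alias.c.id))
    print(select([cte.c.id]))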
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/testing/suite/test_types.py | # coding: utf-8
import datetime
import decimal
import json
from .. import config
from .. import engines
from .. import fixtures
from .. import mock
from ..assertions import eq_
from ..assertions import is_
from ..config import requirements
from ..schema import Column
from ..schema import Table
from ... import and_
from ... import BigInteger
from ... import bindparam
from ... import Boolean
from ... import case
from ... import cast
from ... import Date
from ... import DateTime
from ... import Float
from ... import Integer
from ... import JSON
from ... import literal
from ... import MetaData
from ... import null
from ... import Numeric
from ... import select
from ... import String
from ... import testing
from ... import Text
from ... import Time
from ... import TIMESTAMP
from ... import type_coerce
from ... import Unicode
from ... import UnicodeText
from ... import util
from ...ext.declarative import declarative_base
from ...orm import Session
from ...util import u
class _LiteralRoundTripFixture(object):
supports_whereclause = True
@testing.provide_metadata
def _literal_round_trip(self, type_, input_, output, filter_=None):
"""test literal rendering """
# for literal, we test the literal render in an INSERT
# into a typed column. we can then SELECT it back as its
# official type; ideally we'd be able to use CAST here
# but MySQL in particular can't CAST fully
t = Table("t", self.metadata, Column("x", type_))
t.create()
with testing.db.connect() as conn:
for value in input_:
ins = (
t.insert()
.values(x=literal(value))
.compile(
dialect=testing.db.dialect,
compile_kwargs=dict(literal_binds=True),
)
)
conn.execute(ins)
if self.supports_whereclause:
stmt = t.select().where(t.c.x == literal(value))
else:
stmt = t.select()
stmt = stmt.compile(
dialect=testing.db.dialect,
compile_kwargs=dict(literal_binds=True),
)
for row in conn.execute(stmt):
value = row[0]
if filter_ is not None:
value = filter_(value)
assert value in output
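# Illustrative aside (not part of the upstream suite): the heart of the
# fixture above is ``literal_binds`` compilation, which inlines the bound
# values into the SQL string. A minimal self-contained sketch:
def _demo_literal_binds():
    from sqlalchemy import Column, Integer, MetaData, Table
    md = MetaData()
    t = Table("t_demo", md, Column("x", Integer))
    ins = t.insert().values(x=5).compile(
        compile_kwargs={"literal_binds": True}
    )
    print(ins)  # INSERT INTO t_demo (x) VALUES (5)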
class _UnicodeFixture(_LiteralRoundTripFixture):
__requires__ = ("unicode_data",)
data = u(
"Alors vous imaginez ma 🐍 surprise, au lever du jour, "
"quand une drôle de petite 🐍 voix m’a réveillé. Elle "
"disait: « S’il vous plaît… dessine-moi 🐍 un mouton! »"
)
@property
def supports_whereclause(self):
return config.requirements.expressions_against_unbounded_text.enabled
@classmethod
def define_tables(cls, metadata):
Table(
"unicode_table",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("unicode_data", cls.datatype),
)
def test_round_trip(self):
unicode_table = self.tables.unicode_table
config.db.execute(unicode_table.insert(), {"unicode_data": self.data})
row = config.db.execute(select([unicode_table.c.unicode_data])).first()
eq_(row, (self.data,))
assert isinstance(row[0], util.text_type)
def test_round_trip_executemany(self):
unicode_table = self.tables.unicode_table
config.db.execute(
unicode_table.insert(),
[{"unicode_data": self.data} for i in range(3)],
)
rows = config.db.execute(
select([unicode_table.c.unicode_data])
).fetchall()
eq_(rows, [(self.data,) for i in range(3)])
for row in rows:
assert isinstance(row[0], util.text_type)
def _test_null_strings(self, connection):
unicode_table = self.tables.unicode_table
connection.execute(unicode_table.insert(), {"unicode_data": None})
row = connection.execute(
select([unicode_table.c.unicode_data])
).first()
eq_(row, (None,))
def _test_empty_strings(self, connection):
unicode_table = self.tables.unicode_table
connection.execute(unicode_table.insert(), {"unicode_data": u("")})
row = connection.execute(
select([unicode_table.c.unicode_data])
).first()
eq_(row, (u(""),))
def test_literal(self):
self._literal_round_trip(self.datatype, [self.data], [self.data])
def test_literal_non_ascii(self):
self._literal_round_trip(
self.datatype, [util.u("réve🐍 illé")], [util.u("réve🐍 illé")]
)
class UnicodeVarcharTest(_UnicodeFixture, fixtures.TablesTest):
__requires__ = ("unicode_data",)
__backend__ = True
datatype = Unicode(255)
@requirements.empty_strings_varchar
def test_empty_strings_varchar(self, connection):
self._test_empty_strings(connection)
def test_null_strings_varchar(self, connection):
self._test_null_strings(connection)
class UnicodeTextTest(_UnicodeFixture, fixtures.TablesTest):
__requires__ = "unicode_data", "text_type"
__backend__ = True
datatype = UnicodeText()
@requirements.empty_strings_text
def test_empty_strings_text(self, connection):
self._test_empty_strings(connection)
def test_null_strings_text(self, connection):
self._test_null_strings(connection)
class TextTest(_LiteralRoundTripFixture, fixtures.TablesTest):
__requires__ = ("text_type",)
__backend__ = True
@property
def supports_whereclause(self):
return config.requirements.expressions_against_unbounded_text.enabled
@classmethod
def define_tables(cls, metadata):
Table(
"text_table",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("text_data", Text),
)
def test_text_roundtrip(self):
text_table = self.tables.text_table
config.db.execute(text_table.insert(), {"text_data": "some text"})
row = config.db.execute(select([text_table.c.text_data])).first()
eq_(row, ("some text",))
@testing.requires.empty_strings_text
def test_text_empty_strings(self, connection):
text_table = self.tables.text_table
connection.execute(text_table.insert(), {"text_data": ""})
row = connection.execute(select([text_table.c.text_data])).first()
eq_(row, ("",))
def test_text_null_strings(self, connection):
text_table = self.tables.text_table
connection.execute(text_table.insert(), {"text_data": None})
row = connection.execute(select([text_table.c.text_data])).first()
eq_(row, (None,))
def test_literal(self):
self._literal_round_trip(Text, ["some text"], ["some text"])
def test_literal_non_ascii(self):
self._literal_round_trip(
Text, [util.u("réve🐍 illé")], [util.u("réve🐍 illé")]
)
def test_literal_quoting(self):
data = """some 'text' hey "hi there" that's text"""
self._literal_round_trip(Text, [data], [data])
def test_literal_backslashes(self):
data = r"backslash one \ backslash two \\ end"
self._literal_round_trip(Text, [data], [data])
def test_literal_percentsigns(self):
data = r"percent % signs %% percent"
self._literal_round_trip(Text, [data], [data])
class StringTest(_LiteralRoundTripFixture, fixtures.TestBase):
__backend__ = True
@requirements.unbounded_varchar
def test_nolength_string(self):
metadata = MetaData()
foo = Table("foo", metadata, Column("one", String))
foo.create(config.db)
foo.drop(config.db)
def test_literal(self):
# note that in Python 3, this invokes the Unicode
# datatype for the literal part because all strings are unicode
self._literal_round_trip(String(40), ["some text"], ["some text"])
def test_literal_non_ascii(self):
self._literal_round_trip(
String(40), [util.u("réve🐍 illé")], [util.u("réve🐍 illé")]
)
def test_literal_quoting(self):
data = """some 'text' hey "hi there" that's text"""
self._literal_round_trip(String(40), [data], [data])
def test_literal_backslashes(self):
data = r"backslash one \ backslash two \\ end"
self._literal_round_trip(String(40), [data], [data])
class _DateFixture(_LiteralRoundTripFixture):
compare = None
@classmethod
def define_tables(cls, metadata):
Table(
"date_table",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("date_data", cls.datatype),
)
def test_round_trip(self):
date_table = self.tables.date_table
config.db.execute(date_table.insert(), {"date_data": self.data})
row = config.db.execute(select([date_table.c.date_data])).first()
compare = self.compare or self.data
eq_(row, (compare,))
assert isinstance(row[0], type(compare))
def test_null(self):
date_table = self.tables.date_table
config.db.execute(date_table.insert(), {"date_data": None})
row = config.db.execute(select([date_table.c.date_data])).first()
eq_(row, (None,))
@testing.requires.datetime_literals
def test_literal(self):
compare = self.compare or self.data
self._literal_round_trip(self.datatype, [self.data], [compare])
@testing.requires.standalone_null_binds_whereclause
def test_null_bound_comparison(self):
# this test is based on an Oracle issue observed in #4886.
# passing NULL for an expression that needs to be interpreted as
# a certain type, does the DBAPI have the info it needs to do this.
date_table = self.tables.date_table
with config.db.connect() as conn:
result = conn.execute(
date_table.insert(), {"date_data": self.data}
)
id_ = result.inserted_primary_key[0]
stmt = select([date_table.c.id]).where(
case(
[
(
bindparam("foo", type_=self.datatype) != None,
bindparam("foo", type_=self.datatype),
)
],
else_=date_table.c.date_data,
)
== date_table.c.date_data
)
row = conn.execute(stmt, {"foo": None}).first()
eq_(row[0], id_)
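# Illustrative aside (not part of the upstream suite): why the typed
# ``bindparam`` in test_null_bound_comparison matters. Giving the parameter
# an explicit type lets the DBAPI bind a typed NULL, which stricter
# backends such as Oracle may require (see issue #4886 above).
def _demo_typed_null_bindparam():
    from sqlalchemy import DateTime, bindparam, column, select
    stmt = select([column("id")]).where(
        column("date_data") == bindparam("foo", type_=DateTime)
    )
    # the parameter carries DateTime type info even when the executed
    # value is None
    print(stmt)  # SELECT id WHERE date_data = :foo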
class DateTimeTest(_DateFixture, fixtures.TablesTest):
__requires__ = ("datetime",)
__backend__ = True
datatype = DateTime
data = datetime.datetime(2012, 10, 15, 12, 57, 18)
class DateTimeMicrosecondsTest(_DateFixture, fixtures.TablesTest):
__requires__ = ("datetime_microseconds",)
__backend__ = True
datatype = DateTime
data = datetime.datetime(2012, 10, 15, 12, 57, 18, 396)
class TimestampMicrosecondsTest(_DateFixture, fixtures.TablesTest):
__requires__ = ("timestamp_microseconds",)
__backend__ = True
datatype = TIMESTAMP
data = datetime.datetime(2012, 10, 15, 12, 57, 18, 396)
class TimeTest(_DateFixture, fixtures.TablesTest):
__requires__ = ("time",)
__backend__ = True
datatype = Time
data = datetime.time(12, 57, 18)
class TimeMicrosecondsTest(_DateFixture, fixtures.TablesTest):
__requires__ = ("time_microseconds",)
__backend__ = True
datatype = Time
data = datetime.time(12, 57, 18, 396)
class DateTest(_DateFixture, fixtures.TablesTest):
__requires__ = ("date",)
__backend__ = True
datatype = Date
data = datetime.date(2012, 10, 15)
class DateTimeCoercedToDateTimeTest(_DateFixture, fixtures.TablesTest):
__requires__ = "date", "date_coerces_from_datetime"
__backend__ = True
datatype = Date
data = datetime.datetime(2012, 10, 15, 12, 57, 18)
compare = datetime.date(2012, 10, 15)
class DateTimeHistoricTest(_DateFixture, fixtures.TablesTest):
__requires__ = ("datetime_historic",)
__backend__ = True
datatype = DateTime
data = datetime.datetime(1850, 11, 10, 11, 52, 35)
class DateHistoricTest(_DateFixture, fixtures.TablesTest):
__requires__ = ("date_historic",)
__backend__ = True
datatype = Date
data = datetime.date(1727, 4, 1)
class IntegerTest(_LiteralRoundTripFixture, fixtures.TestBase):
__backend__ = True
def test_literal(self):
self._literal_round_trip(Integer, [5], [5])
def test_huge_int(self):
self._round_trip(BigInteger, 1376537018368127)
@testing.provide_metadata
def _round_trip(self, datatype, data):
metadata = self.metadata
int_table = Table(
"integer_table",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("integer_data", datatype),
)
metadata.create_all(config.db)
config.db.execute(int_table.insert(), {"integer_data": data})
row = config.db.execute(select([int_table.c.integer_data])).first()
eq_(row, (data,))
if util.py3k:
assert isinstance(row[0], int)
else:
assert isinstance(row[0], (long, int)) # noqa
class NumericTest(_LiteralRoundTripFixture, fixtures.TestBase):
__backend__ = True
@testing.emits_warning(r".*does \*not\* support Decimal objects natively")
@testing.provide_metadata
def _do_test(self, type_, input_, output, filter_=None, check_scale=False):
metadata = self.metadata
t = Table("t", metadata, Column("x", type_))
t.create()
t.insert().execute([{"x": x} for x in input_])
result = {row[0] for row in t.select().execute()}
output = set(output)
if filter_:
result = set(filter_(x) for x in result)
output = set(filter_(x) for x in output)
eq_(result, output)
if check_scale:
eq_([str(x) for x in result], [str(x) for x in output])
@testing.emits_warning(r".*does \*not\* support Decimal objects natively")
def test_render_literal_numeric(self):
self._literal_round_trip(
Numeric(precision=8, scale=4),
[15.7563, decimal.Decimal("15.7563")],
[decimal.Decimal("15.7563")],
)
@testing.emits_warning(r".*does \*not\* support Decimal objects natively")
def test_render_literal_numeric_asfloat(self):
self._literal_round_trip(
Numeric(precision=8, scale=4, asdecimal=False),
[15.7563, decimal.Decimal("15.7563")],
[15.7563],
)
def test_render_literal_float(self):
self._literal_round_trip(
Float(4),
[15.7563, decimal.Decimal("15.7563")],
[15.7563],
filter_=lambda n: n is not None and round(n, 5) or None,
)
@testing.requires.precision_generic_float_type
def test_float_custom_scale(self):
self._do_test(
Float(None, decimal_return_scale=7, asdecimal=True),
[15.7563827, decimal.Decimal("15.7563827")],
[decimal.Decimal("15.7563827")],
check_scale=True,
)
def test_numeric_as_decimal(self):
self._do_test(
Numeric(precision=8, scale=4),
[15.7563, decimal.Decimal("15.7563")],
[decimal.Decimal("15.7563")],
)
def test_numeric_as_float(self):
self._do_test(
Numeric(precision=8, scale=4, asdecimal=False),
[15.7563, decimal.Decimal("15.7563")],
[15.7563],
)
@testing.requires.fetch_null_from_numeric
def test_numeric_null_as_decimal(self):
self._do_test(Numeric(precision=8, scale=4), [None], [None])
@testing.requires.fetch_null_from_numeric
def test_numeric_null_as_float(self):
self._do_test(
Numeric(precision=8, scale=4, asdecimal=False), [None], [None]
)
@testing.requires.floats_to_four_decimals
def test_float_as_decimal(self):
self._do_test(
Float(precision=8, asdecimal=True),
[15.7563, decimal.Decimal("15.7563"), None],
[decimal.Decimal("15.7563"), None],
)
def test_float_as_float(self):
self._do_test(
Float(precision=8),
[15.7563, decimal.Decimal("15.7563")],
[15.7563],
filter_=lambda n: n is not None and round(n, 5) or None,
)
def test_float_coerce_round_trip(self):
expr = 15.7563
val = testing.db.scalar(select([literal(expr)]))
eq_(val, expr)
# this does not work in MySQL, see #4036, however we choose not
# to render CAST unconditionally since this is kind of an edge case.
@testing.requires.implicit_decimal_binds
@testing.emits_warning(r".*does \*not\* support Decimal objects natively")
def test_decimal_coerce_round_trip(self):
expr = decimal.Decimal("15.7563")
val = testing.db.scalar(select([literal(expr)]))
eq_(val, expr)
@testing.emits_warning(r".*does \*not\* support Decimal objects natively")
def test_decimal_coerce_round_trip_w_cast(self):
expr = decimal.Decimal("15.7563")
val = testing.db.scalar(select([cast(expr, Numeric(10, 4))]))
eq_(val, expr)
@testing.requires.precision_numerics_general
def test_precision_decimal(self):
numbers = set(
[
decimal.Decimal("54.234246451650"),
decimal.Decimal("0.004354"),
decimal.Decimal("900.0"),
]
)
self._do_test(Numeric(precision=18, scale=12), numbers, numbers)
@testing.requires.precision_numerics_enotation_large
def test_enotation_decimal(self):
"""test exceedingly small decimals.
        Decimal converts to E notation when the adjusted exponent
        is less than -6.
"""
numbers = set(
[
decimal.Decimal("1E-2"),
decimal.Decimal("1E-3"),
decimal.Decimal("1E-4"),
decimal.Decimal("1E-5"),
decimal.Decimal("1E-6"),
decimal.Decimal("1E-7"),
decimal.Decimal("1E-8"),
decimal.Decimal("0.01000005940696"),
decimal.Decimal("0.00000005940696"),
decimal.Decimal("0.00000000000696"),
decimal.Decimal("0.70000000000696"),
decimal.Decimal("696E-12"),
]
)
self._do_test(Numeric(precision=18, scale=14), numbers, numbers)
@testing.requires.precision_numerics_enotation_large
def test_enotation_decimal_large(self):
"""test exceedingly large decimals.
"""
numbers = set(
[
decimal.Decimal("4E+8"),
decimal.Decimal("5748E+15"),
decimal.Decimal("1.521E+15"),
decimal.Decimal("00000000000000.1E+12"),
]
)
self._do_test(Numeric(precision=25, scale=2), numbers, numbers)
@testing.requires.precision_numerics_many_significant_digits
def test_many_significant_digits(self):
numbers = set(
[
decimal.Decimal("31943874831932418390.01"),
decimal.Decimal("319438950232418390.273596"),
decimal.Decimal("87673.594069654243"),
]
)
self._do_test(Numeric(precision=38, scale=12), numbers, numbers)
@testing.requires.precision_numerics_retains_significant_digits
def test_numeric_no_decimal(self):
numbers = set([decimal.Decimal("1.000")])
self._do_test(
Numeric(precision=5, scale=3), numbers, numbers, check_scale=True
)
class BooleanTest(_LiteralRoundTripFixture, fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"boolean_table",
metadata,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("value", Boolean),
Column("unconstrained_value", Boolean(create_constraint=False)),
)
def test_render_literal_bool(self):
self._literal_round_trip(Boolean(), [True, False], [True, False])
def test_round_trip(self):
boolean_table = self.tables.boolean_table
config.db.execute(
boolean_table.insert(),
{"id": 1, "value": True, "unconstrained_value": False},
)
row = config.db.execute(
select(
[boolean_table.c.value, boolean_table.c.unconstrained_value]
)
).first()
eq_(row, (True, False))
assert isinstance(row[0], bool)
@testing.requires.nullable_booleans
def test_null(self):
boolean_table = self.tables.boolean_table
config.db.execute(
boolean_table.insert(),
{"id": 1, "value": None, "unconstrained_value": None},
)
row = config.db.execute(
select(
[boolean_table.c.value, boolean_table.c.unconstrained_value]
)
).first()
eq_(row, (None, None))
def test_whereclause(self):
# testing "WHERE <column>" renders a compatible expression
boolean_table = self.tables.boolean_table
with config.db.connect() as conn:
conn.execute(
boolean_table.insert(),
[
{"id": 1, "value": True, "unconstrained_value": True},
{"id": 2, "value": False, "unconstrained_value": False},
],
)
eq_(
conn.scalar(
select([boolean_table.c.id]).where(boolean_table.c.value)
),
1,
)
eq_(
conn.scalar(
select([boolean_table.c.id]).where(
boolean_table.c.unconstrained_value
)
),
1,
)
eq_(
conn.scalar(
select([boolean_table.c.id]).where(~boolean_table.c.value)
),
2,
)
eq_(
conn.scalar(
select([boolean_table.c.id]).where(
~boolean_table.c.unconstrained_value
)
),
2,
)
class JSONTest(_LiteralRoundTripFixture, fixtures.TablesTest):
__requires__ = ("json_type",)
__backend__ = True
datatype = JSON
@classmethod
def define_tables(cls, metadata):
Table(
"data_table",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(30), nullable=False),
Column("data", cls.datatype),
Column("nulldata", cls.datatype(none_as_null=True)),
)
def test_round_trip_data1(self):
self._test_round_trip({"key1": "value1", "key2": "value2"})
def _test_round_trip(self, data_element):
data_table = self.tables.data_table
config.db.execute(
data_table.insert(), {"name": "row1", "data": data_element}
)
row = config.db.execute(select([data_table.c.data])).first()
eq_(row, (data_element,))
def _index_fixtures(fn):
fn = testing.combinations(
("boolean", True),
("boolean", False),
("boolean", None),
("string", "some string"),
("string", None),
("string", util.u("réve illé")),
(
"string",
util.u("réve🐍 illé"),
testing.requires.json_index_supplementary_unicode_element,
),
("integer", 15),
("integer", 1),
("integer", 0),
("integer", None),
("float", 28.5),
("float", None),
            # TODO: how to test for comparison
# ("json", {"foo": "bar"}),
id_="sa",
)(fn)
return fn
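    # Sketch note (not part of the fixture itself): testing.combinations
    # above parametrizes each decorated test with (datatype, value)
    # argument pairs, so one test body runs once per row; an optional
    # third element is a requirements rule gating that single combination.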
@_index_fixtures
def test_index_typed_access(self, datatype, value):
data_table = self.tables.data_table
data_element = {"key1": value}
with config.db.connect() as conn:
conn.execute(
data_table.insert(),
{
"name": "row1",
"data": data_element,
"nulldata": data_element,
},
)
expr = data_table.c.data["key1"]
expr = getattr(expr, "as_%s" % datatype)()
roundtrip = conn.scalar(select([expr]))
eq_(roundtrip, value)
if util.py3k: # skip py2k to avoid comparing unicode to str etc.
is_(type(roundtrip), type(value))
@_index_fixtures
def test_index_typed_comparison(self, datatype, value):
data_table = self.tables.data_table
data_element = {"key1": value}
with config.db.connect() as conn:
conn.execute(
data_table.insert(),
{
"name": "row1",
"data": data_element,
"nulldata": data_element,
},
)
expr = data_table.c.data["key1"]
expr = getattr(expr, "as_%s" % datatype)()
row = conn.execute(select([expr]).where(expr == value)).first()
# make sure we get a row even if value is None
eq_(row, (value,))
@_index_fixtures
def test_path_typed_comparison(self, datatype, value):
data_table = self.tables.data_table
data_element = {"key1": {"subkey1": value}}
with config.db.connect() as conn:
conn.execute(
data_table.insert(),
{
"name": "row1",
"data": data_element,
"nulldata": data_element,
},
)
expr = data_table.c.data[("key1", "subkey1")]
expr = getattr(expr, "as_%s" % datatype)()
row = conn.execute(select([expr]).where(expr == value)).first()
# make sure we get a row even if value is None
eq_(row, (value,))
@testing.combinations(
(True,),
(False,),
(None,),
(15,),
(0,),
(-1,),
(-1.0,),
(15.052,),
("a string",),
(util.u("réve illé"),),
(util.u("réve🐍 illé"),),
)
def test_single_element_round_trip(self, element):
data_table = self.tables.data_table
data_element = element
with config.db.connect() as conn:
conn.execute(
data_table.insert(),
{
"name": "row1",
"data": data_element,
"nulldata": data_element,
},
)
row = conn.execute(
select([data_table.c.data, data_table.c.nulldata])
).first()
eq_(row, (data_element, data_element))
def test_round_trip_custom_json(self):
data_table = self.tables.data_table
data_element = {"key1": "data1"}
js = mock.Mock(side_effect=json.dumps)
jd = mock.Mock(side_effect=json.loads)
engine = engines.testing_engine(
options=dict(json_serializer=js, json_deserializer=jd)
)
# support sqlite :memory: database...
data_table.create(engine, checkfirst=True)
with engine.connect() as conn:
conn.execute(
data_table.insert(), {"name": "row1", "data": data_element}
)
row = conn.execute(select([data_table.c.data])).first()
eq_(row, (data_element,))
eq_(js.mock_calls, [mock.call(data_element)])
eq_(jd.mock_calls, [mock.call(json.dumps(data_element))])
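        # Note: json_serializer / json_deserializer are create_engine()
        # options; the mocks wrap json.dumps / json.loads so the two
        # assertions above can prove each callable was routed through
        # exactly once for the round trip.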
def test_round_trip_none_as_sql_null(self, connection):
col = self.tables.data_table.c["nulldata"]
conn = connection
conn.execute(
self.tables.data_table.insert(), {"name": "r1", "data": None}
)
eq_(
conn.scalar(
select([self.tables.data_table.c.name]).where(col.is_(null()))
),
"r1",
)
eq_(conn.scalar(select([col])), None)
def test_round_trip_json_null_as_json_null(self, connection):
col = self.tables.data_table.c["data"]
conn = connection
conn.execute(
self.tables.data_table.insert(), {"name": "r1", "data": JSON.NULL},
)
eq_(
conn.scalar(
select([self.tables.data_table.c.name]).where(
cast(col, String) == "null"
)
),
"r1",
)
eq_(conn.scalar(select([col])), None)
def test_round_trip_none_as_json_null(self):
col = self.tables.data_table.c["data"]
with config.db.connect() as conn:
conn.execute(
self.tables.data_table.insert(), {"name": "r1", "data": None}
)
eq_(
conn.scalar(
select([self.tables.data_table.c.name]).where(
cast(col, String) == "null"
)
),
"r1",
)
eq_(conn.scalar(select([col])), None)
def test_unicode_round_trip(self):
# note we include Unicode supplementary characters as well
with config.db.connect() as conn:
conn.execute(
self.tables.data_table.insert(),
{
"name": "r1",
"data": {
util.u("réve🐍 illé"): util.u("réve🐍 illé"),
"data": {"k1": util.u("drôl🐍e")},
},
},
)
eq_(
conn.scalar(select([self.tables.data_table.c.data])),
{
util.u("réve🐍 illé"): util.u("réve🐍 illé"),
"data": {"k1": util.u("drôl🐍e")},
},
)
def test_eval_none_flag_orm(self):
Base = declarative_base()
class Data(Base):
__table__ = self.tables.data_table
s = Session(testing.db)
d1 = Data(name="d1", data=None, nulldata=None)
s.add(d1)
s.commit()
s.bulk_insert_mappings(
Data, [{"name": "d2", "data": None, "nulldata": None}]
)
eq_(
s.query(
cast(self.tables.data_table.c.data, String()),
cast(self.tables.data_table.c.nulldata, String),
)
.filter(self.tables.data_table.c.name == "d1")
.first(),
("null", None),
)
eq_(
s.query(
cast(self.tables.data_table.c.data, String()),
cast(self.tables.data_table.c.nulldata, String),
)
.filter(self.tables.data_table.c.name == "d2")
.first(),
("null", None),
)
class JSONStringCastIndexTest(_LiteralRoundTripFixture, fixtures.TablesTest):
"""test JSON index access with "cast to string", which we have documented
for a long time as how to compare JSON values, but is ultimately not
reliable in all cases.
"""
__requires__ = ("json_type",)
__backend__ = True
datatype = JSON
data1 = {"key1": "value1", "key2": "value2"}
data2 = {
"Key 'One'": "value1",
"key two": "value2",
"key three": "value ' three '",
}
data3 = {
"key1": [1, 2, 3],
"key2": ["one", "two", "three"],
"key3": [{"four": "five"}, {"six": "seven"}],
}
data4 = ["one", "two", "three"]
data5 = {
"nested": {
"elem1": [{"a": "b", "c": "d"}, {"e": "f", "g": "h"}],
"elem2": {"elem3": {"elem4": "elem5"}},
}
}
data6 = {"a": 5, "b": "some value", "c": {"foo": "bar"}}
@classmethod
def define_tables(cls, metadata):
Table(
"data_table",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(30), nullable=False),
Column("data", cls.datatype),
Column("nulldata", cls.datatype(none_as_null=True)),
)
def _criteria_fixture(self):
config.db.execute(
self.tables.data_table.insert(),
[
{"name": "r1", "data": self.data1},
{"name": "r2", "data": self.data2},
{"name": "r3", "data": self.data3},
{"name": "r4", "data": self.data4},
{"name": "r5", "data": self.data5},
{"name": "r6", "data": self.data6},
],
)
def _test_index_criteria(self, crit, expected, test_literal=True):
self._criteria_fixture()
with config.db.connect() as conn:
stmt = select([self.tables.data_table.c.name]).where(crit)
eq_(conn.scalar(stmt), expected)
if test_literal:
literal_sql = str(
stmt.compile(
config.db, compile_kwargs={"literal_binds": True}
)
)
eq_(conn.scalar(literal_sql), expected)
def test_string_cast_crit_spaces_in_key(self):
name = self.tables.data_table.c.name
col = self.tables.data_table.c["data"]
# limit the rows here to avoid PG error
# "cannot extract field from a non-object", which is
# fixed in 9.4 but may exist in 9.3
self._test_index_criteria(
and_(
name.in_(["r1", "r2", "r3"]),
cast(col["key two"], String) == '"value2"',
),
"r2",
)
@config.requirements.json_array_indexes
def test_string_cast_crit_simple_int(self):
name = self.tables.data_table.c.name
col = self.tables.data_table.c["data"]
# limit the rows here to avoid PG error
# "cannot extract array element from a non-array", which is
# fixed in 9.4 but may exist in 9.3
self._test_index_criteria(
and_(name == "r4", cast(col[1], String) == '"two"'), "r4"
)
def test_string_cast_crit_mixed_path(self):
col = self.tables.data_table.c["data"]
self._test_index_criteria(
cast(col[("key3", 1, "six")], String) == '"seven"', "r3"
)
def test_string_cast_crit_string_path(self):
col = self.tables.data_table.c["data"]
self._test_index_criteria(
cast(col[("nested", "elem2", "elem3", "elem4")], String)
== '"elem5"',
"r5",
)
def test_string_cast_crit_against_string_basic(self):
name = self.tables.data_table.c.name
col = self.tables.data_table.c["data"]
self._test_index_criteria(
and_(name == "r6", cast(col["b"], String) == '"some value"'), "r6"
)
def test_crit_against_string_coerce_type(self):
name = self.tables.data_table.c.name
col = self.tables.data_table.c["data"]
self._test_index_criteria(
and_(
name == "r6",
cast(col["b"], String) == type_coerce("some value", JSON),
),
"r6",
test_literal=False,
)
def test_crit_against_int_basic(self):
name = self.tables.data_table.c.name
col = self.tables.data_table.c["data"]
self._test_index_criteria(
and_(name == "r6", cast(col["a"], String) == "5"), "r6"
)
def test_crit_against_int_coerce_type(self):
name = self.tables.data_table.c.name
col = self.tables.data_table.c["data"]
self._test_index_criteria(
and_(name == "r6", cast(col["a"], String) == type_coerce(5, JSON)),
"r6",
test_literal=False,
)
__all__ = (
"UnicodeVarcharTest",
"UnicodeTextTest",
"JSONTest",
"JSONStringCastIndexTest",
"DateTest",
"DateTimeTest",
"TextTest",
"NumericTest",
"IntegerTest",
"DateTimeHistoricTest",
"DateTimeCoercedToDateTimeTest",
"TimeMicrosecondsTest",
"TimestampMicrosecondsTest",
"TimeTest",
"DateTimeMicrosecondsTest",
"DateHistoricTest",
"StringTest",
"BooleanTest",
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/__init__.py | # dialects/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
__all__ = (
"firebird",
"mssql",
"mysql",
"oracle",
"postgresql",
"sqlite",
"sybase",
)
from .. import util
_translates = {"postgres": "postgresql"}
def _auto_fn(name):
"""default dialect importer.
plugs into the :class:`.PluginLoader`
as a first-hit system.
"""
if "." in name:
dialect, driver = name.split(".")
else:
dialect = name
driver = "base"
if dialect in _translates:
translated = _translates[dialect]
util.warn_deprecated(
"The '%s' dialect name has been "
"renamed to '%s'" % (dialect, translated)
)
dialect = translated
try:
if dialect == "firebird":
try:
module = __import__("sqlalchemy_firebird")
except ImportError:
module = __import__("sqlalchemy.dialects.firebird").dialects
module = getattr(module, dialect)
elif dialect == "sybase":
try:
module = __import__("sqlalchemy_sybase")
except ImportError:
module = __import__("sqlalchemy.dialects.sybase").dialects
module = getattr(module, dialect)
else:
module = __import__("sqlalchemy.dialects.%s" % (dialect,)).dialects
module = getattr(module, dialect)
except ImportError:
return None
if hasattr(module, driver):
module = getattr(module, driver)
return lambda: module.dialect
else:
return None
registry = util.PluginLoader("sqlalchemy.dialects", auto_fn=_auto_fn)
plugins = util.PluginLoader("sqlalchemy.plugins")
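# Hedged usage sketch: engine URL resolution normalizes a drivername such
# as "postgresql+psycopg2" to "postgresql.psycopg2" and calls
# registry.load() with it, which consults setuptools entrypoints first
# and then falls back to _auto_fn above, e.g.
#
#     dialect_cls = registry.load("postgresql.psycopg2")
#
# (descriptive comment only; the real call site lives in the engine/url
# machinery, not in this module)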
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/sybase/mxodbc.py | # sybase/mxodbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase+mxodbc
:name: mxODBC
:dbapi: mxodbc
:connectstring: sybase+mxodbc://<username>:<password>@<dsnname>
:url: http://www.egenix.com/
.. note::
    This dialect is a stub only and is likely non-functional at this time.
"""
from sqlalchemy.connectors.mxodbc import MxODBCConnector
from sqlalchemy.dialects.sybase.base import SybaseDialect
from sqlalchemy.dialects.sybase.base import SybaseExecutionContext
class SybaseExecutionContext_mxodbc(SybaseExecutionContext):
pass
class SybaseDialect_mxodbc(MxODBCConnector, SybaseDialect):
execution_ctx_cls = SybaseExecutionContext_mxodbc
dialect = SybaseDialect_mxodbc
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/sybase/__init__.py | # sybase/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base # noqa
from . import pyodbc # noqa
from . import pysybase # noqa
from .base import BIGINT
from .base import BINARY
from .base import BIT
from .base import CHAR
from .base import DATE
from .base import DATETIME
from .base import FLOAT
from .base import IMAGE
from .base import INT
from .base import INTEGER
from .base import MONEY
from .base import NCHAR
from .base import NUMERIC
from .base import NVARCHAR
from .base import SMALLINT
from .base import SMALLMONEY
from .base import TEXT
from .base import TIME
from .base import TINYINT
from .base import UNICHAR
from .base import UNITEXT
from .base import UNIVARCHAR
from .base import VARBINARY
from .base import VARCHAR
# default dialect
base.dialect = dialect = pyodbc.dialect
__all__ = (
"CHAR",
"VARCHAR",
"TIME",
"NCHAR",
"NVARCHAR",
"TEXT",
"DATE",
"DATETIME",
"FLOAT",
"NUMERIC",
"BIGINT",
"INT",
"INTEGER",
"SMALLINT",
"BINARY",
"VARBINARY",
"UNITEXT",
"UNICHAR",
"UNIVARCHAR",
"IMAGE",
"BIT",
"MONEY",
"SMALLMONEY",
"TINYINT",
"dialect",
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/sybase/pysybase.py | # sybase/pysybase.py
# Copyright (C) 2010-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase+pysybase
:name: Python-Sybase
:dbapi: Sybase
:connectstring: sybase+pysybase://<username>:<password>@<dsn>/[database name]
:url: http://python-sybase.sourceforge.net/
Unicode Support
---------------
The python-sybase driver does not appear to support non-ASCII strings of any
kind at this time.
""" # noqa
from sqlalchemy import processors
from sqlalchemy import types as sqltypes
from sqlalchemy.dialects.sybase.base import SybaseDialect
from sqlalchemy.dialects.sybase.base import SybaseExecutionContext
from sqlalchemy.dialects.sybase.base import SybaseSQLCompiler
class _SybNumeric(sqltypes.Numeric):
def result_processor(self, dialect, type_):
if not self.asdecimal:
return processors.to_float
else:
return sqltypes.Numeric.result_processor(self, dialect, type_)
class SybaseExecutionContext_pysybase(SybaseExecutionContext):
def set_ddl_autocommit(self, dbapi_connection, value):
if value:
# call commit() on the Sybase connection directly,
# to avoid any side effects of calling a Connection
# transactional method inside of pre_exec()
dbapi_connection.commit()
def pre_exec(self):
SybaseExecutionContext.pre_exec(self)
for param in self.parameters:
for key in list(param):
param["@" + key] = param[key]
del param[key]
class SybaseSQLCompiler_pysybase(SybaseSQLCompiler):
def bindparam_string(self, name, **kw):
return "@" + name
class SybaseDialect_pysybase(SybaseDialect):
driver = "pysybase"
execution_ctx_cls = SybaseExecutionContext_pysybase
statement_compiler = SybaseSQLCompiler_pysybase
colspecs = {sqltypes.Numeric: _SybNumeric, sqltypes.Float: sqltypes.Float}
@classmethod
def dbapi(cls):
import Sybase
return Sybase
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user", password="passwd")
return ([opts.pop("host")], opts)
def do_executemany(self, cursor, statement, parameters, context=None):
# calling python-sybase executemany yields:
# TypeError: string too long for buffer
for param in parameters:
cursor.execute(statement, param)
def _get_server_version_info(self, connection):
vers = connection.scalar("select @@version_number")
# i.e. 15500, 15000, 12500 == (15, 5, 0, 0), (15, 0, 0, 0),
# (12, 5, 0, 0)
        return (vers // 1000, vers % 1000 // 100, vers % 100 // 10, vers % 10)
def is_disconnect(self, e, connection, cursor):
if isinstance(
e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError)
):
msg = str(e)
return (
"Unable to complete network request to host" in msg
or "Invalid connection state" in msg
or "Invalid cursor state" in msg
)
else:
return False
dialect = SybaseDialect_pysybase
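# Minimal standalone sketch of the @@version_number decoding performed in
# _get_server_version_info above, using floor division so the tuple holds
# ints on both py2k and py3k; the sample values are the ones quoted in the
# comment there, not queried from a live server.
def _decode_version_number(vers):
    # e.g. 15500 -> (15, 5, 0, 0); 12500 -> (12, 5, 0, 0)
    return (vers // 1000, vers % 1000 // 100, vers % 100 // 10, vers % 10)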
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/sybase/pyodbc.py | # sybase/pyodbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: sybase+pyodbc://<username>:<password>@<dsnname>[/<database>]
:url: http://pypi.python.org/pypi/pyodbc/
Unicode Support
---------------
The pyodbc driver currently supports usage of these Sybase types with
Unicode or multibyte strings::
CHAR
NCHAR
NVARCHAR
TEXT
VARCHAR
Currently *not* supported are::
UNICHAR
UNITEXT
UNIVARCHAR
""" # noqa
import decimal
from sqlalchemy import processors
from sqlalchemy import types as sqltypes
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy.dialects.sybase.base import SybaseDialect
from sqlalchemy.dialects.sybase.base import SybaseExecutionContext
class _SybNumeric_pyodbc(sqltypes.Numeric):
"""Turns Decimals with adjusted() < -6 into floats.
It's not yet known how to get decimals with many
significant digits or very large adjusted() into Sybase
via pyodbc.
"""
def bind_processor(self, dialect):
super_process = super(_SybNumeric_pyodbc, self).bind_processor(dialect)
def process(value):
if self.asdecimal and isinstance(value, decimal.Decimal):
if value.adjusted() < -6:
return processors.to_float(value)
if super_process:
return super_process(value)
else:
return value
return process
class SybaseExecutionContext_pyodbc(SybaseExecutionContext):
def set_ddl_autocommit(self, connection, value):
if value:
connection.autocommit = True
else:
connection.autocommit = False
class SybaseDialect_pyodbc(PyODBCConnector, SybaseDialect):
execution_ctx_cls = SybaseExecutionContext_pyodbc
colspecs = {sqltypes.Numeric: _SybNumeric_pyodbc}
dialect = SybaseDialect_pyodbc
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/sybase/base.py | # sybase/base.py
# Copyright (C) 2010-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
# get_select_precolumns(), limit_clause() implementation
# copyright (C) 2007 Fisch Asset Management
# AG http://www.fam.ch, with coding by Alexander Houben
# alexander.houben@thor-solutions.ch
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase
:name: Sybase
.. note::
The Sybase dialect within SQLAlchemy **is not currently supported**.
It is not tested within continuous integration and is likely to have
many issues and caveats not currently handled. Consider using the
`external dialect <https://github.com/gordthompson/sqlalchemy-sybase>`_
instead.
"""
import re
from sqlalchemy import exc
from sqlalchemy import schema as sa_schema
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.engine import default
from sqlalchemy.engine import reflection
from sqlalchemy.sql import compiler
from sqlalchemy.sql import text
from sqlalchemy.types import BIGINT
from sqlalchemy.types import BINARY
from sqlalchemy.types import CHAR
from sqlalchemy.types import DATE
from sqlalchemy.types import DATETIME
from sqlalchemy.types import DECIMAL
from sqlalchemy.types import FLOAT
from sqlalchemy.types import INT # noqa
from sqlalchemy.types import INTEGER
from sqlalchemy.types import NCHAR
from sqlalchemy.types import NUMERIC
from sqlalchemy.types import NVARCHAR
from sqlalchemy.types import REAL
from sqlalchemy.types import SMALLINT
from sqlalchemy.types import TEXT
from sqlalchemy.types import TIME
from sqlalchemy.types import TIMESTAMP
from sqlalchemy.types import Unicode
from sqlalchemy.types import VARBINARY
from sqlalchemy.types import VARCHAR
RESERVED_WORDS = set(
[
"add",
"all",
"alter",
"and",
"any",
"as",
"asc",
"backup",
"begin",
"between",
"bigint",
"binary",
"bit",
"bottom",
"break",
"by",
"call",
"capability",
"cascade",
"case",
"cast",
"char",
"char_convert",
"character",
"check",
"checkpoint",
"close",
"comment",
"commit",
"connect",
"constraint",
"contains",
"continue",
"convert",
"create",
"cross",
"cube",
"current",
"current_timestamp",
"current_user",
"cursor",
"date",
"dbspace",
"deallocate",
"dec",
"decimal",
"declare",
"default",
"delete",
"deleting",
"desc",
"distinct",
"do",
"double",
"drop",
"dynamic",
"else",
"elseif",
"encrypted",
"end",
"endif",
"escape",
"except",
"exception",
"exec",
"execute",
"existing",
"exists",
"externlogin",
"fetch",
"first",
"float",
"for",
"force",
"foreign",
"forward",
"from",
"full",
"goto",
"grant",
"group",
"having",
"holdlock",
"identified",
"if",
"in",
"index",
"index_lparen",
"inner",
"inout",
"insensitive",
"insert",
"inserting",
"install",
"instead",
"int",
"integer",
"integrated",
"intersect",
"into",
"iq",
"is",
"isolation",
"join",
"key",
"lateral",
"left",
"like",
"lock",
"login",
"long",
"match",
"membership",
"message",
"mode",
"modify",
"natural",
"new",
"no",
"noholdlock",
"not",
"notify",
"null",
"numeric",
"of",
"off",
"on",
"open",
"option",
"options",
"or",
"order",
"others",
"out",
"outer",
"over",
"passthrough",
"precision",
"prepare",
"primary",
"print",
"privileges",
"proc",
"procedure",
"publication",
"raiserror",
"readtext",
"real",
"reference",
"references",
"release",
"remote",
"remove",
"rename",
"reorganize",
"resource",
"restore",
"restrict",
"return",
"revoke",
"right",
"rollback",
"rollup",
"save",
"savepoint",
"scroll",
"select",
"sensitive",
"session",
"set",
"setuser",
"share",
"smallint",
"some",
"sqlcode",
"sqlstate",
"start",
"stop",
"subtrans",
"subtransaction",
"synchronize",
"syntax_error",
"table",
"temporary",
"then",
"time",
"timestamp",
"tinyint",
"to",
"top",
"tran",
"trigger",
"truncate",
"tsequal",
"unbounded",
"union",
"unique",
"unknown",
"unsigned",
"update",
"updating",
"user",
"using",
"validate",
"values",
"varbinary",
"varchar",
"variable",
"varying",
"view",
"wait",
"waitfor",
"when",
"where",
"while",
"window",
"with",
"with_cube",
"with_lparen",
"with_rollup",
"within",
"work",
"writetext",
]
)
class _SybaseUnitypeMixin(object):
"""these types appear to return a buffer object."""
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return str(value) # decode("ucs-2")
else:
return None
return process
class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = "UNICHAR"
class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = "UNIVARCHAR"
class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText):
__visit_name__ = "UNITEXT"
class TINYINT(sqltypes.Integer):
__visit_name__ = "TINYINT"
class BIT(sqltypes.TypeEngine):
__visit_name__ = "BIT"
class MONEY(sqltypes.TypeEngine):
__visit_name__ = "MONEY"
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = "SMALLMONEY"
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = "IMAGE"
class SybaseTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_, **kw):
return self.visit_IMAGE(type_)
def visit_boolean(self, type_, **kw):
return self.visit_BIT(type_)
def visit_unicode(self, type_, **kw):
return self.visit_NVARCHAR(type_)
def visit_UNICHAR(self, type_, **kw):
return "UNICHAR(%d)" % type_.length
def visit_UNIVARCHAR(self, type_, **kw):
return "UNIVARCHAR(%d)" % type_.length
def visit_UNITEXT(self, type_, **kw):
return "UNITEXT"
def visit_TINYINT(self, type_, **kw):
return "TINYINT"
def visit_IMAGE(self, type_, **kw):
return "IMAGE"
def visit_BIT(self, type_, **kw):
return "BIT"
def visit_MONEY(self, type_, **kw):
return "MONEY"
def visit_SMALLMONEY(self, type_, **kw):
return "SMALLMONEY"
def visit_UNIQUEIDENTIFIER(self, type_, **kw):
return "UNIQUEIDENTIFIER"
ischema_names = {
"bigint": BIGINT,
"int": INTEGER,
"integer": INTEGER,
"smallint": SMALLINT,
"tinyint": TINYINT,
"unsigned bigint": BIGINT, # TODO: unsigned flags
"unsigned int": INTEGER, # TODO: unsigned flags
"unsigned smallint": SMALLINT, # TODO: unsigned flags
"numeric": NUMERIC,
"decimal": DECIMAL,
"dec": DECIMAL,
"float": FLOAT,
"double": NUMERIC, # TODO
"double precision": NUMERIC, # TODO
"real": REAL,
"smallmoney": SMALLMONEY,
"money": MONEY,
"smalldatetime": DATETIME,
"datetime": DATETIME,
"date": DATE,
"time": TIME,
"char": CHAR,
"character": CHAR,
"varchar": VARCHAR,
"character varying": VARCHAR,
"char varying": VARCHAR,
"unichar": UNICHAR,
"unicode character": UNIVARCHAR,
"nchar": NCHAR,
"national char": NCHAR,
"national character": NCHAR,
"nvarchar": NVARCHAR,
"nchar varying": NVARCHAR,
"national char varying": NVARCHAR,
"national character varying": NVARCHAR,
"text": TEXT,
"unitext": UNITEXT,
"binary": BINARY,
"varbinary": VARBINARY,
"image": IMAGE,
"bit": BIT,
# not in documentation for ASE 15.7
"long varchar": TEXT, # TODO
"timestamp": TIMESTAMP,
"uniqueidentifier": UNIQUEIDENTIFIER,
}
class SybaseInspector(reflection.Inspector):
def __init__(self, conn):
reflection.Inspector.__init__(self, conn)
def get_table_id(self, table_name, schema=None):
"""Return the table id from `table_name` and `schema`."""
return self.dialect.get_table_id(
self.bind, table_name, schema, info_cache=self.info_cache
)
class SybaseExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
def set_ddl_autocommit(self, connection, value):
"""Must be implemented by subclasses to accommodate DDL executions.
"connection" is the raw unwrapped DBAPI connection. "value"
        is True or False. When True, the connection should be configured
        such that a DDL can take place subsequently. When False,
a DDL has taken place and the connection should be resumed
into non-autocommit mode.
"""
raise NotImplementedError()
def pre_exec(self):
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
if insert_has_sequence:
self._enable_identity_insert = (
seq_column.key in self.compiled_parameters[0]
)
else:
self._enable_identity_insert = False
if self._enable_identity_insert:
self.cursor.execute(
"SET IDENTITY_INSERT %s ON"
% self.dialect.identifier_preparer.format_table(tbl)
)
if self.isddl:
# TODO: to enhance this, we can detect "ddl in tran" on the
# database settings. this error message should be improved to
# include a note about that.
if not self.should_autocommit:
raise exc.InvalidRequestError(
"The Sybase dialect only supports "
"DDL in 'autocommit' mode at this time."
)
self.root_connection.engine.logger.info(
"AUTOCOMMIT (Assuming no Sybase 'ddl in tran')"
)
self.set_ddl_autocommit(
self.root_connection.connection.connection, True
)
def post_exec(self):
if self.isddl:
self.set_ddl_autocommit(self.root_connection, False)
if self._enable_identity_insert:
self.cursor.execute(
"SET IDENTITY_INSERT %s OFF"
% self.dialect.identifier_preparer.format_table(
self.compiled.statement.table
)
)
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT @@identity AS lastrowid")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class SybaseSQLCompiler(compiler.SQLCompiler):
ansi_bind_rules = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{"doy": "dayofyear", "dow": "weekday", "milliseconds": "millisecond"},
)
def get_from_hint_text(self, table, text):
return text
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += " ROWS LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += " ROWS"
text += " OFFSET " + self.process(select._offset_clause, **kw)
return text
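        # Rendering sketch (derived from the branches above):
        #   LIMIT 5            -> " ROWS LIMIT 5"
        #   LIMIT 5 OFFSET 10  -> " ROWS LIMIT 5 OFFSET 10"
        #   OFFSET 10 alone    -> " ROWS OFFSET 10"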
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return 'DATEPART("%s", %s)' % (field, self.process(extract.expr, **kw))
def visit_now_func(self, fn, **kw):
return "GETDATE()"
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR"
# which SQLAlchemy doesn't use
return ""
def order_by_clause(self, select, **kw):
kw["literal_binds"] = True
order_by = self.process(select._order_by_clause, **kw)
# SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT
if order_by and (not self.is_subquery() or select._limit):
return " ORDER BY " + order_by
else:
return ""
def delete_table_clause(self, delete_stmt, from_table, extra_froms):
"""If we have extra froms make sure we render any alias as hint."""
ashint = False
if extra_froms:
ashint = True
return from_table._compiler_dispatch(
self, asfrom=True, iscrud=True, ashint=ashint
)
def delete_extra_from_clause(
self, delete_stmt, from_table, extra_froms, from_hints, **kw
):
"""Render the DELETE .. FROM clause specific to Sybase."""
return "FROM " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in [from_table] + extra_froms
)
class SybaseDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = (
self.preparer.format_column(column)
+ " "
+ self.dialect.type_compiler.process(
column.type, type_expression=column
)
)
if column.table is None:
raise exc.CompileError(
"The Sybase dialect requires Table-bound "
"columns in order to generate DDL"
)
seq_col = column.table._autoincrement_column
# install a IDENTITY Sequence if we have an implicit IDENTITY column
if seq_col is column:
sequence = (
isinstance(column.default, sa_schema.Sequence)
and column.default
)
if sequence:
start, increment = sequence.start or 1, sequence.increment or 1
else:
start, increment = 1, 1
if (start, increment) == (1, 1):
colspec += " IDENTITY"
else:
# TODO: need correct syntax for this
colspec += " IDENTITY(%s,%s)" % (start, increment)
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if column.nullable is not None:
if not column.nullable or column.primary_key:
colspec += " NOT NULL"
else:
colspec += " NULL"
return colspec
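        # Rendering sketch for the IDENTITY branch above (hypothetical
        # column named "id"):
        #   implicit autoincrement pk        -> "id INTEGER IDENTITY NOT NULL"
        #   Sequence(start=100, increment=5) -> "id INTEGER IDENTITY(100,5) NOT NULL"
        # the explicit (start, increment) syntax is itself flagged TODO above.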
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX %s.%s" % (
self.preparer.quote_identifier(index.table.name),
self._prepared_index_name(drop.element, include_schema=False),
)
class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
class SybaseDialect(default.DefaultDialect):
name = "sybase"
supports_unicode_statements = False
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
supports_native_boolean = False
supports_unicode_binds = False
postfetch_lastrowid = True
colspecs = {}
ischema_names = ischema_names
type_compiler = SybaseTypeCompiler
statement_compiler = SybaseSQLCompiler
ddl_compiler = SybaseDDLCompiler
preparer = SybaseIdentifierPreparer
inspector = SybaseInspector
construct_arguments = []
def _get_default_schema_name(self, connection):
return connection.scalar(
text("SELECT user_name() as user_name").columns(username=Unicode)
)
def initialize(self, connection):
super(SybaseDialect, self).initialize(connection)
if (
self.server_version_info is not None
and self.server_version_info < (15,)
):
self.max_identifier_length = 30
else:
self.max_identifier_length = 255
def get_table_id(self, connection, table_name, schema=None, **kw):
"""Fetch the id for schema.table_name.
        Several reflection methods require the table id. The idea behind
        this method is that it can be fetched one time and cached for
subsequent calls.
"""
table_id = None
if schema is None:
schema = self.default_schema_name
TABLEID_SQL = text(
"""
SELECT o.id AS id
FROM sysobjects o JOIN sysusers u ON o.uid=u.uid
WHERE u.name = :schema_name
AND o.name = :table_name
AND o.type in ('U', 'V')
"""
)
if util.py2k:
if isinstance(schema, unicode): # noqa
schema = schema.encode("ascii")
if isinstance(table_name, unicode): # noqa
table_name = table_name.encode("ascii")
result = connection.execute(
TABLEID_SQL, schema_name=schema, table_name=table_name
)
table_id = result.scalar()
if table_id is None:
raise exc.NoSuchTableError(table_name)
return table_id
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
COLUMN_SQL = text(
"""
SELECT col.name AS name,
t.name AS type,
(col.status & 8) AS nullable,
(col.status & 128) AS autoincrement,
com.text AS 'default',
col.prec AS precision,
col.scale AS scale,
col.length AS length
FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
col.cdefault = com.id
WHERE col.usertype = t.usertype
AND col.id = :table_id
ORDER BY col.colid
"""
)
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = []
for (
name,
type_,
nullable,
autoincrement,
default_,
precision,
scale,
length,
) in results:
col_info = self._get_column_info(
name,
type_,
bool(nullable),
bool(autoincrement),
default_,
precision,
scale,
length,
)
columns.append(col_info)
return columns
def _get_column_info(
self,
name,
type_,
nullable,
autoincrement,
default,
precision,
scale,
length,
):
coltype = self.ischema_names.get(type_, None)
kwargs = {}
if coltype in (NUMERIC, DECIMAL):
args = (precision, scale)
elif coltype == FLOAT:
args = (precision,)
elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
args = (length,)
else:
args = ()
if coltype:
coltype = coltype(*args, **kwargs)
# is this necessary
# if is_array:
# coltype = ARRAY(coltype)
else:
util.warn(
"Did not recognize type '%s' of column '%s'" % (type_, name)
)
coltype = sqltypes.NULLTYPE
if default:
default = default.replace("DEFAULT", "").strip()
default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
else:
default = None
column_info = dict(
name=name,
type=coltype,
nullable=nullable,
default=default,
autoincrement=autoincrement,
)
return column_info
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
table_cache = {}
column_cache = {}
foreign_keys = []
table_cache[table_id] = {"name": table_name, "schema": schema}
COLUMN_SQL = text(
"""
SELECT c.colid AS id, c.name AS name
FROM syscolumns c
WHERE c.id = :table_id
"""
)
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = {}
for col in results:
columns[col["id"]] = col["name"]
column_cache[table_id] = columns
REFCONSTRAINT_SQL = text(
"""
SELECT o.name AS name, r.reftabid AS reftable_id,
r.keycnt AS 'count',
r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
            r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
r.fokey16 AS fokey16,
r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
            r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
r.refkey10 AS refkey10, r.refkey11 AS refkey11,
r.refkey12 AS refkey12, r.refkey13 AS refkey13,
r.refkey14 AS refkey14, r.refkey15 AS refkey15,
r.refkey16 AS refkey16
FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
WHERE r.tableid = :table_id
"""
)
referential_constraints = connection.execute(
REFCONSTRAINT_SQL, table_id=table_id
).fetchall()
REFTABLE_SQL = text(
"""
SELECT o.name AS name, u.name AS 'schema'
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE o.id = :table_id
"""
)
for r in referential_constraints:
reftable_id = r["reftable_id"]
if reftable_id not in table_cache:
c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
reftable = c.fetchone()
c.close()
table_info = {"name": reftable["name"], "schema": None}
if (
schema is not None
or reftable["schema"] != self.default_schema_name
):
table_info["schema"] = reftable["schema"]
table_cache[reftable_id] = table_info
results = connection.execute(COLUMN_SQL, table_id=reftable_id)
reftable_columns = {}
for col in results:
reftable_columns[col["id"]] = col["name"]
column_cache[reftable_id] = reftable_columns
reftable = table_cache[reftable_id]
reftable_columns = column_cache[reftable_id]
constrained_columns = []
referred_columns = []
for i in range(1, r["count"] + 1):
constrained_columns.append(columns[r["fokey%i" % i]])
referred_columns.append(reftable_columns[r["refkey%i" % i]])
fk_info = {
"constrained_columns": constrained_columns,
"referred_schema": reftable["schema"],
"referred_table": reftable["name"],
"referred_columns": referred_columns,
"name": r["name"],
}
foreign_keys.append(fk_info)
return foreign_keys
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
INDEX_SQL = text(
"""
SELECT object_name(i.id) AS table_name,
i.keycnt AS 'count',
i.name AS name,
(i.status & 0x2) AS 'unique',
index_col(object_name(i.id), i.indid, 1) AS col_1,
index_col(object_name(i.id), i.indid, 2) AS col_2,
index_col(object_name(i.id), i.indid, 3) AS col_3,
index_col(object_name(i.id), i.indid, 4) AS col_4,
index_col(object_name(i.id), i.indid, 5) AS col_5,
index_col(object_name(i.id), i.indid, 6) AS col_6,
index_col(object_name(i.id), i.indid, 7) AS col_7,
index_col(object_name(i.id), i.indid, 8) AS col_8,
index_col(object_name(i.id), i.indid, 9) AS col_9,
index_col(object_name(i.id), i.indid, 10) AS col_10,
index_col(object_name(i.id), i.indid, 11) AS col_11,
index_col(object_name(i.id), i.indid, 12) AS col_12,
index_col(object_name(i.id), i.indid, 13) AS col_13,
index_col(object_name(i.id), i.indid, 14) AS col_14,
index_col(object_name(i.id), i.indid, 15) AS col_15,
index_col(object_name(i.id), i.indid, 16) AS col_16
FROM sysindexes i, sysobjects o
WHERE o.id = i.id
AND o.id = :table_id
AND (i.status & 2048) = 0
AND i.indid BETWEEN 1 AND 254
"""
)
results = connection.execute(INDEX_SQL, table_id=table_id)
indexes = []
for r in results:
column_names = []
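            # keycnt in sysindexes counts keys + 1 for most index types
            # (assumption based on ASE documentation), hence the exclusive
            # upper bound below.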
for i in range(1, r["count"]):
column_names.append(r["col_%i" % (i,)])
index_info = {
"name": r["name"],
"unique": bool(r["unique"]),
"column_names": column_names,
}
indexes.append(index_info)
return indexes
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
PK_SQL = text(
"""
SELECT object_name(i.id) AS table_name,
i.keycnt AS 'count',
i.name AS name,
index_col(object_name(i.id), i.indid, 1) AS pk_1,
index_col(object_name(i.id), i.indid, 2) AS pk_2,
index_col(object_name(i.id), i.indid, 3) AS pk_3,
index_col(object_name(i.id), i.indid, 4) AS pk_4,
index_col(object_name(i.id), i.indid, 5) AS pk_5,
index_col(object_name(i.id), i.indid, 6) AS pk_6,
index_col(object_name(i.id), i.indid, 7) AS pk_7,
index_col(object_name(i.id), i.indid, 8) AS pk_8,
index_col(object_name(i.id), i.indid, 9) AS pk_9,
index_col(object_name(i.id), i.indid, 10) AS pk_10,
index_col(object_name(i.id), i.indid, 11) AS pk_11,
index_col(object_name(i.id), i.indid, 12) AS pk_12,
index_col(object_name(i.id), i.indid, 13) AS pk_13,
index_col(object_name(i.id), i.indid, 14) AS pk_14,
index_col(object_name(i.id), i.indid, 15) AS pk_15,
index_col(object_name(i.id), i.indid, 16) AS pk_16
FROM sysindexes i, sysobjects o
WHERE o.id = i.id
AND o.id = :table_id
AND (i.status & 2048) = 2048
AND i.indid BETWEEN 1 AND 254
"""
)
results = connection.execute(PK_SQL, table_id=table_id)
pks = results.fetchone()
results.close()
constrained_columns = []
if pks:
for i in range(1, pks["count"] + 1):
constrained_columns.append(pks["pk_%i" % (i,)])
return {
"constrained_columns": constrained_columns,
"name": pks["name"],
}
else:
return {"constrained_columns": [], "name": None}
@reflection.cache
def get_schema_names(self, connection, **kw):
SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u")
schemas = connection.execute(SCHEMA_SQL)
return [s["name"] for s in schemas]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
TABLE_SQL = text(
"""
SELECT o.name AS name
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE u.name = :schema_name
AND o.type = 'U'
"""
)
if util.py2k:
if isinstance(schema, unicode): # noqa
schema = schema.encode("ascii")
tables = connection.execute(TABLE_SQL, schema_name=schema)
return [t["name"] for t in tables]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_DEF_SQL = text(
"""
SELECT c.text
FROM syscomments c JOIN sysobjects o ON c.id = o.id
WHERE o.name = :view_name
AND o.type = 'V'
"""
)
if util.py2k:
if isinstance(view_name, unicode): # noqa
view_name = view_name.encode("ascii")
view = connection.execute(VIEW_DEF_SQL, view_name=view_name)
return view.scalar()
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_SQL = text(
"""
SELECT o.name AS name
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE u.name = :schema_name
AND o.type = 'V'
"""
)
if util.py2k:
if isinstance(schema, unicode): # noqa
schema = schema.encode("ascii")
views = connection.execute(VIEW_SQL, schema_name=schema)
return [v["name"] for v in views]
def has_table(self, connection, table_name, schema=None):
try:
self.get_table_id(connection, table_name, schema)
except exc.NoSuchTableError:
return False
else:
return True
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/zxjdbc.py | # postgresql/zxjdbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: postgresql+zxjdbc://scott:tiger@localhost/db
:driverurl: http://jdbc.postgresql.org/
"""
from .base import PGDialect
from .base import PGExecutionContext
from ...connectors.zxJDBC import ZxJDBCConnector
class PGExecutionContext_zxjdbc(PGExecutionContext):
def create_cursor(self):
cursor = self._dbapi_connection.cursor()
cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
return cursor
class PGDialect_zxjdbc(ZxJDBCConnector, PGDialect):
jdbc_db_name = "postgresql"
jdbc_driver_name = "org.postgresql.Driver"
execution_ctx_cls = PGExecutionContext_zxjdbc
supports_native_decimal = True
def __init__(self, *args, **kwargs):
super(PGDialect_zxjdbc, self).__init__(*args, **kwargs)
from com.ziclix.python.sql.handler import PostgresqlDataHandler
self.DataHandler = PostgresqlDataHandler
def _get_server_version_info(self, connection):
parts = connection.connection.dbversion.split(".")
return tuple(int(x) for x in parts)
dialect = PGDialect_zxjdbc
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/psycopg2cffi.py | # postgresql/psycopg2cffi.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: postgresql+psycopg2cffi
:name: psycopg2cffi
:dbapi: psycopg2cffi
:connectstring: postgresql+psycopg2cffi://user:password@host:port/dbname[?key=value&key=value...]
:url: http://pypi.python.org/pypi/psycopg2cffi/
``psycopg2cffi`` is an adaptation of ``psycopg2``, using CFFI for the C
layer. This makes it suitable for use in e.g. PyPy. Documentation
is as per ``psycopg2``.
.. versionadded:: 1.0.0
.. seealso::
:mod:`sqlalchemy.dialects.postgresql.psycopg2`
""" # noqa
from .psycopg2 import PGDialect_psycopg2
class PGDialect_psycopg2cffi(PGDialect_psycopg2):
driver = "psycopg2cffi"
supports_unicode_statements = True
# psycopg2cffi's first release is 2.5.0, but reports
# __version__ as 2.4.4. Subsequent releases seem to have
# fixed this.
FEATURE_VERSION_MAP = dict(
native_json=(2, 4, 4),
native_jsonb=(2, 7, 1),
sane_multi_rowcount=(2, 4, 4),
array_oid=(2, 4, 4),
hstore_adapter=(2, 4, 4),
)
@classmethod
def dbapi(cls):
return __import__("psycopg2cffi")
@classmethod
def _psycopg2_extensions(cls):
root = __import__("psycopg2cffi", fromlist=["extensions"])
return root.extensions
@classmethod
def _psycopg2_extras(cls):
root = __import__("psycopg2cffi", fromlist=["extras"])
return root.extras
dialect = PGDialect_psycopg2cffi
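# Hedged note: the parent psycopg2 dialect compares the version tuple it
# parses from the DBAPI's __version__ against FEATURE_VERSION_MAP to
# decide feature support, so with psycopg2cffi reporting 2.4.4 for its
# first release, every feature keyed at (2, 4, 4) above is effectively
# always enabled.  (Descriptive comment only.)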
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/__init__.py | # postgresql/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base
from . import pg8000 # noqa
from . import psycopg2 # noqa
from . import psycopg2cffi # noqa
from . import pygresql # noqa
from . import pypostgresql # noqa
from . import zxjdbc # noqa
from .array import All
from .array import Any
from .array import ARRAY
from .array import array
from .base import BIGINT
from .base import BIT
from .base import BOOLEAN
from .base import BYTEA
from .base import CHAR
from .base import CIDR
from .base import CreateEnumType
from .base import DATE
from .base import DOUBLE_PRECISION
from .base import DropEnumType
from .base import ENUM
from .base import FLOAT
from .base import INET
from .base import INTEGER
from .base import INTERVAL
from .base import MACADDR
from .base import MONEY
from .base import NUMERIC
from .base import OID
from .base import REAL
from .base import REGCLASS
from .base import SMALLINT
from .base import TEXT
from .base import TIME
from .base import TIMESTAMP
from .base import TSVECTOR
from .base import UUID
from .base import VARCHAR
from .dml import Insert
from .dml import insert
from .ext import aggregate_order_by
from .ext import array_agg
from .ext import ExcludeConstraint
from .hstore import HSTORE
from .hstore import hstore
from .json import JSON
from .json import JSONB
from .ranges import DATERANGE
from .ranges import INT4RANGE
from .ranges import INT8RANGE
from .ranges import NUMRANGE
from .ranges import TSRANGE
from .ranges import TSTZRANGE
base.dialect = dialect = psycopg2.dialect
__all__ = (
"INTEGER",
"BIGINT",
"SMALLINT",
"VARCHAR",
"CHAR",
"TEXT",
"NUMERIC",
"FLOAT",
"REAL",
"INET",
"CIDR",
"UUID",
"BIT",
"MACADDR",
"MONEY",
"OID",
"REGCLASS",
"DOUBLE_PRECISION",
"TIMESTAMP",
"TIME",
"DATE",
"BYTEA",
"BOOLEAN",
"INTERVAL",
"ARRAY",
"ENUM",
"dialect",
"array",
"HSTORE",
"hstore",
"INT4RANGE",
"INT8RANGE",
"NUMRANGE",
"DATERANGE",
"TSVECTOR",
"TSRANGE",
"TSTZRANGE",
"JSON",
"JSONB",
"Any",
"All",
"DropEnumType",
"CreateEnumType",
"ExcludeConstraint",
"aggregate_order_by",
"array_agg",
"insert",
"Insert",
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/pygresql.py | # postgresql/pygresql.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+pygresql
:name: pygresql
:dbapi: pgdb
:connectstring: postgresql+pygresql://user:password@host:port/dbname[?key=value&key=value...]
:url: http://www.pygresql.org/
.. note::
The pygresql dialect is **not tested as part of SQLAlchemy's continuous
integration** and may have unresolved issues. The recommended PostgreSQL
dialect is psycopg2.
""" # noqa
import decimal
import re
from .base import _DECIMAL_TYPES
from .base import _FLOAT_TYPES
from .base import _INT_TYPES
from .base import PGCompiler
from .base import PGDialect
from .base import PGIdentifierPreparer
from .base import UUID
from .hstore import HSTORE
from .json import JSON
from .json import JSONB
from ... import exc
from ... import processors
from ... import util
from ...sql.elements import Null
from ...types import JSON as Json
from ...types import Numeric
class _PGNumeric(Numeric):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if not isinstance(coltype, int):
coltype = coltype.oid
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
decimal.Decimal, self._effective_decimal_return_scale
)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# PyGreSQL returns Decimal natively for 1700 (numeric)
return None
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
else:
if coltype in _FLOAT_TYPES:
# PyGreSQL returns float natively for 701 (float8)
return None
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
return processors.to_float
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
class _PGHStore(HSTORE):
def bind_processor(self, dialect):
if not dialect.has_native_hstore:
return super(_PGHStore, self).bind_processor(dialect)
hstore = dialect.dbapi.Hstore
def process(value):
if isinstance(value, dict):
return hstore(value)
return value
return process
def result_processor(self, dialect, coltype):
if not dialect.has_native_hstore:
return super(_PGHStore, self).result_processor(dialect, coltype)
class _PGJSON(JSON):
def bind_processor(self, dialect):
if not dialect.has_native_json:
return super(_PGJSON, self).bind_processor(dialect)
json = dialect.dbapi.Json
def process(value):
if value is self.NULL:
value = None
elif isinstance(value, Null) or (
value is None and self.none_as_null
):
return None
if value is None or isinstance(value, (dict, list)):
return json(value)
return value
return process
def result_processor(self, dialect, coltype):
if not dialect.has_native_json:
return super(_PGJSON, self).result_processor(dialect, coltype)
class _PGJSONB(JSONB):
def bind_processor(self, dialect):
if not dialect.has_native_json:
return super(_PGJSONB, self).bind_processor(dialect)
json = dialect.dbapi.Json
def process(value):
if value is self.NULL:
value = None
elif isinstance(value, Null) or (
value is None and self.none_as_null
):
return None
if value is None or isinstance(value, (dict, list)):
return json(value)
return value
return process
def result_processor(self, dialect, coltype):
if not dialect.has_native_json:
return super(_PGJSONB, self).result_processor(dialect, coltype)
class _PGUUID(UUID):
def bind_processor(self, dialect):
if not dialect.has_native_uuid:
return super(_PGUUID, self).bind_processor(dialect)
uuid = dialect.dbapi.Uuid
def process(value):
if value is None:
return None
if isinstance(value, (str, bytes)):
if len(value) == 16:
return uuid(bytes=value)
return uuid(value)
if isinstance(value, int):
return uuid(int=value)
return value
return process
def result_processor(self, dialect, coltype):
if not dialect.has_native_uuid:
return super(_PGUUID, self).result_processor(dialect, coltype)
if not self.as_uuid:
def process(value):
if value is not None:
return str(value)
return process
class _PGCompiler(PGCompiler):
def visit_mod_binary(self, binary, operator, **kw):
return (
self.process(binary.left, **kw)
+ " %% "
+ self.process(binary.right, **kw)
)
def post_process_text(self, text):
return text.replace("%", "%%")
class _PGIdentifierPreparer(PGIdentifierPreparer):
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
return value.replace("%", "%%")
class PGDialect_pygresql(PGDialect):
driver = "pygresql"
statement_compiler = _PGCompiler
preparer = _PGIdentifierPreparer
@classmethod
def dbapi(cls):
import pgdb
return pgdb
colspecs = util.update_copy(
PGDialect.colspecs,
{
Numeric: _PGNumeric,
HSTORE: _PGHStore,
Json: _PGJSON,
JSON: _PGJSON,
JSONB: _PGJSONB,
UUID: _PGUUID,
},
)
def __init__(self, **kwargs):
super(PGDialect_pygresql, self).__init__(**kwargs)
try:
version = self.dbapi.version
m = re.match(r"(\d+)\.(\d+)", version)
version = (int(m.group(1)), int(m.group(2)))
except (AttributeError, ValueError, TypeError):
version = (0, 0)
self.dbapi_version = version
if version < (5, 0):
has_native_hstore = has_native_json = has_native_uuid = False
if version != (0, 0):
util.warn(
"PyGreSQL is only fully supported by SQLAlchemy"
" since version 5.0."
)
else:
self.supports_unicode_statements = True
self.supports_unicode_binds = True
has_native_hstore = has_native_json = has_native_uuid = True
self.has_native_hstore = has_native_hstore
self.has_native_json = has_native_json
self.has_native_uuid = has_native_uuid
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
if "port" in opts:
opts["host"] = "%s:%s" % (
opts.get("host", "").rsplit(":", 1)[0],
opts.pop("port"),
)
opts.update(url.query)
return [], opts
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
if not connection:
return False
try:
connection = connection.connection
except AttributeError:
pass
else:
if not connection:
return False
try:
return connection.closed
except AttributeError: # PyGreSQL < 5.0
return connection._cnx is None
return False
dialect = PGDialect_pygresql
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/ranges.py | # Copyright (C) 2013-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from ... import types as sqltypes
__all__ = (
    "INT4RANGE",
    "INT8RANGE",
    "NUMRANGE",
    "DATERANGE",
    "TSRANGE",
    "TSTZRANGE",
)
class RangeOperators(object):
"""
This mixin provides functionality for the Range Operators
listed in Table 9-44 of the `postgres documentation`__ for Range
Functions and Operators. It is used by all the range types
    provided in the ``postgresql`` dialect and can likely be used for
any range types you create yourself.
__ http://www.postgresql.org/docs/devel/static/functions-range.html
No extra support is provided for the Range Functions listed in
Table 9-45 of the postgres documentation. For these, the normal
:func:`~sqlalchemy.sql.expression.func` object should be used.
"""
class comparator_factory(sqltypes.Concatenable.Comparator):
"""Define comparison operations for range types."""
def __ne__(self, other):
"Boolean expression. Returns true if two ranges are not equal"
if other is None:
return super(RangeOperators.comparator_factory, self).__ne__(
other
)
else:
return self.expr.op("<>")(other)
def contains(self, other, **kw):
"""Boolean expression. Returns true if the right hand operand,
which can be an element or a range, is contained within the
column.
"""
return self.expr.op("@>")(other)
def contained_by(self, other):
"""Boolean expression. Returns true if the column is contained
within the right hand operand.
"""
return self.expr.op("<@")(other)
def overlaps(self, other):
"""Boolean expression. Returns true if the column overlaps
(has points in common with) the right hand operand.
"""
return self.expr.op("&&")(other)
def strictly_left_of(self, other):
"""Boolean expression. Returns true if the column is strictly
left of the right hand operand.
"""
return self.expr.op("<<")(other)
__lshift__ = strictly_left_of
def strictly_right_of(self, other):
"""Boolean expression. Returns true if the column is strictly
right of the right hand operand.
"""
return self.expr.op(">>")(other)
__rshift__ = strictly_right_of
def not_extend_right_of(self, other):
"""Boolean expression. Returns true if the range in the column
does not extend right of the range in the operand.
"""
return self.expr.op("&<")(other)
def not_extend_left_of(self, other):
"""Boolean expression. Returns true if the range in the column
does not extend left of the range in the operand.
"""
return self.expr.op("&>")(other)
def adjacent_to(self, other):
"""Boolean expression. Returns true if the range in the column
is adjacent to the range in the operand.
"""
return self.expr.op("-|-")(other)
def __add__(self, other):
"""Range expression. Returns the union of the two ranges.
Will raise an exception if the resulting range is not
            contiguous.
"""
return self.expr.op("+")(other)
class INT4RANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the PostgreSQL INT4RANGE type.
"""
__visit_name__ = "INT4RANGE"
class INT8RANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the PostgreSQL INT8RANGE type.
"""
__visit_name__ = "INT8RANGE"
class NUMRANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the PostgreSQL NUMRANGE type.
"""
__visit_name__ = "NUMRANGE"
class DATERANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the PostgreSQL DATERANGE type.
"""
__visit_name__ = "DATERANGE"
class TSRANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the PostgreSQL TSRANGE type.
"""
__visit_name__ = "TSRANGE"
class TSTZRANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the PostgreSQL TSTZRANGE type.
"""
__visit_name__ = "TSTZRANGE"
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/pypostgresql.py | # postgresql/pypostgresql.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+pypostgresql
:name: py-postgresql
:dbapi: pypostgresql
:connectstring: postgresql+pypostgresql://user:password@host:port/dbname[?key=value&key=value...]
:url: http://python.projects.pgfoundry.org/
.. note::
The pypostgresql dialect is **not tested as part of SQLAlchemy's continuous
integration** and may have unresolved issues. The recommended PostgreSQL
driver is psycopg2.
""" # noqa
from .base import PGDialect
from .base import PGExecutionContext
from ... import processors
from ... import types as sqltypes
from ... import util
class PGNumeric(sqltypes.Numeric):
def bind_processor(self, dialect):
return processors.to_str
def result_processor(self, dialect, coltype):
if self.asdecimal:
return None
else:
return processors.to_float
class PGExecutionContext_pypostgresql(PGExecutionContext):
pass
class PGDialect_pypostgresql(PGDialect):
driver = "pypostgresql"
supports_unicode_statements = True
supports_unicode_binds = True
description_encoding = None
default_paramstyle = "pyformat"
# requires trunk version to support sane rowcounts
# TODO: use dbapi version information to set this flag appropriately
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
execution_ctx_cls = PGExecutionContext_pypostgresql
colspecs = util.update_copy(
PGDialect.colspecs,
{
sqltypes.Numeric: PGNumeric,
# prevents PGNumeric from being used
sqltypes.Float: sqltypes.Float,
},
)
@classmethod
def dbapi(cls):
from postgresql.driver import dbapi20
return dbapi20
_DBAPI_ERROR_NAMES = [
"Error",
"InterfaceError",
"DatabaseError",
"DataError",
"OperationalError",
"IntegrityError",
"InternalError",
"ProgrammingError",
"NotSupportedError",
]
@util.memoized_property
def dbapi_exception_translation_map(self):
if self.dbapi is None:
return {}
return dict(
(getattr(self.dbapi, name).__name__, name)
for name in self._DBAPI_ERROR_NAMES
)
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
if "port" in opts:
opts["port"] = int(opts["port"])
else:
opts["port"] = 5432
opts.update(url.query)
return ([], opts)
def is_disconnect(self, e, connection, cursor):
return "connection is closed" in str(e)
dialect = PGDialect_pypostgresql
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py | # postgresql/psycopg2.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: postgresql+psycopg2
:name: psycopg2
:dbapi: psycopg2
:connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]
:url: http://pypi.python.org/pypi/psycopg2/
psycopg2 Connect Arguments
-----------------------------------
psycopg2-specific keyword arguments which are accepted by
:func:`_sa.create_engine()` are:
* ``server_side_cursors``: Enable the usage of "server side cursors" for SQL
statements which support this feature. What this essentially means from a
psycopg2 point of view is that the cursor is created using a name, e.g.
``connection.cursor('some name')``, which has the effect that result rows
are not immediately pre-fetched and buffered after statement execution, but
are instead left on the server and only retrieved as needed. SQLAlchemy's
:class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering
behavior when this feature is enabled, such that groups of 100 rows at a
time are fetched over the wire to reduce conversational overhead.
Note that the :paramref:`.Connection.execution_options.stream_results`
execution option is a more targeted
way of enabling this mode on a per-execution basis.
* ``use_native_unicode``: Enable the usage of Psycopg2 "native unicode" mode
per connection. True by default.
.. seealso::
:ref:`psycopg2_disable_native_unicode`
* ``isolation_level``: This option, available for all PostgreSQL dialects,
includes the ``AUTOCOMMIT`` isolation level when using the psycopg2
dialect.
.. seealso::
:ref:`psycopg2_isolation_level`
* ``client_encoding``: sets the client encoding in a libpq-agnostic way,
using psycopg2's ``set_client_encoding()`` method.
.. seealso::
:ref:`psycopg2_unicode`
* ``executemany_mode``, ``executemany_batch_page_size``,
``executemany_values_page_size``: Allows use of psycopg2
  extensions for optimizing "executemany"-style queries. See the referenced
section below for details.
.. seealso::
:ref:`psycopg2_executemany_mode`
* ``use_batch_mode``: this is the previous setting used to affect "executemany"
mode and is now deprecated.
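As a brief illustration, several of the keyword arguments above may be
combined in a single :func:`_sa.create_engine` call; a sketch only, using a
placeholder connection string::
    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        server_side_cursors=True,
        client_encoding='utf8',
        executemany_mode='batch')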
Unix Domain Connections
------------------------
psycopg2 supports connecting via Unix domain connections. When the ``host``
portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2,
which specifies Unix-domain communication rather than TCP/IP communication::
create_engine("postgresql+psycopg2://user:password@/dbname")
By default, psycopg2 connects to a Unix-domain socket in ``/tmp``, or to
whatever socket directory was specified when PostgreSQL
was built. This value can be overridden by passing a pathname to psycopg2,
using ``host`` as an additional keyword argument::
create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql")
.. seealso::
`PQconnectdbParams \
<http://www.postgresql.org/docs/9.1/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
Empty DSN Connections / Environment Variable Connections
---------------------------------------------------------
The psycopg2 DBAPI can connect to PostgreSQL by passing an empty DSN to the
libpq client library, which by default indicates to connect to a localhost
PostgreSQL database that is open for "trust" connections. This behavior can be
further tailored using a particular set of environment variables which are
prefixed with ``PG...``, which are consumed by ``libpq`` to take the place of
any or all elements of the connection string.
For this form, the URL can be passed without any elements other than the
initial scheme::
engine = create_engine('postgresql+psycopg2://')
In the above form, a blank "dsn" string is passed to the ``psycopg2.connect()``
function which in turn represents an empty DSN passed to libpq.
.. versionadded:: 1.3.2 support for parameter-less connections with psycopg2.
.. seealso::
`Environment Variables\
<https://www.postgresql.org/docs/current/libpq-envars.html>`_ -
PostgreSQL documentation on how to use ``PG...``
environment variables for connections.
.. _psycopg2_execution_options:
Per-Statement/Connection Execution Options
-------------------------------------------
The following DBAPI-specific options are respected when used with
:meth:`_engine.Connection.execution_options`,
:meth:`.Executable.execution_options`,
:meth:`_query.Query.execution_options`,
in addition to those not specific to DBAPIs:
* ``isolation_level`` - Set the transaction isolation level for the lifespan
  of a :class:`_engine.Connection` (can only be set on a connection, not a
  statement or query). See :ref:`psycopg2_isolation_level`.
* ``stream_results`` - Enable or disable usage of psycopg2 server side
cursors - this feature makes use of "named" cursors in combination with
special result handling methods so that result rows are not fully buffered.
If ``None`` or not set, the ``server_side_cursors`` option of the
:class:`_engine.Engine` is used.
* ``max_row_buffer`` - when using ``stream_results``, an integer value that
specifies the maximum number of rows to buffer at a time. This is
interpreted by the :class:`.BufferedRowResultProxy`, and if omitted the
buffer will grow to ultimately store 1000 rows at a time.
.. versionadded:: 1.0.6
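For example, the per-execution options above may be combined on a single
:class:`_engine.Connection`; a sketch, where ``large_table`` is a
placeholder::
    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test")
    with engine.connect() as conn:
        result = conn.execution_options(
            stream_results=True, max_row_buffer=500
        ).execute("SELECT * FROM large_table")
        for row in result:
            print(row)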
.. _psycopg2_batch_mode:
.. _psycopg2_executemany_mode:
Psycopg2 Fast Execution Helpers
-------------------------------
Modern versions of psycopg2 include a feature known as
`Fast Execution Helpers \
<http://initd.org/psycopg/docs/extras.html#fast-execution-helpers>`_, which
have been shown in benchmarking to improve psycopg2's executemany()
performance, primarily with INSERT statements, by multiple orders of magnitude.
SQLAlchemy allows this extension to be used for all ``executemany()`` style
calls invoked by an :class:`_engine.Engine`
when used with :ref:`multiple parameter
sets <execute_multiple>`, which includes the use of this feature both by the
Core as well as by the ORM for inserts of objects with non-autogenerated
primary key values, by adding the ``executemany_mode`` flag to
:func:`_sa.create_engine`::
engine = create_engine(
"postgresql+psycopg2://scott:tiger@host/dbname",
executemany_mode='batch')
.. versionchanged:: 1.3.7 - the ``use_batch_mode`` flag has been superseded
by a new parameter ``executemany_mode`` which provides support both for
psycopg2's ``execute_batch`` helper as well as the ``execute_values``
helper.
Possible options for ``executemany_mode`` include:
* ``None`` - By default, psycopg2's extensions are not used, and the usual
``cursor.executemany()`` method is used when invoking batches of statements.
* ``'batch'`` - Uses ``psycopg2.extras.execute_batch`` so that multiple copies
of a SQL query, each one corresponding to a parameter set passed to
``executemany()``, are joined into a single SQL string separated by a
semicolon. This is the same behavior as was provided by the
``use_batch_mode=True`` flag.
* ``'values'`` - For Core :func:`_expression.insert`
constructs only (including those
emitted by the ORM automatically), the ``psycopg2.extras.execute_values``
extension is used so that multiple parameter sets are grouped into a single
INSERT statement and joined together with multiple VALUES expressions. This
method requires that the string text of the VALUES clause inside the
INSERT statement is manipulated, so is only supported with a compiled
:func:`_expression.insert` construct where the format is predictable.
For all other
constructs, including plain textual INSERT statements not rendered by the
SQLAlchemy expression language compiler, the
``psycopg2.extras.execute_batch`` method is used. It is therefore important
to note that **"values" mode implies that "batch" mode is also used for
all statements for which "values" mode does not apply**.
For both strategies, the ``executemany_batch_page_size`` and
``executemany_values_page_size`` arguments control how many parameter sets
should be represented in each execution. Because "values" mode implies a
fallback down to "batch" mode for non-INSERT statements, there are two
independent page size arguments. For each, the default value of ``None`` means
to use psycopg2's defaults, which at the time of this writing are quite low at
100. For the ``execute_values`` method, a number as high as 10000 may prove
to be performant, whereas for ``execute_batch``, as the number represents
full statements repeated, a number closer to the default of 100 is likely
more appropriate::
engine = create_engine(
"postgresql+psycopg2://scott:tiger@host/dbname",
executemany_mode='values',
executemany_values_page_size=10000, executemany_batch_page_size=500)
.. seealso::
:ref:`execute_multiple` - General information on using the
:class:`_engine.Connection`
object to execute statements in such a way as to make
use of the DBAPI ``.executemany()`` method.
.. versionchanged:: 1.3.7 - Added support for
``psycopg2.extras.execute_values``. The ``use_batch_mode`` flag is
superseded by the ``executemany_mode`` flag.
.. _psycopg2_unicode:
Unicode with Psycopg2
----------------------
By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE``
extension, such that the DBAPI receives and returns all strings as Python
Unicode objects directly - SQLAlchemy passes these values through without
change. Psycopg2 here will encode/decode string values based on the
current "client encoding" setting; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf8``, as a more useful default::
# postgresql.conf file
# client_encoding = sql_ascii # actually, defaults to database
# encoding
client_encoding = utf8
A second way to affect the client encoding is to set it within Psycopg2
locally. SQLAlchemy will call psycopg2's
:meth:`psycopg2:connection.set_client_encoding` method
on all new connections based on the value passed to
:func:`_sa.create_engine` using the ``client_encoding`` parameter::
# set_client_encoding() setting;
# works for *all* PostgreSQL versions
engine = create_engine("postgresql://user:pass@host/dbname",
client_encoding='utf8')
This overrides the encoding specified in the PostgreSQL client configuration.
When using the parameter in this way, the psycopg2 driver emits
``SET client_encoding TO 'utf8'`` on the connection explicitly, and works
in all PostgreSQL versions.
Note that the ``client_encoding`` setting as passed to
:func:`_sa.create_engine`
is **not the same** as the more recently added ``client_encoding`` parameter
now supported by libpq directly. This is enabled when ``client_encoding``
is passed directly to ``psycopg2.connect()``, and from SQLAlchemy is passed
using the :paramref:`_sa.create_engine.connect_args` parameter::
engine = create_engine(
"postgresql://user:pass@host/dbname",
connect_args={'client_encoding': 'utf8'})
# using the query string is equivalent
engine = create_engine("postgresql://user:pass@host/dbname?client_encoding=utf8")
The above parameter was only added to libpq as of version 9.1 of PostgreSQL,
so using the previous method is better for cross-version support.
.. _psycopg2_disable_native_unicode:
Disabling Native Unicode
^^^^^^^^^^^^^^^^^^^^^^^^
SQLAlchemy can also be instructed to skip the usage of the psycopg2
``UNICODE`` extension and to instead utilize its own unicode encode/decode
services, which are normally reserved only for those DBAPIs that don't
fully support unicode directly. Passing ``use_native_unicode=False`` to
:func:`_sa.create_engine` will disable usage of ``psycopg2.extensions.
UNICODE``.
SQLAlchemy will instead encode data itself into Python bytestrings on the way
in and coerce from bytes on the way back,
using the value of the :func:`_sa.create_engine` ``encoding`` parameter, which
defaults to ``utf-8``.
SQLAlchemy's own unicode encode/decode functionality is steadily becoming
obsolete as most DBAPIs now support unicode fully.
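A minimal sketch of disabling the extension, using a placeholder
connection string::
    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        use_native_unicode=False)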
Bound Parameter Styles
----------------------
The default parameter style for the psycopg2 dialect is "pyformat", where
SQL is rendered using ``%(paramname)s`` style. This format has the limitation
that it does not accommodate the unusual case of parameter names that
actually contain percent or parenthesis symbols; as SQLAlchemy in many cases
generates bound parameter names based on the name of a column, the presence
of these characters in a column name can lead to problems.
There are two solutions to the issue of a :class:`_schema.Column` that
contains one of these characters in its name. One is to specify the
:paramref:`.schema.Column.key` for columns that have such names::
measurement = Table('measurement', metadata,
Column('Size (meters)', Integer, key='size_meters')
)
Above, an INSERT statement such as ``measurement.insert()`` will use
``size_meters`` as the parameter name, and a SQL expression such as
``measurement.c.size_meters > 10`` will derive the bound parameter name
from the ``size_meters`` key as well.
.. versionchanged:: 1.0.0 - SQL expressions will use
:attr:`_schema.Column.key`
as the source of naming when anonymous bound parameters are created
in SQL expressions; previously, this behavior only applied to
:meth:`_schema.Table.insert` and :meth:`_schema.Table.update`
parameter names.
The other solution is to use a positional format; psycopg2 allows use of the
"format" paramstyle, which can be passed to
:paramref:`_sa.create_engine.paramstyle`::
engine = create_engine(
'postgresql://scott:tiger@localhost:5432/test', paramstyle='format')
With the above engine, instead of a statement like::
INSERT INTO measurement ("Size (meters)") VALUES (%(Size (meters))s)
{'Size (meters)': 1}
we instead see::
INSERT INTO measurement ("Size (meters)") VALUES (%s)
(1, )
Where above, the dictionary style is converted into a tuple with positional
style.
Transactions
------------
The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
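For example, SAVEPOINTs are available from Core via
:meth:`_engine.Connection.begin_nested`; a minimal sketch, where
``some_table`` is a placeholder::
    with engine.connect() as conn:
        trans = conn.begin()
        savepoint = conn.begin_nested()  # emits SAVEPOINT
        try:
            conn.execute(some_table.insert(), {"data": "value"})
            savepoint.commit()  # emits RELEASE SAVEPOINT
        except Exception:
            savepoint.rollback()  # emits ROLLBACK TO SAVEPOINT
        trans.commit()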
.. _psycopg2_isolation_level:
Psycopg2 Transaction Isolation Level
-------------------------------------
As discussed in :ref:`postgresql_isolation_level`,
all PostgreSQL dialects support setting of transaction isolation level
both via the ``isolation_level`` parameter passed to
:func:`_sa.create_engine`, as well as the ``isolation_level`` argument
used by :meth:`_engine.Connection.execution_options`. When using the
psycopg2 dialect, these
options make use of psycopg2's ``set_isolation_level()`` connection method,
rather than emitting a PostgreSQL directive; this is because psycopg2's
API-level setting is always emitted at the start of each transaction in any
case.
The psycopg2 dialect supports these constants for isolation level:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
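For example, the engine-wide and per-connection forms look as follows (the
connection string is a placeholder)::
    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        isolation_level="REPEATABLE READ")
    # or per-connection; VACUUM is one statement that requires autocommit
    with engine.connect() as conn:
        conn = conn.execution_options(isolation_level="AUTOCOMMIT")
        conn.execute("VACUUM")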
.. seealso::
:ref:`postgresql_isolation_level`
:ref:`pg8000_isolation_level`
NOTICE logging
---------------
The psycopg2 dialect will log PostgreSQL NOTICE messages
via the ``sqlalchemy.dialects.postgresql`` logger. When this logger
is set to the ``logging.INFO`` level, notice messages will be logged::
import logging
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
Above, it is assumed that logging is configured externally. If this is not
the case, configuration such as ``logging.basicConfig()`` must be utilized::
import logging
logging.basicConfig() # log messages to stdout
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
.. seealso::
`Logging HOWTO <https://docs.python.org/3/howto/logging.html>`_ - on the python.org website
.. _psycopg2_hstore:
HSTORE type
------------
The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of
the HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension
by default when psycopg2 version 2.4 or greater is used, and
it is detected that the target database has the HSTORE type set up for use.
In other words, when the dialect makes the first
connection, a sequence like the following is performed:
1. Request the available HSTORE oids using
``psycopg2.extras.HstoreAdapter.get_oids()``.
If this function returns a list of HSTORE identifiers, we then determine
that the ``HSTORE`` extension is present.
This function is **skipped** if the version of psycopg2 installed is
less than version 2.4.
2. If the ``use_native_hstore`` flag is at its default of ``True``, and
we've detected that ``HSTORE`` oids are available, the
``psycopg2.extensions.register_hstore()`` extension is invoked for all
connections.
The ``register_hstore()`` extension has the effect of **all Python
dictionaries being accepted as parameters regardless of the type of target
column in SQL**. The dictionaries are converted by this extension into a
textual HSTORE expression. If this behavior is not desired, disable the
use of the hstore extension by setting ``use_native_hstore`` to ``False`` as
follows::
engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
use_native_hstore=False)
The ``HSTORE`` type is **still supported** when the
``psycopg2.extensions.register_hstore()`` extension is not used. It merely
means that the coercion between Python dictionaries and the HSTORE
string format, on both the parameter side and the result side, will take
place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2``
which may be more performant.
""" # noqa
from __future__ import absolute_import
import decimal
import logging
import re
from .base import _DECIMAL_TYPES
from .base import _FLOAT_TYPES
from .base import _INT_TYPES
from .base import ENUM
from .base import PGCompiler
from .base import PGDialect
from .base import PGExecutionContext
from .base import PGIdentifierPreparer
from .base import UUID
from .hstore import HSTORE
from .json import JSON
from .json import JSONB
from ... import exc
from ... import processors
from ... import types as sqltypes
from ... import util
from ...engine import result as _result
from ...util import collections_abc
try:
from uuid import UUID as _python_UUID # noqa
except ImportError:
_python_UUID = None
logger = logging.getLogger("sqlalchemy.dialects.postgresql")
class _PGNumeric(sqltypes.Numeric):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
decimal.Decimal, self._effective_decimal_return_scale
)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
                # psycopg2 returns Decimal natively for 1700 (numeric)
return None
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
else:
if coltype in _FLOAT_TYPES:
                # psycopg2 returns float natively for 701 (float8)
return None
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
return processors.to_float
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
class _PGEnum(ENUM):
def result_processor(self, dialect, coltype):
if util.py2k and self._expect_unicode is True:
# for py2k, if the enum type needs unicode data (which is set up as
# part of the Enum() constructor based on values passed as py2k
# unicode objects) we have to use our own converters since
# psycopg2's don't work, a rare exception to the "modern DBAPIs
# support unicode everywhere" theme of deprecating
# convert_unicode=True. Use the special "force_nocheck" directive
# which forces unicode conversion to happen on the Python side
# without an isinstance() check. in py3k psycopg2 does the right
# thing automatically.
self._expect_unicode = "force_nocheck"
return super(_PGEnum, self).result_processor(dialect, coltype)
class _PGHStore(HSTORE):
def bind_processor(self, dialect):
if dialect._has_native_hstore:
return None
else:
return super(_PGHStore, self).bind_processor(dialect)
def result_processor(self, dialect, coltype):
if dialect._has_native_hstore:
return None
else:
return super(_PGHStore, self).result_processor(dialect, coltype)
class _PGJSON(JSON):
def result_processor(self, dialect, coltype):
if dialect._has_native_json:
return None
else:
return super(_PGJSON, self).result_processor(dialect, coltype)
class _PGJSONB(JSONB):
def result_processor(self, dialect, coltype):
if dialect._has_native_jsonb:
return None
else:
return super(_PGJSONB, self).result_processor(dialect, coltype)
class _PGUUID(UUID):
def bind_processor(self, dialect):
if not self.as_uuid and dialect.use_native_uuid:
def process(value):
if value is not None:
value = _python_UUID(value)
return value
return process
def result_processor(self, dialect, coltype):
if not self.as_uuid and dialect.use_native_uuid:
def process(value):
if value is not None:
value = str(value)
return value
return process
_server_side_id = util.counter()
class PGExecutionContext_psycopg2(PGExecutionContext):
def create_server_side_cursor(self):
# use server-side cursors:
# http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
return self._dbapi_connection.cursor(ident)
def get_result_proxy(self):
self._log_notices(self.cursor)
if self._is_server_side:
return _result.BufferedRowResultProxy(self)
else:
return _result.ResultProxy(self)
def _log_notices(self, cursor):
# check also that notices is an iterable, after it's already
# established that we will be iterating through it. This is to get
# around test suites such as SQLAlchemy's using a Mock object for
# cursor
if not cursor.connection.notices or not isinstance(
cursor.connection.notices, collections_abc.Iterable
):
return
for notice in cursor.connection.notices:
# NOTICE messages have a
# newline character at the end
logger.info(notice.rstrip())
cursor.connection.notices[:] = []
class PGCompiler_psycopg2(PGCompiler):
pass
class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
pass
EXECUTEMANY_DEFAULT = util.symbol("executemany_default")
EXECUTEMANY_BATCH = util.symbol("executemany_batch")
EXECUTEMANY_VALUES = util.symbol("executemany_values")
class PGDialect_psycopg2(PGDialect):
driver = "psycopg2"
if util.py2k:
supports_unicode_statements = False
supports_server_side_cursors = True
default_paramstyle = "pyformat"
# set to true based on psycopg2 version
supports_sane_multi_rowcount = False
execution_ctx_cls = PGExecutionContext_psycopg2
statement_compiler = PGCompiler_psycopg2
preparer = PGIdentifierPreparer_psycopg2
psycopg2_version = (0, 0)
FEATURE_VERSION_MAP = dict(
native_json=(2, 5),
native_jsonb=(2, 5, 4),
sane_multi_rowcount=(2, 0, 9),
array_oid=(2, 4, 3),
hstore_adapter=(2, 4),
)
_has_native_hstore = False
_has_native_json = False
_has_native_jsonb = False
engine_config_types = PGDialect.engine_config_types.union(
[("use_native_unicode", util.asbool)]
)
colspecs = util.update_copy(
PGDialect.colspecs,
{
sqltypes.Numeric: _PGNumeric,
ENUM: _PGEnum, # needs force_unicode
sqltypes.Enum: _PGEnum, # needs force_unicode
HSTORE: _PGHStore,
JSON: _PGJSON,
sqltypes.JSON: _PGJSON,
JSONB: _PGJSONB,
UUID: _PGUUID,
},
)
@util.deprecated_params(
use_batch_mode=(
"1.3.7",
"The psycopg2 use_batch_mode flag is superseded by "
"executemany_mode='batch'",
)
)
def __init__(
self,
server_side_cursors=False,
use_native_unicode=True,
client_encoding=None,
use_native_hstore=True,
use_native_uuid=True,
executemany_mode=None,
executemany_batch_page_size=None,
executemany_values_page_size=None,
use_batch_mode=None,
**kwargs
):
PGDialect.__init__(self, **kwargs)
self.server_side_cursors = server_side_cursors
self.use_native_unicode = use_native_unicode
self.use_native_hstore = use_native_hstore
self.use_native_uuid = use_native_uuid
self.supports_unicode_binds = use_native_unicode
self.client_encoding = client_encoding
# Parse executemany_mode argument, allowing it to be only one of the
# symbol names
self.executemany_mode = util.symbol.parse_user_argument(
executemany_mode,
{
EXECUTEMANY_DEFAULT: [None],
EXECUTEMANY_BATCH: ["batch"],
EXECUTEMANY_VALUES: ["values"],
},
"executemany_mode",
)
if use_batch_mode:
self.executemany_mode = EXECUTEMANY_BATCH
self.executemany_batch_page_size = executemany_batch_page_size
self.executemany_values_page_size = executemany_values_page_size
if self.dbapi and hasattr(self.dbapi, "__version__"):
m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", self.dbapi.__version__)
if m:
self.psycopg2_version = tuple(
int(x) for x in m.group(1, 2, 3) if x is not None
)
def initialize(self, connection):
super(PGDialect_psycopg2, self).initialize(connection)
self._has_native_hstore = (
self.use_native_hstore
and self._hstore_oids(connection.connection) is not None
)
self._has_native_json = (
self.psycopg2_version >= self.FEATURE_VERSION_MAP["native_json"]
)
self._has_native_jsonb = (
self.psycopg2_version >= self.FEATURE_VERSION_MAP["native_jsonb"]
)
# http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9
self.supports_sane_multi_rowcount = (
self.psycopg2_version
>= self.FEATURE_VERSION_MAP["sane_multi_rowcount"]
and self.executemany_mode is EXECUTEMANY_DEFAULT
)
@classmethod
def dbapi(cls):
import psycopg2
return psycopg2
@classmethod
def _psycopg2_extensions(cls):
from psycopg2 import extensions
return extensions
@classmethod
def _psycopg2_extras(cls):
from psycopg2 import extras
return extras
@util.memoized_property
def _isolation_lookup(self):
extensions = self._psycopg2_extensions()
return {
"AUTOCOMMIT": extensions.ISOLATION_LEVEL_AUTOCOMMIT,
"READ COMMITTED": extensions.ISOLATION_LEVEL_READ_COMMITTED,
"READ UNCOMMITTED": extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
"REPEATABLE READ": extensions.ISOLATION_LEVEL_REPEATABLE_READ,
"SERIALIZABLE": extensions.ISOLATION_LEVEL_SERIALIZABLE,
}
def set_isolation_level(self, connection, level):
try:
level = self._isolation_lookup[level.replace("_", " ")]
except KeyError as err:
util.raise_(
exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
),
replace_context=err,
)
connection.set_isolation_level(level)
def on_connect(self):
extras = self._psycopg2_extras()
extensions = self._psycopg2_extensions()
fns = []
if self.client_encoding is not None:
def on_connect(conn):
conn.set_client_encoding(self.client_encoding)
fns.append(on_connect)
if self.isolation_level is not None:
def on_connect(conn):
self.set_isolation_level(conn, self.isolation_level)
fns.append(on_connect)
if self.dbapi and self.use_native_uuid:
def on_connect(conn):
extras.register_uuid(None, conn)
fns.append(on_connect)
if self.dbapi and self.use_native_unicode:
def on_connect(conn):
extensions.register_type(extensions.UNICODE, conn)
extensions.register_type(extensions.UNICODEARRAY, conn)
fns.append(on_connect)
if self.dbapi and self.use_native_hstore:
def on_connect(conn):
hstore_oids = self._hstore_oids(conn)
if hstore_oids is not None:
oid, array_oid = hstore_oids
kw = {"oid": oid}
if util.py2k:
kw["unicode"] = True
if (
self.psycopg2_version
>= self.FEATURE_VERSION_MAP["array_oid"]
):
kw["array_oid"] = array_oid
extras.register_hstore(conn, **kw)
fns.append(on_connect)
if self.dbapi and self._json_deserializer:
def on_connect(conn):
if self._has_native_json:
extras.register_default_json(
conn, loads=self._json_deserializer
)
if self._has_native_jsonb:
extras.register_default_jsonb(
conn, loads=self._json_deserializer
)
fns.append(on_connect)
if fns:
def on_connect(conn):
for fn in fns:
fn(conn)
return on_connect
else:
return None
def do_executemany(self, cursor, statement, parameters, context=None):
if self.executemany_mode is EXECUTEMANY_DEFAULT:
cursor.executemany(statement, parameters)
return
if (
self.executemany_mode is EXECUTEMANY_VALUES
and context
and context.isinsert
and context.compiled.insert_single_values_expr
):
executemany_values = (
"(%s)" % context.compiled.insert_single_values_expr
)
# guard for statement that was altered via event hook or similar
if executemany_values not in statement:
executemany_values = None
else:
executemany_values = None
if executemany_values:
# Currently, SQLAlchemy does not pass "RETURNING" statements
# into executemany(), since no DBAPI has ever supported that
# until the introduction of psycopg2's executemany_values, so
# we are not yet using the fetch=True flag.
statement = statement.replace(executemany_values, "%s")
if self.executemany_values_page_size:
kwargs = {"page_size": self.executemany_values_page_size}
else:
kwargs = {}
self._psycopg2_extras().execute_values(
cursor,
statement,
parameters,
template=executemany_values,
**kwargs
)
else:
if self.executemany_batch_page_size:
kwargs = {"page_size": self.executemany_batch_page_size}
else:
kwargs = {}
self._psycopg2_extras().execute_batch(
cursor, statement, parameters, **kwargs
)
@util.memoized_instancemethod
def _hstore_oids(self, conn):
if self.psycopg2_version >= self.FEATURE_VERSION_MAP["hstore_adapter"]:
extras = self._psycopg2_extras()
oids = extras.HstoreAdapter.get_oids(conn)
if oids is not None and oids[0]:
return oids[0:2]
return None
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
if opts:
if "port" in opts:
opts["port"] = int(opts["port"])
opts.update(url.query)
# send individual dbname, user, password, host, port
# parameters to psycopg2.connect()
return ([], opts)
elif url.query:
# any other connection arguments, pass directly
opts.update(url.query)
return ([], opts)
else:
# no connection arguments whatsoever; psycopg2.connect()
# requires that "dsn" be present as a blank string.
return ([""], opts)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
# check the "closed" flag. this might not be
# present on old psycopg2 versions. Also,
# this flag doesn't actually help in a lot of disconnect
# situations, so don't rely on it.
if getattr(connection, "closed", False):
return True
# checks based on strings. in the case that .closed
# didn't cut it, fall back onto these.
str_e = str(e).partition("\n")[0]
for msg in [
# these error messages from libpq: interfaces/libpq/fe-misc.c
# and interfaces/libpq/fe-secure.c.
"terminating connection",
"closed the connection",
"connection not open",
"could not receive data from server",
"could not send data to server",
                # psycopg2 client errors, psycopg2/connection.h,
# psycopg2/cursor.h
"connection already closed",
"cursor already closed",
# not sure where this path is originally from, it may
# be obsolete. It really says "losed", not "closed".
"losed the connection unexpectedly",
# these can occur in newer SSL
"connection has been closed unexpectedly",
"SSL SYSCALL error: Bad file descriptor",
"SSL SYSCALL error: EOF detected",
"SSL error: decryption failed or bad record mac",
"SSL SYSCALL error: Operation timed out",
]:
idx = str_e.find(msg)
if idx >= 0 and '"' not in str_e[:idx]:
return True
return False
dialect = PGDialect_psycopg2
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/provision.py | import time
from ... import exc
from ... import text
from ...testing.provision import create_db
from ...testing.provision import drop_db
from ...testing.provision import log
from ...testing.provision import temp_table_keyword_args
@create_db.for_db("postgresql")
def _pg_create_db(cfg, eng, ident):
template_db = cfg.options.postgresql_templatedb
with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
try:
_pg_drop_db(cfg, conn, ident)
except Exception:
pass
if not template_db:
template_db = conn.scalar("select current_database()")
attempt = 0
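        # CREATE DATABASE can fail transiently while the template database
        # is still "accessed by other users" from a previous test run;
        # retry a few times before giving up.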
while True:
try:
conn.execute(
"CREATE DATABASE %s TEMPLATE %s" % (ident, template_db)
)
except exc.OperationalError as err:
attempt += 1
if attempt >= 3:
raise
if "accessed by other users" in str(err):
log.info(
"Waiting to create %s, URI %r, "
"template DB %s is in use sleeping for .5",
ident,
eng.url,
template_db,
)
time.sleep(0.5)
else:
break
@drop_db.for_db("postgresql")
def _pg_drop_db(cfg, eng, ident):
with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
conn.execute(
text(
"select pg_terminate_backend(pid) from pg_stat_activity "
"where usename=current_user and pid != pg_backend_pid() "
"and datname=:dname"
),
dname=ident,
)
conn.execute("DROP DATABASE %s" % ident)
@temp_table_keyword_args.for_db("postgresql")
def _postgresql_temp_table_keyword_args(cfg, eng):
return {"prefixes": ["TEMPORARY"]}
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/pg8000.py | # postgresql/pg8000.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors <see AUTHORS
# file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: postgresql+pg8000
:name: pg8000
:dbapi: pg8000
:connectstring: postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...]
:url: https://pythonhosted.org/pg8000/
.. note::
The pg8000 dialect is **not tested as part of SQLAlchemy's continuous
integration** and may have unresolved issues. The recommended PostgreSQL
dialect is psycopg2.
.. _pg8000_unicode:
Unicode
-------
pg8000 will encode / decode string values between it and the server using the
PostgreSQL ``client_encoding`` parameter; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf-8``, as a more useful default::
#client_encoding = sql_ascii # actually, defaults to database
# encoding
client_encoding = utf8
The ``client_encoding`` can be overridden for a session by executing the
SQL::
    SET CLIENT_ENCODING TO 'utf8';
SQLAlchemy will execute this SQL on all new connections based on the value
passed to :func:`_sa.create_engine` using the ``client_encoding`` parameter::
engine = create_engine(
"postgresql+pg8000://user:pass@host/dbname", client_encoding='utf8')
.. _pg8000_isolation_level:
pg8000 Transaction Isolation Level
-------------------------------------
The pg8000 dialect offers the same isolation level settings as that
of the :ref:`psycopg2 <psycopg2_isolation_level>` dialect:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
.. versionadded:: 0.9.5 support for AUTOCOMMIT isolation level when using
pg8000.
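For example, to enable AUTOCOMMIT engine-wide, with a placeholder
connection string::
    engine = create_engine(
        "postgresql+pg8000://user:pass@host/dbname",
        isolation_level="AUTOCOMMIT")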
.. seealso::
:ref:`postgresql_isolation_level`
:ref:`psycopg2_isolation_level`
""" # noqa
import decimal
import re
from .base import _DECIMAL_TYPES
from .base import _FLOAT_TYPES
from .base import _INT_TYPES
from .base import PGCompiler
from .base import PGDialect
from .base import PGExecutionContext
from .base import PGIdentifierPreparer
from .base import UUID
from .json import JSON
from ... import exc
from ... import processors
from ... import types as sqltypes
from ... import util
from ...sql.elements import quoted_name
try:
from uuid import UUID as _python_UUID # noqa
except ImportError:
_python_UUID = None
class _PGNumeric(sqltypes.Numeric):
def result_processor(self, dialect, coltype):
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
decimal.Decimal, self._effective_decimal_return_scale
)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# pg8000 returns Decimal natively for 1700
return None
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
else:
if coltype in _FLOAT_TYPES:
# pg8000 returns float natively for 701
return None
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
return processors.to_float
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
class _PGNumericNoBind(_PGNumeric):
def bind_processor(self, dialect):
return None
class _PGJSON(JSON):
def result_processor(self, dialect, coltype):
if dialect._dbapi_version > (1, 10, 1):
return None # Has native JSON
else:
return super(_PGJSON, self).result_processor(dialect, coltype)
class _PGUUID(UUID):
def bind_processor(self, dialect):
if not self.as_uuid:
def process(value):
if value is not None:
value = _python_UUID(value)
return value
return process
def result_processor(self, dialect, coltype):
if not self.as_uuid:
def process(value):
if value is not None:
value = str(value)
return value
return process
class PGExecutionContext_pg8000(PGExecutionContext):
pass
class PGCompiler_pg8000(PGCompiler):
def visit_mod_binary(self, binary, operator, **kw):
return (
self.process(binary.left, **kw)
+ " %% "
+ self.process(binary.right, **kw)
)
def post_process_text(self, text):
if "%%" in text:
util.warn(
"The SQLAlchemy postgresql dialect "
"now automatically escapes '%' in text() "
"expressions to '%%'."
)
return text.replace("%", "%%")
class PGIdentifierPreparer_pg8000(PGIdentifierPreparer):
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
return value.replace("%", "%%")
class PGDialect_pg8000(PGDialect):
driver = "pg8000"
supports_unicode_statements = True
supports_unicode_binds = True
default_paramstyle = "format"
supports_sane_multi_rowcount = True
execution_ctx_cls = PGExecutionContext_pg8000
statement_compiler = PGCompiler_pg8000
preparer = PGIdentifierPreparer_pg8000
description_encoding = "use_encoding"
colspecs = util.update_copy(
PGDialect.colspecs,
{
sqltypes.Numeric: _PGNumericNoBind,
sqltypes.Float: _PGNumeric,
JSON: _PGJSON,
sqltypes.JSON: _PGJSON,
UUID: _PGUUID,
},
)
def __init__(self, client_encoding=None, **kwargs):
PGDialect.__init__(self, **kwargs)
self.client_encoding = client_encoding
def initialize(self, connection):
self.supports_sane_multi_rowcount = self._dbapi_version >= (1, 9, 14)
super(PGDialect_pg8000, self).initialize(connection)
@util.memoized_property
def _dbapi_version(self):
if self.dbapi and hasattr(self.dbapi, "__version__"):
return tuple(
[
int(x)
for x in re.findall(
r"(\d+)(?:[-\.]?|$)", self.dbapi.__version__
)
]
)
else:
return (99, 99, 99)
@classmethod
def dbapi(cls):
return __import__("pg8000")
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
if "port" in opts:
opts["port"] = int(opts["port"])
opts.update(url.query)
return ([], opts)
def is_disconnect(self, e, connection, cursor):
return "connection is closed" in str(e)
def set_isolation_level(self, connection, level):
level = level.replace("_", " ")
# adjust for ConnectionFairy possibly being present
if hasattr(connection, "connection"):
connection = connection.connection
if level == "AUTOCOMMIT":
connection.autocommit = True
elif level in self._isolation_lookup:
connection.autocommit = False
cursor = connection.cursor()
cursor.execute(
"SET SESSION CHARACTERISTICS AS TRANSACTION "
"ISOLATION LEVEL %s" % level
)
cursor.execute("COMMIT")
cursor.close()
else:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s or AUTOCOMMIT"
% (level, self.name, ", ".join(self._isolation_lookup))
)
def set_client_encoding(self, connection, client_encoding):
# adjust for ConnectionFairy possibly being present
if hasattr(connection, "connection"):
connection = connection.connection
cursor = connection.cursor()
cursor.execute("SET CLIENT_ENCODING TO '" + client_encoding + "'")
cursor.execute("COMMIT")
cursor.close()
def do_begin_twophase(self, connection, xid):
connection.connection.tpc_begin((0, xid, ""))
def do_prepare_twophase(self, connection, xid):
connection.connection.tpc_prepare()
def do_rollback_twophase(
self, connection, xid, is_prepared=True, recover=False
):
connection.connection.tpc_rollback((0, xid, ""))
def do_commit_twophase(
self, connection, xid, is_prepared=True, recover=False
):
connection.connection.tpc_commit((0, xid, ""))
def do_recover_twophase(self, connection):
return [row[1] for row in connection.connection.tpc_recover()]
def on_connect(self):
fns = []
def on_connect(conn):
conn.py_types[quoted_name] = conn.py_types[util.text_type]
fns.append(on_connect)
if self.client_encoding is not None:
def on_connect(conn):
self.set_client_encoding(conn, self.client_encoding)
fns.append(on_connect)
if self.isolation_level is not None:
def on_connect(conn):
self.set_isolation_level(conn, self.isolation_level)
fns.append(on_connect)
if len(fns) > 0:
def on_connect(conn):
for fn in fns:
fn(conn)
return on_connect
else:
return None
dialect = PGDialect_pg8000
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/array.py | # postgresql/array.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
from ... import types as sqltypes
from ... import util
from ...sql import expression
from ...sql import operators
def Any(other, arrexpr, operator=operators.eq):
"""A synonym for the :meth:`.ARRAY.Comparator.any` method.
This method is legacy and is here for backwards-compatibility.
.. seealso::
:func:`_expression.any_`
"""
return arrexpr.any(other, operator)
def All(other, arrexpr, operator=operators.eq):
"""A synonym for the :meth:`.ARRAY.Comparator.all` method.
This method is legacy and is here for backwards-compatibility.
.. seealso::
:func:`_expression.all_`
"""
return arrexpr.all(other, operator)
class array(expression.Tuple):
"""A PostgreSQL ARRAY literal.
This is used to produce ARRAY literals in SQL expressions, e.g.::
from sqlalchemy.dialects.postgresql import array
from sqlalchemy.dialects import postgresql
from sqlalchemy import select, func
stmt = select([
array([1,2]) + array([3,4,5])
])
print(stmt.compile(dialect=postgresql.dialect()))
Produces the SQL::
SELECT ARRAY[%(param_1)s, %(param_2)s] ||
            ARRAY[%(param_3)s, %(param_4)s, %(param_5)s] AS anon_1
An instance of :class:`.array` will always have the datatype
:class:`_types.ARRAY`. The "inner" type of the array is inferred from
the values present, unless the ``type_`` keyword argument is passed::
array(['foo', 'bar'], type_=CHAR)
Multidimensional arrays are produced by nesting :class:`.array` constructs.
The dimensionality of the final :class:`_types.ARRAY`
type is calculated by
recursively adding the dimensions of the inner :class:`_types.ARRAY`
type::
stmt = select([
array([
array([1, 2]), array([3, 4]), array([column('q'), column('x')])
])
])
print(stmt.compile(dialect=postgresql.dialect()))
Produces::
SELECT ARRAY[ARRAY[%(param_1)s, %(param_2)s],
ARRAY[%(param_3)s, %(param_4)s], ARRAY[q, x]] AS anon_1
.. versionadded:: 1.3.6 added support for multidimensional array literals
.. seealso::
:class:`_postgresql.ARRAY`
"""
__visit_name__ = "array"
def __init__(self, clauses, **kw):
super(array, self).__init__(*clauses, **kw)
if isinstance(self.type, ARRAY):
self.type = ARRAY(
self.type.item_type,
dimensions=self.type.dimensions + 1
if self.type.dimensions is not None
else 2,
)
else:
self.type = ARRAY(self.type)
def _bind_param(self, operator, obj, _assume_scalar=False, type_=None):
if _assume_scalar or operator is operators.getitem:
return expression.BindParameter(
None,
obj,
_compared_to_operator=operator,
type_=type_,
_compared_to_type=self.type,
unique=True,
)
else:
return array(
[
self._bind_param(
operator, o, _assume_scalar=True, type_=type_
)
for o in obj
]
)
def self_group(self, against=None):
if against in (operators.any_op, operators.all_op, operators.getitem):
return expression.Grouping(self)
else:
return self
CONTAINS = operators.custom_op("@>", precedence=5)
CONTAINED_BY = operators.custom_op("<@", precedence=5)
OVERLAP = operators.custom_op("&&", precedence=5)
class ARRAY(sqltypes.ARRAY):
"""PostgreSQL ARRAY type.
.. versionchanged:: 1.1 The :class:`_postgresql.ARRAY` type is now
a subclass of the core :class:`_types.ARRAY` type.
The :class:`_postgresql.ARRAY` type is constructed in the same way
as the core :class:`_types.ARRAY` type; a member type is required, and a
number of dimensions is recommended if the type is to be used for more
than one dimension::
from sqlalchemy.dialects import postgresql
mytable = Table("mytable", metadata,
Column("data", postgresql.ARRAY(Integer, dimensions=2))
)
The :class:`_postgresql.ARRAY` type provides all operations defined on the
core :class:`_types.ARRAY` type, including support for "dimensions",
indexed access, and simple matching such as
:meth:`.types.ARRAY.Comparator.any` and
    :meth:`.types.ARRAY.Comparator.all`. The :class:`_postgresql.ARRAY`
    class also provides PostgreSQL-specific methods for containment
    operations, including :meth:`.postgresql.ARRAY.Comparator.contains`,
    :meth:`.postgresql.ARRAY.Comparator.contained_by`, and
:meth:`.postgresql.ARRAY.Comparator.overlap`, e.g.::
mytable.c.data.contains([1, 2])
The :class:`_postgresql.ARRAY` type may not be supported on all
PostgreSQL DBAPIs; it is currently known to work on psycopg2 only.
Additionally, the :class:`_postgresql.ARRAY`
type does not work directly in
conjunction with the :class:`.ENUM` type. For a workaround, see the
special type at :ref:`postgresql_array_of_enum`.
.. seealso::
:class:`_types.ARRAY` - base array type
:class:`_postgresql.array` - produces a literal array value.
"""
class Comparator(sqltypes.ARRAY.Comparator):
"""Define comparison operations for :class:`_types.ARRAY`.
Note that these operations are in addition to those provided
by the base :class:`.types.ARRAY.Comparator` class, including
:meth:`.types.ARRAY.Comparator.any` and
:meth:`.types.ARRAY.Comparator.all`.
"""
def contains(self, other, **kwargs):
"""Boolean expression. Test if elements are a superset of the
elements of the argument array expression.
"""
return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if elements are a proper subset of the
elements of the argument array expression.
"""
return self.operate(
CONTAINED_BY, other, result_type=sqltypes.Boolean
)
def overlap(self, other):
"""Boolean expression. Test if array has elements in common with
an argument array expression.
"""
return self.operate(OVERLAP, other, result_type=sqltypes.Boolean)
comparator_factory = Comparator
def __init__(
self, item_type, as_tuple=False, dimensions=None, zero_indexes=False
):
"""Construct an ARRAY.
E.g.::
Column('myarray', ARRAY(Integer))
Arguments are:
:param item_type: The data type of items of this array. Note that
dimensionality is irrelevant here, so multi-dimensional arrays like
``INTEGER[][]`` are constructed as ``ARRAY(Integer)``, not as
``ARRAY(ARRAY(Integer))`` or such.
:param as_tuple=False: Specify whether return results
should be converted to tuples from lists. DBAPIs such
as psycopg2 return lists by default. When tuples are
returned, the results are hashable.
:param dimensions: if non-None, the ARRAY will assume a fixed
number of dimensions. This will cause the DDL emitted for this
ARRAY to include the exact number of bracket clauses ``[]``,
and will also optimize the performance of the type overall.
Note that PG arrays are always implicitly "non-dimensioned",
meaning they can store any number of dimensions no matter how
they were declared.
:param zero_indexes=False: when True, index values will be converted
between Python zero-based and PostgreSQL one-based indexes, e.g.
a value of one will be added to all index values before passing
to the database.
.. versionadded:: 0.9.5
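As a sketch of the ``zero_indexes`` behavior (the table here is
hypothetical)::

    Column('data', ARRAY(Integer, zero_indexes=True))
    # an expression such as some_table.c.data[0] renders as data[1]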
"""
if isinstance(item_type, ARRAY):
raise ValueError(
"Do not nest ARRAY types; ARRAY(basetype) "
"handles multi-dimensional arrays of basetype"
)
if isinstance(item_type, type):
item_type = item_type()
self.item_type = item_type
self.as_tuple = as_tuple
self.dimensions = dimensions
self.zero_indexes = zero_indexes
@property
def hashable(self):
return self.as_tuple
@property
def python_type(self):
return list
def compare_values(self, x, y):
return x == y
def _proc_array(self, arr, itemproc, dim, collection):
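        # recursively apply the per-item processor to a possibly-nested
        # array value, rebuilding each level as an instance of ``collection``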
if dim is None:
arr = list(arr)
if (
dim == 1
or dim is None
and (
# this has to be (list, tuple), or at least
# not hasattr('__iter__'), since Py3K strings
# etc. have __iter__
not arr
or not isinstance(arr[0], (list, tuple))
)
):
if itemproc:
return collection(itemproc(x) for x in arr)
else:
return collection(arr)
else:
return collection(
self._proc_array(
x,
itemproc,
dim - 1 if dim is not None else None,
collection,
)
for x in arr
)
@util.memoized_property
def _require_cast(self):
return self._against_native_enum or isinstance(
self.item_type, sqltypes.JSON
)
@util.memoized_property
def _against_native_enum(self):
return (
isinstance(self.item_type, sqltypes.Enum)
and self.item_type.native_enum
)
def bind_expression(self, bindvalue):
if self._require_cast:
return expression.cast(bindvalue, self)
else:
return bindvalue
def bind_processor(self, dialect):
item_proc = self.item_type.dialect_impl(dialect).bind_processor(
dialect
)
def process(value):
if value is None:
return value
else:
return self._proc_array(
value, item_proc, self.dimensions, list
)
return process
def result_processor(self, dialect, coltype):
item_proc = self.item_type.dialect_impl(dialect).result_processor(
dialect, coltype
)
def process(value):
if value is None:
return value
else:
return self._proc_array(
value,
item_proc,
self.dimensions,
tuple if self.as_tuple else list,
)
if self._against_native_enum:
super_rp = process
def handle_raw_string(value):
inner = re.match(r"^{(.*)}$", value).group(1)
return inner.split(",") if inner else []
def process(value):
if value is None:
return value
# isinstance(value, util.string_types) is required to handle
# the case where a TypeDecorator for an ARRAY of ENUM is
# used, as was required in SQLAlchemy < 1.3.17
return super_rp(
handle_raw_string(value)
if isinstance(value, util.string_types)
else value
)
return process
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/dml.py | # postgresql/dml.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import ext
from ... import util
from ...sql import schema
from ...sql.base import _generative
from ...sql.dml import Insert as StandardInsert
from ...sql.elements import ClauseElement
from ...sql.expression import alias
from ...util.langhelpers import public_factory
__all__ = ("Insert", "insert")
class Insert(StandardInsert):
"""PostgreSQL-specific implementation of INSERT.
Adds methods for PG-specific syntaxes such as ON CONFLICT.
The :class:`_postgresql.Insert` object is created using the
:func:`sqlalchemy.dialects.postgresql.insert` function.
.. versionadded:: 1.1
"""
@util.memoized_property
def excluded(self):
"""Provide the ``excluded`` namespace for an ON CONFLICT statement
PG's ON CONFLICT clause allows reference to the row that would
be inserted, known as ``excluded``. This attribute provides
all columns in this row to be referenceable.
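E.g., as an illustrative sketch (``my_table`` is a hypothetical
:class:`_schema.Table`)::

    stmt = insert(my_table).values(id=1, data='inserted value')
    stmt = stmt.on_conflict_do_update(
        index_elements=['id'],
        set_=dict(data=stmt.excluded.data)
    )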
.. seealso::
:ref:`postgresql_insert_on_conflict` - example of how
to use :attr:`_expression.Insert.excluded`
"""
return alias(self.table, name="excluded").columns
@_generative
def on_conflict_do_update(
self,
constraint=None,
index_elements=None,
index_where=None,
set_=None,
where=None,
):
r"""
Specifies a DO UPDATE SET action for the ON CONFLICT clause.
Either the ``constraint`` or ``index_elements`` argument is
required, but only one of these can be specified.
:param constraint:
The name of a unique or exclusion constraint on the table,
or the constraint object itself if it has a .name attribute.
:param index_elements:
A sequence consisting of string column names, :class:`_schema.Column`
objects, or other column expression objects that will be used
to infer a target index.
:param index_where:
Additional WHERE criterion that can be used to infer a
conditional target index.
:param set\_:
Required argument. A dictionary or other mapping object
with column names as keys and expressions or literals as values,
specifying the ``SET`` actions to take.
If the target :class:`_schema.Column` specifies a ".key" attribute
distinct from the column name, that key should be used.
.. warning:: This dictionary does **not** take into account
Python-specified default UPDATE values or generation functions,
e.g. those specified using :paramref:`_schema.Column.onupdate`.
These values will not be exercised for an ON CONFLICT style of
UPDATE, unless they are manually specified in the
:paramref:`.Insert.on_conflict_do_update.set_` dictionary.
:param where:
Optional argument. If present, can be a literal SQL
string or an acceptable expression for a ``WHERE`` clause
that restricts the rows affected by ``DO UPDATE SET``. Rows
not meeting the ``WHERE`` condition will not be updated
(effectively a ``DO NOTHING`` for those rows).
.. versionadded:: 1.1
.. seealso::
:ref:`postgresql_insert_on_conflict`
"""
self._post_values_clause = OnConflictDoUpdate(
constraint, index_elements, index_where, set_, where
)
return self
@_generative
def on_conflict_do_nothing(
self, constraint=None, index_elements=None, index_where=None
):
"""
Specifies a DO NOTHING action for the ON CONFLICT clause.
The ``constraint`` and ``index_elements`` arguments
are optional, but only one of these can be specified.
:param constraint:
The name of a unique or exclusion constraint on the table,
or the constraint object itself if it has a .name attribute.
:param index_elements:
A sequence consisting of string column names, :class:`_schema.Column`
objects, or other column expression objects that will be used
to infer a target index.
:param index_where:
Additional WHERE criterion that can be used to infer a
conditional target index.
.. versionadded:: 1.1
.. seealso::
:ref:`postgresql_insert_on_conflict`
"""
self._post_values_clause = OnConflictDoNothing(
constraint, index_elements, index_where
)
return self
insert = public_factory(
Insert, ".dialects.postgresql.insert", ".dialects.postgresql.Insert"
)
class OnConflictClause(ClauseElement):
def __init__(self, constraint=None, index_elements=None, index_where=None):
if constraint is not None:
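            # a Constraint/Index object may be passed here; prefer its name
            # when it has one, otherwise keep the object so that its columns
            # and expressions can be used for inference below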
if not isinstance(constraint, util.string_types) and isinstance(
constraint,
(schema.Index, schema.Constraint, ext.ExcludeConstraint),
):
constraint = getattr(constraint, "name") or constraint
if constraint is not None:
if index_elements is not None:
raise ValueError(
"'constraint' and 'index_elements' are mutually exclusive"
)
if isinstance(constraint, util.string_types):
self.constraint_target = constraint
self.inferred_target_elements = None
self.inferred_target_whereclause = None
elif isinstance(constraint, schema.Index):
index_elements = constraint.expressions
index_where = constraint.dialect_options["postgresql"].get(
"where"
)
elif isinstance(constraint, ext.ExcludeConstraint):
index_elements = constraint.columns
index_where = constraint.where
else:
index_elements = constraint.columns
index_where = constraint.dialect_options["postgresql"].get(
"where"
)
if index_elements is not None:
self.constraint_target = None
self.inferred_target_elements = index_elements
self.inferred_target_whereclause = index_where
elif constraint is None:
self.constraint_target = (
self.inferred_target_elements
) = self.inferred_target_whereclause = None
class OnConflictDoNothing(OnConflictClause):
__visit_name__ = "on_conflict_do_nothing"
class OnConflictDoUpdate(OnConflictClause):
__visit_name__ = "on_conflict_do_update"
def __init__(
self,
constraint=None,
index_elements=None,
index_where=None,
set_=None,
where=None,
):
super(OnConflictDoUpdate, self).__init__(
constraint=constraint,
index_elements=index_elements,
index_where=index_where,
)
if (
self.inferred_target_elements is None
and self.constraint_target is None
):
raise ValueError(
"Either constraint or index_elements, "
"but not both, must be specified unless DO NOTHING"
)
if not isinstance(set_, dict) or not set_:
raise ValueError("set parameter must be a non-empty dictionary")
self.update_values_to_set = [
(key, value) for key, value in set_.items()
]
self.update_whereclause = where
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/json.py | # postgresql/json.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
from ... import types as sqltypes
from ... import util
from ...sql import operators
__all__ = ("JSON", "JSONB")
idx_precedence = operators._PRECEDENCE[operators.json_getitem_op]
ASTEXT = operators.custom_op(
"->>",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
JSONPATH_ASTEXT = operators.custom_op(
"#>>",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
HAS_KEY = operators.custom_op(
"?",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
HAS_ALL = operators.custom_op(
"?&",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
HAS_ANY = operators.custom_op(
"?|",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
CONTAINS = operators.custom_op(
"@>",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
CONTAINED_BY = operators.custom_op(
"<@",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
class JSONPathType(sqltypes.JSON.JSONPathType):
def bind_processor(self, dialect):
super_proc = self.string_bind_processor(dialect)
def process(value):
assert isinstance(value, util.collections_abc.Sequence)
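            # join the path elements into PostgreSQL's array-literal syntax;
            # e.g. the Python sequence ('a', 'b', 1) renders as {a, b, 1}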
tokens = [util.text_type(elem) for elem in value]
value = "{%s}" % (", ".join(tokens))
if super_proc:
value = super_proc(value)
return value
return process
def literal_processor(self, dialect):
super_proc = self.string_literal_processor(dialect)
def process(value):
assert isinstance(value, util.collections_abc.Sequence)
tokens = [util.text_type(elem) for elem in value]
value = "{%s}" % (", ".join(tokens))
if super_proc:
value = super_proc(value)
return value
return process
class JSON(sqltypes.JSON):
"""Represent the PostgreSQL JSON type.
This type is a specialization of the Core-level :class:`_types.JSON`
type. Be sure to read the documentation for :class:`_types.JSON` for
important tips regarding treatment of NULL values and ORM use.
.. versionchanged:: 1.1 :class:`_postgresql.JSON` is now a PostgreSQL-
specific specialization of the new :class:`_types.JSON` type.
The operators provided by the PostgreSQL version of :class:`_types.JSON`
include:
* Index operations (the ``->`` operator)::
data_table.c.data['some key']
data_table.c.data[5]
* Index operations returning text (the ``->>`` operator)::
data_table.c.data['some key'].astext == 'some value'
Note that equivalent functionality is available via the
:attr:`.JSON.Comparator.as_string` accessor.
* Index operations with CAST
(equivalent to ``CAST(col ->> ['some key'] AS <type>)``)::
data_table.c.data['some key'].astext.cast(Integer) == 5
Note that equivalent functionality is available via the
:attr:`.JSON.Comparator.as_integer` and similar accessors.
* Path index operations (the ``#>`` operator)::
data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
* Path index operations returning text (the ``#>>`` operator)::
data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')].astext == 'some value'
.. versionchanged:: 1.1 The :meth:`_expression.ColumnElement.cast`
operator on
JSON objects now requires that the :attr:`.JSON.Comparator.astext`
modifier be called explicitly, if the cast works only from a textual
string.
Index operations return an expression object whose type defaults to
:class:`_types.JSON`, so that further JSON-oriented instructions
may be called upon the result type.
Custom serializers and deserializers are specified at the dialect level,
that is using :func:`_sa.create_engine`. The reason for this is that when
using psycopg2, the DBAPI only allows serializers at the per-cursor
or per-connection level. E.g.::
engine = create_engine("postgresql://scott:tiger@localhost/test",
json_serializer=my_serialize_fn,
json_deserializer=my_deserialize_fn
)
When using the psycopg2 dialect, the json_deserializer is registered
against the database using ``psycopg2.extras.register_default_json``.
.. seealso::
:class:`_types.JSON` - Core level JSON type
:class:`_postgresql.JSONB`
""" # noqa
astext_type = sqltypes.Text()
def __init__(self, none_as_null=False, astext_type=None):
"""Construct a :class:`_types.JSON` type.
:param none_as_null: if True, persist the value ``None`` as a
SQL NULL value, not the JSON encoding of ``null``. Note that
when this flag is False, the :func:`.null` construct can still
be used to persist a NULL value::
from sqlalchemy import null
conn.execute(table.insert(), data=null())
.. versionchanged:: 0.9.8 - Added ``none_as_null``, and :func:`.null`
is now supported in order to persist a NULL value.
.. seealso::
:attr:`_types.JSON.NULL`
:param astext_type: the type to use for the
:attr:`.JSON.Comparator.astext`
accessor on indexed attributes. Defaults to :class:`_types.Text`.
.. versionadded:: 1.1
"""
super(JSON, self).__init__(none_as_null=none_as_null)
if astext_type is not None:
self.astext_type = astext_type
class Comparator(sqltypes.JSON.Comparator):
"""Define comparison operations for :class:`_types.JSON`."""
@property
def astext(self):
"""On an indexed expression, use the "astext" (e.g. "->>")
conversion when rendered in SQL.
E.g.::
select([data_table.c.data['some key'].astext])
.. seealso::
:meth:`_expression.ColumnElement.cast`
"""
if isinstance(self.expr.right.type, sqltypes.JSON.JSONPathType):
return self.expr.left.operate(
JSONPATH_ASTEXT,
self.expr.right,
result_type=self.type.astext_type,
)
else:
return self.expr.left.operate(
ASTEXT, self.expr.right, result_type=self.type.astext_type
)
comparator_factory = Comparator
class JSONB(JSON):
"""Represent the PostgreSQL JSONB type.
The :class:`_postgresql.JSONB` type stores arbitrary JSONB format data,
e.g.::
data_table = Table('data_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', JSONB)
)
with engine.connect() as conn:
conn.execute(
data_table.insert(),
data = {"key1": "value1", "key2": "value2"}
)
The :class:`_postgresql.JSONB` type includes all operations provided by
:class:`_types.JSON`, including the same behaviors for indexing
operations.
It also adds additional operators specific to JSONB, including
:meth:`.JSONB.Comparator.has_key`, :meth:`.JSONB.Comparator.has_all`,
:meth:`.JSONB.Comparator.has_any`, :meth:`.JSONB.Comparator.contains`,
and :meth:`.JSONB.Comparator.contained_by`.
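For example, as an illustrative sketch against the hypothetical
``data_table`` above::

    data_table.c.data.has_key('key1')                # renders ?
    data_table.c.data.contains({"key1": "value1"})   # renders @>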
Like the :class:`_types.JSON` type, the :class:`_postgresql.JSONB`
type does not detect
in-place changes when used with the ORM, unless the
:mod:`sqlalchemy.ext.mutable` extension is used.
Custom serializers and deserializers
are shared with the :class:`_types.JSON` class,
using the ``json_serializer``
and ``json_deserializer`` keyword arguments. These must be specified
at the dialect level using :func:`_sa.create_engine`. When using
psycopg2, the serializers are associated with the jsonb type using
``psycopg2.extras.register_default_jsonb`` on a per-connection basis,
in the same way that ``psycopg2.extras.register_default_json`` is used
to register these handlers with the json type.
.. versionadded:: 0.9.7
.. seealso::
:class:`_types.JSON`
"""
__visit_name__ = "JSONB"
class Comparator(JSON.Comparator):
"""Define comparison operations for :class:`_types.JSON`."""
def has_key(self, other):
"""Boolean expression. Test for presence of a key. Note that the
key may be a SQLA expression.
"""
return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
def has_all(self, other):
"""Boolean expression. Test for presence of all keys in jsonb
"""
return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
def has_any(self, other):
"""Boolean expression. Test for presence of any key in jsonb
"""
return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
def contains(self, other, **kwargs):
"""Boolean expression. Test if keys (or array) are a superset
of/contained the keys of the argument jsonb expression.
"""
return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if keys are a proper subset of the
keys of the argument jsonb expression.
"""
return self.operate(
CONTAINED_BY, other, result_type=sqltypes.Boolean
)
comparator_factory = Comparator
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/base.py | # postgresql/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: postgresql
:name: PostgreSQL
.. _postgresql_sequences:
Sequences/SERIAL/IDENTITY
-------------------------
PostgreSQL supports sequences, and SQLAlchemy uses these as the default means
of creating new primary key values for integer-based primary key columns. When
creating tables, SQLAlchemy will issue the ``SERIAL`` datatype for
integer-based primary key columns, which generates a sequence and server side
default corresponding to the column.
To specify a specific named sequence to be used for primary key generation,
use the :func:`~sqlalchemy.schema.Sequence` construct::
Table('sometable', metadata,
Column('id', Integer, Sequence('some_id_seq'), primary_key=True)
)
When SQLAlchemy issues a single INSERT statement, to fulfill the contract of
having the "last insert identifier" available, a RETURNING clause is added to
the INSERT statement which specifies the primary key columns should be
returned after the statement completes. The RETURNING functionality only takes
place if PostgreSQL 8.2 or later is in use. As a fallback approach, the
sequence, whether specified explicitly or implicitly via ``SERIAL``, is
executed independently beforehand, the returned value to be used in the
subsequent insert. Note that when an
:func:`~sqlalchemy.sql.expression.insert()` construct is executed using
"executemany" semantics, the "last inserted identifier" functionality does not
apply; no RETURNING clause is emitted nor is the sequence pre-executed in this
case.
To disable the usage of RETURNING by default, specify the flag
``implicit_returning=False`` to :func:`_sa.create_engine`.
PostgreSQL 10 IDENTITY columns
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
PostgreSQL 10 has a new IDENTITY feature that supersedes the use of SERIAL.
Built-in support for rendering of IDENTITY is not available yet, however the
following compilation hook may be used to replace occurrences of SERIAL with
IDENTITY::
from sqlalchemy.schema import CreateColumn
from sqlalchemy.ext.compiler import compiles
@compiles(CreateColumn, 'postgresql')
def use_identity(element, compiler, **kw):
text = compiler.visit_create_column(element, **kw)
text = text.replace("SERIAL", "INT GENERATED BY DEFAULT AS IDENTITY")
return text
Using the above, a table such as::
t = Table(
't', m,
Column('id', Integer, primary_key=True),
Column('data', String)
)
Will generate on the backing database as::
CREATE TABLE t (
id INT GENERATED BY DEFAULT AS IDENTITY NOT NULL,
data VARCHAR,
PRIMARY KEY (id)
)
.. _postgresql_isolation_level:
Transaction Isolation Level
---------------------------
All PostgreSQL dialects support setting of transaction isolation level
both via a dialect-specific parameter
:paramref:`_sa.create_engine.isolation_level` accepted by
:func:`_sa.create_engine`,
as well as the :paramref:`.Connection.execution_options.isolation_level`
argument as passed to :meth:`_engine.Connection.execution_options`.
When using a non-psycopg2 dialect, this feature works by issuing the command
``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL <level>`` for
each new connection. For the special AUTOCOMMIT isolation level,
DBAPI-specific techniques are used.
To set isolation level using :func:`_sa.create_engine`::
engine = create_engine(
"postgresql+pg8000://scott:tiger@localhost/test",
isolation_level="READ UNCOMMITTED"
)
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="READ COMMITTED"
)
Valid values for ``isolation_level`` include:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT`` - on psycopg2 / pg8000 only
.. seealso::
:ref:`psycopg2_isolation_level`
:ref:`pg8000_isolation_level`
.. _postgresql_schema_reflection:
Remote-Schema Table Introspection and PostgreSQL search_path
------------------------------------------------------------
**TL;DR**: keep the ``search_path`` variable set to its default of ``public``,
name schemas **other** than ``public`` explicitly within ``Table`` definitions.
The PostgreSQL dialect can reflect tables from any schema. The
:paramref:`_schema.Table.schema` argument, or alternatively the
:paramref:`.MetaData.reflect.schema` argument determines which schema will
be searched for the table or tables. The reflected :class:`_schema.Table`
objects
will in all cases retain this ``.schema`` attribute as was specified.
However, with regards to tables which these :class:`_schema.Table`
objects refer to
via foreign key constraint, a decision must be made as to how the ``.schema``
is represented in those remote tables, in the case where that remote
schema name is also a member of the current
`PostgreSQL search path
<http://www.postgresql.org/docs/current/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_.
By default, the PostgreSQL dialect mimics the behavior encouraged by
PostgreSQL's own ``pg_get_constraintdef()`` builtin procedure. This function
returns a sample definition for a particular foreign key constraint,
omitting the referenced schema name from that definition when the name is
also in the PostgreSQL schema search path. The interaction below
illustrates this behavior::
test=> CREATE TABLE test_schema.referred(id INTEGER PRIMARY KEY);
CREATE TABLE
test=> CREATE TABLE referring(
test(> id INTEGER PRIMARY KEY,
test(> referred_id INTEGER REFERENCES test_schema.referred(id));
CREATE TABLE
test=> SET search_path TO public, test_schema;
test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
test-> ON n.oid = c.relnamespace
test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
test-> WHERE c.relname='referring' AND r.contype = 'f'
test-> ;
pg_get_constraintdef
---------------------------------------------------
FOREIGN KEY (referred_id) REFERENCES referred(id)
(1 row)
Above, we created a table ``referred`` as a member of the remote schema
``test_schema``, however when we added ``test_schema`` to the
PG ``search_path`` and then asked ``pg_get_constraintdef()`` for the
``FOREIGN KEY`` syntax, ``test_schema`` was not included in the output of
the function.
On the other hand, if we set the search path back to the typical default
of ``public``::
test=> SET search_path TO public;
SET
The same query against ``pg_get_constraintdef()`` now returns the fully
schema-qualified name for us::
test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
test-> ON n.oid = c.relnamespace
test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
test-> WHERE c.relname='referring' AND r.contype = 'f';
pg_get_constraintdef
---------------------------------------------------------------
FOREIGN KEY (referred_id) REFERENCES test_schema.referred(id)
(1 row)
SQLAlchemy will by default use the return value of ``pg_get_constraintdef()``
in order to determine the remote schema name. That is, if our ``search_path``
were set to include ``test_schema``, and we invoked a table
reflection process as follows::
>>> from sqlalchemy import Table, MetaData, create_engine
>>> engine = create_engine("postgresql://scott:tiger@localhost/test")
>>> with engine.connect() as conn:
... conn.execute("SET search_path TO test_schema, public")
... meta = MetaData()
... referring = Table('referring', meta,
... autoload=True, autoload_with=conn)
...
<sqlalchemy.engine.result.ResultProxy object at 0x101612ed0>
The above process would deliver to the :attr:`_schema.MetaData.tables`
collection a ``referred`` table named **without** the schema::
>>> meta.tables['referred'].schema is None
True
To alter the behavior of reflection such that the referred schema is
maintained regardless of the ``search_path`` setting, use the
``postgresql_ignore_search_path`` option, which can be specified as a
dialect-specific argument to both :class:`_schema.Table` as well as
:meth:`_schema.MetaData.reflect`::
>>> with engine.connect() as conn:
... conn.execute("SET search_path TO test_schema, public")
... meta = MetaData()
... referring = Table('referring', meta, autoload=True,
... autoload_with=conn,
... postgresql_ignore_search_path=True)
...
<sqlalchemy.engine.result.ResultProxy object at 0x1016126d0>
We will now have ``test_schema.referred`` stored as schema-qualified::
>>> meta.tables['test_schema.referred'].schema
'test_schema'
.. sidebar:: Best Practices for PostgreSQL Schema reflection
The description of PostgreSQL schema reflection behavior is complex, and
is the product of many years of dealing with widely varied use cases and
user preferences. But in fact, there's no need to understand any of it if
you just stick to the simplest use pattern: leave the ``search_path`` set
to its default of ``public`` only, never refer to the name ``public`` as
an explicit schema name otherwise, and refer to all other schema names
explicitly when building up a :class:`_schema.Table` object. The options
described here are only for those users who can't, or prefer not to, stay
within these guidelines.
Note that **in all cases**, the "default" schema is always reflected as
``None``. The "default" schema on PostgreSQL is that which is returned by the
PostgreSQL ``current_schema()`` function. On a typical PostgreSQL
installation, this is the name ``public``. So a table that refers to another
which is in the ``public`` (i.e. default) schema will always have the
``.schema`` attribute set to ``None``.
.. versionadded:: 0.9.2 Added the ``postgresql_ignore_search_path``
dialect-level option accepted by :class:`_schema.Table` and
:meth:`_schema.MetaData.reflect`.
.. seealso::
`The Schema Search Path
<http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_
- on the PostgreSQL website.
INSERT/UPDATE...RETURNING
-------------------------
The dialect supports PG 8.2's ``INSERT..RETURNING``, ``UPDATE..RETURNING`` and
``DELETE..RETURNING`` syntaxes. ``INSERT..RETURNING`` is used by default
for single-row INSERT statements in order to fetch newly generated
primary key identifiers. To specify an explicit ``RETURNING`` clause,
use the :meth:`._UpdateBase.returning` method on a per-statement basis::
# INSERT..RETURNING
result = table.insert().returning(table.c.col1, table.c.col2).\
values(name='foo')
print(result.fetchall())
# UPDATE..RETURNING
result = table.update().returning(table.c.col1, table.c.col2).\
where(table.c.name=='foo').values(name='bar')
print(result.fetchall())
# DELETE..RETURNING
result = table.delete().returning(table.c.col1, table.c.col2).\
where(table.c.name=='foo')
print(result.fetchall())
.. _postgresql_insert_on_conflict:
INSERT...ON CONFLICT (Upsert)
------------------------------
Starting with version 9.5, PostgreSQL allows "upserts" (update or insert) of
rows into a table via the ``ON CONFLICT`` clause of the ``INSERT`` statement. A
candidate row will only be inserted if that row does not violate any unique
constraints. In the case of a unique constraint violation, a secondary action
can occur which can be either "DO UPDATE", indicating that the data in the
target row should be updated, or "DO NOTHING", which indicates to silently skip
this row.
Conflicts are determined using existing unique constraints and indexes. These
constraints may be identified either using their name as stated in DDL,
or they may be *inferred* by stating the columns and conditions that comprise
the indexes.
SQLAlchemy provides ``ON CONFLICT`` support via the PostgreSQL-specific
:func:`_postgresql.insert()` function, which provides
the generative methods :meth:`~.postgresql.Insert.on_conflict_do_update`
and :meth:`~.postgresql.Insert.on_conflict_do_nothing`::
from sqlalchemy.dialects.postgresql import insert
insert_stmt = insert(my_table).values(
id='some_existing_id',
data='inserted value')
do_nothing_stmt = insert_stmt.on_conflict_do_nothing(
index_elements=['id']
)
conn.execute(do_nothing_stmt)
do_update_stmt = insert_stmt.on_conflict_do_update(
constraint='pk_my_table',
set_=dict(data='updated value')
)
conn.execute(do_update_stmt)
Both methods supply the "target" of the conflict using either the
named constraint or by column inference:
* The :paramref:`.Insert.on_conflict_do_update.index_elements` argument
specifies a sequence containing string column names, :class:`_schema.Column`
objects, and/or SQL expression elements, which would identify a unique
index::
do_update_stmt = insert_stmt.on_conflict_do_update(
index_elements=['id'],
set_=dict(data='updated value')
)
do_update_stmt = insert_stmt.on_conflict_do_update(
index_elements=[my_table.c.id],
set_=dict(data='updated value')
)
* When using :paramref:`.Insert.on_conflict_do_update.index_elements` to
infer an index, a partial index can be inferred by also specifying
the :paramref:`.Insert.on_conflict_do_update.index_where` parameter::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(user_email='a@b.com', data='inserted data')
stmt = stmt.on_conflict_do_update(
index_elements=[my_table.c.user_email],
index_where=my_table.c.user_email.like('%@gmail.com'),
set_=dict(data=stmt.excluded.data)
)
conn.execute(stmt)
* The :paramref:`.Insert.on_conflict_do_update.constraint` argument is
used to specify an index directly rather than inferring it. This can be
the name of a UNIQUE constraint, a PRIMARY KEY constraint, or an INDEX::
do_update_stmt = insert_stmt.on_conflict_do_update(
constraint='my_table_idx_1',
set_=dict(data='updated value')
)
do_update_stmt = insert_stmt.on_conflict_do_update(
constraint='my_table_pk',
set_=dict(data='updated value')
)
* The :paramref:`.Insert.on_conflict_do_update.constraint` argument may
also refer to a SQLAlchemy construct representing a constraint,
e.g. :class:`.UniqueConstraint`, :class:`.PrimaryKeyConstraint`,
:class:`.Index`, or :class:`.ExcludeConstraint`. In this use,
if the constraint has a name, it is used directly. Otherwise, if the
constraint is unnamed, then inference will be used, where the expressions
and optional WHERE clause of the constraint will be spelled out in the
construct. This use is especially convenient
to refer to the named or unnamed primary key of a :class:`_schema.Table`
using the
:attr:`_schema.Table.primary_key` attribute::
do_update_stmt = insert_stmt.on_conflict_do_update(
constraint=my_table.primary_key,
set_=dict(data='updated value')
)
``ON CONFLICT...DO UPDATE`` is used to perform an update of the already
existing row, using any combination of new values as well as values
from the proposed insertion. These values are specified using the
:paramref:`.Insert.on_conflict_do_update.set_` parameter. This
parameter accepts a dictionary which consists of direct values
for UPDATE::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(id='some_id', data='inserted value')
do_update_stmt = stmt.on_conflict_do_update(
index_elements=['id'],
set_=dict(data='updated value')
)
conn.execute(do_update_stmt)
.. warning::
The :meth:`_expression.Insert.on_conflict_do_update`
method does **not** take into
account Python-side default UPDATE values or generation functions, e.g.
those specified using :paramref:`_schema.Column.onupdate`.
These values will not be exercised for an ON CONFLICT style of UPDATE,
unless they are manually specified in the
:paramref:`.Insert.on_conflict_do_update.set_` dictionary.
In order to refer to the proposed insertion row, the special alias
:attr:`~.postgresql.Insert.excluded` is available as an attribute on
the :class:`_postgresql.Insert` object; this object is a
:class:`_expression.ColumnCollection`
which contains all columns of the target
table::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(
id='some_id',
data='inserted value',
author='jlh')
do_update_stmt = stmt.on_conflict_do_update(
index_elements=['id'],
set_=dict(data='updated value', author=stmt.excluded.author)
)
conn.execute(do_update_stmt)
The :meth:`_expression.Insert.on_conflict_do_update` method also accepts
a WHERE clause using the :paramref:`.Insert.on_conflict_do_update.where`
parameter, which will limit those rows which receive an UPDATE::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(
id='some_id',
data='inserted value',
author='jlh')
on_update_stmt = stmt.on_conflict_do_update(
index_elements=['id'],
set_=dict(data='updated value', author=stmt.excluded.author),
where=(my_table.c.status == 2)
)
conn.execute(on_update_stmt)
``ON CONFLICT`` may also be used to skip inserting a row entirely
if any conflict with a unique or exclusion constraint occurs; below
this is illustrated using the
:meth:`~.postgresql.Insert.on_conflict_do_nothing` method::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(id='some_id', data='inserted value')
stmt = stmt.on_conflict_do_nothing(index_elements=['id'])
conn.execute(stmt)
If ``DO NOTHING`` is used without specifying any columns or constraint,
it has the effect of skipping the INSERT for any unique or exclusion
constraint violation which occurs::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(id='some_id', data='inserted value')
stmt = stmt.on_conflict_do_nothing()
conn.execute(stmt)
.. versionadded:: 1.1 Added support for PostgreSQL ON CONFLICT clauses
.. seealso::
`INSERT .. ON CONFLICT
<http://www.postgresql.org/docs/current/static/sql-insert.html#SQL-ON-CONFLICT>`_
- in the PostgreSQL documentation.
.. _postgresql_match:
Full Text Search
----------------
SQLAlchemy makes available the PostgreSQL ``@@`` operator via the
:meth:`_expression.ColumnElement.match`
method on any textual column expression.
On a PostgreSQL dialect, an expression like the following::
select([sometable.c.text.match("search string")])
will emit to the database::
SELECT text @@ to_tsquery('search string') FROM table
The PostgreSQL text search functions such as ``to_tsquery()``
and ``to_tsvector()`` are available
explicitly using the standard :data:`.func` construct. For example::
select([
func.to_tsvector('fat cats ate rats').match('cat & rat')
])
Emits the equivalent of::
SELECT to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat')
The :class:`_postgresql.TSVECTOR` type can provide for explicit CAST::
from sqlalchemy.dialects.postgresql import TSVECTOR
from sqlalchemy import select, cast
select([cast("some text", TSVECTOR)])
produces a statement equivalent to::
SELECT CAST('some text' AS TSVECTOR) AS anon_1
Full Text Searches in PostgreSQL are influenced by a combination of: the
PostgreSQL setting of ``default_text_search_config``, the ``regconfig`` used
to build the GIN/GiST indexes, and the ``regconfig`` optionally passed in
during a query.
When performing a Full Text Search against a column that has a GIN or
GiST index that is already pre-computed (which is common on full text
searches) one may need to explicitly pass in a particular PostgreSQL
``regconfig`` value to ensure the query-planner utilizes the index and does
not re-compute the column on demand.
In order to provide for this explicit query planning, or to use different
search strategies, the ``match`` method accepts a ``postgresql_regconfig``
keyword argument::
select([mytable.c.id]).where(
mytable.c.title.match('somestring', postgresql_regconfig='english')
)
Emits the equivalent of::
SELECT mytable.id FROM mytable
WHERE mytable.title @@ to_tsquery('english', 'somestring')
One can also specifically pass in a ``regconfig`` value to the
``to_tsvector()`` command as the initial argument::
select([mytable.c.id]).where(
func.to_tsvector('english', mytable.c.title )\
.match('somestring', postgresql_regconfig='english')
)
produces a statement equivalent to::
SELECT mytable.id FROM mytable
WHERE to_tsvector('english', mytable.title) @@
to_tsquery('english', 'somestring')
It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from
PostgreSQL to ensure that you are generating queries with SQLAlchemy that
take full advantage of any indexes you may have created for full text search.
FROM ONLY ...
-------------
The dialect supports PostgreSQL's ONLY keyword for targeting only a particular
table in an inheritance hierarchy. This can be used to produce the
``SELECT ... FROM ONLY``, ``UPDATE ONLY ...``, and ``DELETE FROM ONLY ...``
syntaxes. It uses SQLAlchemy's hints mechanism::
# SELECT ... FROM ONLY ...
result = table.select().with_hint(table, 'ONLY', 'postgresql')
print(result.fetchall())
# UPDATE ONLY ...
table.update(values=dict(foo='bar')).with_hint('ONLY',
dialect_name='postgresql')
# DELETE FROM ONLY ...
table.delete().with_hint('ONLY', dialect_name='postgresql')
.. _postgresql_indexes:
PostgreSQL-Specific Index Options
---------------------------------
Several extensions to the :class:`.Index` construct are available, specific
to the PostgreSQL dialect.
.. _postgresql_partial_indexes:
Partial Indexes
^^^^^^^^^^^^^^^
Partial indexes add criteria to the index definition so that the index is
applied to a subset of rows. These can be specified on :class:`.Index`
using the ``postgresql_where`` keyword argument::
Index('my_index', my_table.c.id, postgresql_where=my_table.c.value > 10)
Operator Classes
^^^^^^^^^^^^^^^^
PostgreSQL allows the specification of an *operator class* for each column of
an index (see
http://www.postgresql.org/docs/8.3/interactive/indexes-opclass.html).
The :class:`.Index` construct allows these to be specified via the
``postgresql_ops`` keyword argument::
Index(
'my_index', my_table.c.id, my_table.c.data,
postgresql_ops={
'data': 'text_pattern_ops',
'id': 'int4_ops'
})
Note that the keys in the ``postgresql_ops`` dictionary are the "key" name of
the :class:`_schema.Column`, i.e. the name used to access it from the ``.c``
collection of :class:`_schema.Table`,
which can be configured to be different than
the actual name of the column as expressed in the database.
If ``postgresql_ops`` is to be used against a complex SQL expression such
as a function call, then to apply to the column it must be given a label
that is identified in the dictionary by name, e.g.::
Index(
'my_index', my_table.c.id,
func.lower(my_table.c.data).label('data_lower'),
postgresql_ops={
'data_lower': 'text_pattern_ops',
'id': 'int4_ops'
})
Index Types
^^^^^^^^^^^
PostgreSQL provides several index types: B-Tree, Hash, GiST, and GIN, as well
as the ability for users to create their own (see
http://www.postgresql.org/docs/8.3/static/indexes-types.html). These can be
specified on :class:`.Index` using the ``postgresql_using`` keyword argument::
Index('my_index', my_table.c.data, postgresql_using='gin')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX command, so it *must* be a valid index type for your
version of PostgreSQL.
.. _postgresql_index_storage:
Index Storage Parameters
^^^^^^^^^^^^^^^^^^^^^^^^
PostgreSQL allows storage parameters to be set on indexes. The storage
parameters available depend on the index method used by the index. Storage
parameters can be specified on :class:`.Index` using the ``postgresql_with``
keyword argument::
Index('my_index', my_table.c.data, postgresql_with={"fillfactor": 50})
.. versionadded:: 1.0.6
PostgreSQL allows defining the tablespace in which to create the index.
The tablespace can be specified on :class:`.Index` using the
``postgresql_tablespace`` keyword argument::
Index('my_index', my_table.c.data, postgresql_tablespace='my_tablespace')
.. versionadded:: 1.1
Note that the same option is available on :class:`_schema.Table` as well.
.. _postgresql_index_concurrently:
Indexes with CONCURRENTLY
^^^^^^^^^^^^^^^^^^^^^^^^^
The PostgreSQL index option CONCURRENTLY is supported by passing the
flag ``postgresql_concurrently`` to the :class:`.Index` construct::
tbl = Table('testtbl', m, Column('data', Integer))
idx1 = Index('test_idx1', tbl.c.data, postgresql_concurrently=True)
The above index construct will render DDL for CREATE INDEX, assuming
PostgreSQL 8.2 or higher is detected or for a connection-less dialect, as::
CREATE INDEX CONCURRENTLY test_idx1 ON testtbl (data)
For DROP INDEX, assuming PostgreSQL 9.2 or higher is detected or for
a connection-less dialect, it will emit::
DROP INDEX CONCURRENTLY test_idx1
.. versionadded:: 1.1 support for CONCURRENTLY on DROP INDEX. The
CONCURRENTLY keyword is now only emitted if a high enough version
of PostgreSQL is detected on the connection (or for a connection-less
dialect).
When using CONCURRENTLY, the PostgreSQL database requires that the statement
be invoked outside of a transaction block. The Python DBAPI enforces that
even for a single statement, a transaction is present, so to use this
construct, the DBAPI's "autocommit" mode must be used::
metadata = MetaData()
table = Table(
"foo", metadata,
Column("id", String))
index = Index(
"foo_idx", table.c.id, postgresql_concurrently=True)
with engine.connect() as conn:
with conn.execution_options(isolation_level='AUTOCOMMIT'):
table.create(conn)
.. seealso::
:ref:`postgresql_isolation_level`
.. _postgresql_index_reflection:
PostgreSQL Index Reflection
---------------------------
The PostgreSQL database creates a UNIQUE INDEX implicitly whenever the
UNIQUE CONSTRAINT construct is used. When inspecting a table using
:class:`_reflection.Inspector`, the :meth:`_reflection.Inspector.get_indexes`
and the :meth:`_reflection.Inspector.get_unique_constraints`
will report on these
two constructs distinctly; in the case of the index, the key
``duplicates_constraint`` will be present in the index entry if it is
detected as mirroring a constraint. When performing reflection using
``Table(..., autoload=True)``, the UNIQUE INDEX is **not** returned
in :attr:`_schema.Table.indexes` when it is detected as mirroring a
:class:`.UniqueConstraint` in the :attr:`_schema.Table.constraints`
collection.
.. versionchanged:: 1.0.0 - :class:`_schema.Table` reflection now includes
:class:`.UniqueConstraint` objects present in the
:attr:`_schema.Table.constraints`
collection; the PostgreSQL backend will no longer include a "mirrored"
:class:`.Index` construct in :attr:`_schema.Table.indexes`
if it is detected
as corresponding to a unique constraint.
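For example, a minimal sketch of inspecting these entries (the table
name ``'t'`` is hypothetical)::

    from sqlalchemy import create_engine, inspect

    engine = create_engine("postgresql+psycopg2://localhost/test")
    insp = inspect(engine)
    for idx in insp.get_indexes('t'):
        if 'duplicates_constraint' in idx:
            print(idx['name'], idx['duplicates_constraint'])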
Special Reflection Options
--------------------------
The :class:`_reflection.Inspector`
used for the PostgreSQL backend is an instance
of :class:`.PGInspector`, which offers additional methods::
from sqlalchemy import create_engine, inspect
engine = create_engine("postgresql+psycopg2://localhost/test")
insp = inspect(engine) # will be a PGInspector
print(insp.get_enums())
.. autoclass:: PGInspector
:members:
.. _postgresql_table_options:
PostgreSQL Table Options
------------------------
Several options for CREATE TABLE are supported directly by the PostgreSQL
dialect in conjunction with the :class:`_schema.Table` construct:
* ``TABLESPACE``::
Table("some_table", metadata, ..., postgresql_tablespace='some_tablespace')
The above option is also available on the :class:`.Index` construct.
* ``ON COMMIT``::
Table("some_table", metadata, ..., postgresql_on_commit='PRESERVE ROWS')
* ``WITH OIDS``::
Table("some_table", metadata, ..., postgresql_with_oids=True)
* ``WITHOUT OIDS``::
Table("some_table", metadata, ..., postgresql_with_oids=False)
* ``INHERITS``::
Table("some_table", metadata, ..., postgresql_inherits="some_supertable")
Table("some_table", metadata, ..., postgresql_inherits=("t1", "t2", ...))
.. versionadded:: 1.0.0
* ``PARTITION BY``::
Table("some_table", metadata, ...,
postgresql_partition_by='LIST (part_column)')
.. versionadded:: 1.2.6
.. seealso::
`PostgreSQL CREATE TABLE options
<http://www.postgresql.org/docs/current/static/sql-createtable.html>`_
Table values, Row and Tuple objects
-----------------------------------
Row Types
^^^^^^^^^
Built-in support for rendering a ``ROW`` is not available yet, however the
:func:`_expression.tuple_` may be used in its place. Another alternative is
to use the :attr:`_sa.func` generator with ``func.ROW``::
table.select().where(
tuple_(table.c.id, table.c.fk) > (1,2)
).where(func.ROW(table.c.id, table.c.fk) < func.ROW(3, 7))
Will generate the row-wise comparison::
SELECT *
FROM table
WHERE (id, fk) > (1, 2)
AND ROW(id, fk) < ROW(3, 7)
.. seealso::
`PostgreSQL Row Constructors
<https://www.postgresql.org/docs/current/sql-expressions.html#SQL-SYNTAX-ROW-CONSTRUCTORS>`_
`PostgreSQL Row Constructor Comparison
<https://www.postgresql.org/docs/current/functions-comparisons.html#ROW-WISE-COMPARISON>`_
Table Types
^^^^^^^^^^^
PostgreSQL also supports passing a table as an argument to a function. This
is not available yet in SQLAlchemy, however the
:func:`_expression.literal_column` function with the name of the table may be
used in its place::
select(['*']).select_from(func.my_function(literal_column('my_table')))
Will generate the SQL::
SELECT *
FROM my_function(my_table)
ARRAY Types
-----------
The PostgreSQL dialect supports arrays, both as multidimensional column types
as well as array literals:
* :class:`_postgresql.ARRAY` - ARRAY datatype
* :class:`_postgresql.array` - array literal
* :func:`_postgresql.array_agg` - ARRAY_AGG SQL function
* :class:`_postgresql.aggregate_order_by` - helper for PG's ORDER BY aggregate
function syntax.
JSON Types
----------
The PostgreSQL dialect supports both JSON and JSONB datatypes, including
psycopg2's native support and support for all of PostgreSQL's special
operators:
* :class:`_postgresql.JSON`
* :class:`_postgresql.JSONB`
HSTORE Type
-----------
The PostgreSQL HSTORE type as well as hstore literals are supported:
* :class:`_postgresql.HSTORE` - HSTORE datatype
* :class:`_postgresql.hstore` - hstore literal
ENUM Types
----------
PostgreSQL has an independently creatable TYPE structure which is used
to implement an enumerated type. This approach introduces significant
complexity on the SQLAlchemy side in terms of when this type should be
CREATED and DROPPED. The type object is also an independently reflectable
entity. The following sections should be consulted:
* :class:`_postgresql.ENUM` - DDL and typing support for ENUM.
* :meth:`.PGInspector.get_enums` - retrieve a listing of current ENUM types
* :meth:`.postgresql.ENUM.create`, :meth:`.postgresql.ENUM.drop` - individual
CREATE and DROP commands for ENUM.
.. _postgresql_array_of_enum:
Using ENUM with ARRAY
^^^^^^^^^^^^^^^^^^^^^
The combination of ENUM and ARRAY is not directly supported by backend
DBAPIs at this time. Prior to SQLAlchemy 1.3.17, a special workaround
was needed in order to allow this combination to work, described below.
.. versionchanged:: 1.3.17 The combination of ENUM and ARRAY is now directly
handled by SQLAlchemy's implementation without any workarounds needed.
.. sourcecode:: python
from sqlalchemy import TypeDecorator
from sqlalchemy.dialects.postgresql import ARRAY
class ArrayOfEnum(TypeDecorator):
impl = ARRAY
def bind_expression(self, bindvalue):
return sa.cast(bindvalue, self)
def result_processor(self, dialect, coltype):
super_rp = super(ArrayOfEnum, self).result_processor(
dialect, coltype)
def handle_raw_string(value):
inner = re.match(r"^{(.*)}$", value).group(1)
return inner.split(",") if inner else []
def process(value):
if value is None:
return None
return super_rp(handle_raw_string(value))
return process
E.g.::
Table(
'mydata', metadata,
Column('id', Integer, primary_key=True),
Column('data', ArrayOfEnum(ENUM('a', 'b', 'c', name='myenum')))
)
This type is not included as a built-in type as it would be incompatible
with a DBAPI that suddenly decides to support ARRAY of ENUM directly in
a new version.
.. _postgresql_array_of_json:
Using JSON/JSONB with ARRAY
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Similar to using ENUM, prior to SQLAlchemy 1.3.17, for an ARRAY of JSON/JSONB
we needed to render the appropriate CAST. Current psycopg2 drivers accommodate
the result set correctly without any special steps.
.. versionchanged:: 1.3.17 The combination of JSON/JSONB and ARRAY is now
directly handled by SQLAlchemy's implementation without any workarounds
needed.
.. sourcecode:: python
class CastingArray(ARRAY):
def bind_expression(self, bindvalue):
return sa.cast(bindvalue, self)
E.g.::
Table(
'mydata', metadata,
Column('id', Integer, primary_key=True),
Column('data', CastingArray(JSONB))
)
"""
from collections import defaultdict
import datetime as dt
import re
from . import array as _array
from . import hstore as _hstore
from . import json as _json
from . import ranges as _ranges
from ... import exc
from ... import schema
from ... import sql
from ... import util
from ...engine import default
from ...engine import reflection
from ...sql import compiler
from ...sql import elements
from ...sql import expression
from ...sql import sqltypes
from ...sql import util as sql_util
from ...types import BIGINT
from ...types import BOOLEAN
from ...types import CHAR
from ...types import DATE
from ...types import FLOAT
from ...types import INTEGER
from ...types import NUMERIC
from ...types import REAL
from ...types import SMALLINT
from ...types import TEXT
from ...types import VARCHAR
try:
from uuid import UUID as _python_UUID # noqa
except ImportError:
_python_UUID = None
IDX_USING = re.compile(r"^(?:btree|hash|gist|gin|[\w_]+)$", re.I)
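# statements which, when executed with no transaction in progress,
# should prompt the legacy connection-level "autocommit" COMMIT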
AUTOCOMMIT_REGEXP = re.compile(
r"\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|GRANT|REVOKE|"
"IMPORT FOREIGN SCHEMA|REFRESH MATERIALIZED VIEW|TRUNCATE)",
re.I | re.UNICODE,
)
RESERVED_WORDS = set(
[
"all",
"analyse",
"analyze",
"and",
"any",
"array",
"as",
"asc",
"asymmetric",
"both",
"case",
"cast",
"check",
"collate",
"column",
"constraint",
"create",
"current_catalog",
"current_date",
"current_role",
"current_time",
"current_timestamp",
"current_user",
"default",
"deferrable",
"desc",
"distinct",
"do",
"else",
"end",
"except",
"false",
"fetch",
"for",
"foreign",
"from",
"grant",
"group",
"having",
"in",
"initially",
"intersect",
"into",
"leading",
"limit",
"localtime",
"localtimestamp",
"new",
"not",
"null",
"of",
"off",
"offset",
"old",
"on",
"only",
"or",
"order",
"placing",
"primary",
"references",
"returning",
"select",
"session_user",
"some",
"symmetric",
"table",
"then",
"to",
"trailing",
"true",
"union",
"unique",
"user",
"using",
"variadic",
"when",
"where",
"window",
"with",
"authorization",
"between",
"binary",
"cross",
"current_schema",
"freeze",
"full",
"ilike",
"inner",
"is",
"isnull",
"join",
"left",
"like",
"natural",
"notnull",
"outer",
"over",
"overlaps",
"right",
"similar",
"verbose",
]
)
_DECIMAL_TYPES = (1231, 1700)
_FLOAT_TYPES = (700, 701, 1021, 1022)
_INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016)
class BYTEA(sqltypes.LargeBinary):
__visit_name__ = "BYTEA"
class DOUBLE_PRECISION(sqltypes.Float):
__visit_name__ = "DOUBLE_PRECISION"
class INET(sqltypes.TypeEngine):
__visit_name__ = "INET"
PGInet = INET
class CIDR(sqltypes.TypeEngine):
__visit_name__ = "CIDR"
PGCidr = CIDR
class MACADDR(sqltypes.TypeEngine):
__visit_name__ = "MACADDR"
PGMacAddr = MACADDR
class MONEY(sqltypes.TypeEngine):
"""Provide the PostgreSQL MONEY type.
.. versionadded:: 1.2
"""
__visit_name__ = "MONEY"
class OID(sqltypes.TypeEngine):
"""Provide the PostgreSQL OID type.
.. versionadded:: 0.9.5
"""
__visit_name__ = "OID"
class REGCLASS(sqltypes.TypeEngine):
"""Provide the PostgreSQL REGCLASS type.
.. versionadded:: 1.2.7
"""
__visit_name__ = "REGCLASS"
class TIMESTAMP(sqltypes.TIMESTAMP):
def __init__(self, timezone=False, precision=None):
super(TIMESTAMP, self).__init__(timezone=timezone)
self.precision = precision
class TIME(sqltypes.TIME):
def __init__(self, timezone=False, precision=None):
super(TIME, self).__init__(timezone=timezone)
self.precision = precision
class INTERVAL(sqltypes.NativeForEmulated, sqltypes._AbstractInterval):
"""PostgreSQL INTERVAL type.
The INTERVAL type may not be supported on all DBAPIs.
It is known to work on psycopg2 and not pg8000 or zxjdbc.
"""
__visit_name__ = "INTERVAL"
native = True
def __init__(self, precision=None, fields=None):
"""Construct an INTERVAL.
:param precision: optional integer precision value
:param fields: string fields specifier. allows storage of fields
to be limited, such as ``"YEAR"``, ``"MONTH"``, ``"DAY TO HOUR"``,
etc.
.. versionadded:: 1.2
"""
self.precision = precision
self.fields = fields
@classmethod
def adapt_emulated_to_native(cls, interval, **kw):
return INTERVAL(precision=interval.second_precision)
@property
def _type_affinity(self):
return sqltypes.Interval
@property
def python_type(self):
return dt.timedelta
PGInterval = INTERVAL
class BIT(sqltypes.TypeEngine):
__visit_name__ = "BIT"
def __init__(self, length=None, varying=False):
if not varying:
# BIT without VARYING defaults to length 1
self.length = length or 1
else:
# but BIT VARYING can be unlimited-length, so no default
self.length = length
self.varying = varying
PGBit = BIT
class UUID(sqltypes.TypeEngine):
"""PostgreSQL UUID type.
Represents the UUID column type, interpreting
data either as natively returned by the DBAPI
or as Python uuid objects.
The UUID type may not be supported on all DBAPIs.
It is known to work on psycopg2 and not pg8000.
"""
__visit_name__ = "UUID"
def __init__(self, as_uuid=False):
"""Construct a UUID type.
:param as_uuid=False: if True, values will be interpreted
as Python uuid objects, converting to/from string via the
DBAPI.
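E.g., a sketch::

    from uuid import uuid4

    Column('id', UUID(as_uuid=True), primary_key=True, default=uuid4)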
"""
if as_uuid and _python_UUID is None:
raise NotImplementedError(
"This version of Python does not support "
"the native UUID type."
)
self.as_uuid = as_uuid
def bind_processor(self, dialect):
if self.as_uuid:
def process(value):
if value is not None:
value = util.text_type(value)
return value
return process
else:
return None
def result_processor(self, dialect, coltype):
if self.as_uuid:
def process(value):
if value is not None:
value = _python_UUID(value)
return value
return process
else:
return None
PGUuid = UUID
class TSVECTOR(sqltypes.TypeEngine):
"""The :class:`_postgresql.TSVECTOR` type implements the PostgreSQL
text search type TSVECTOR.
It can be used to do full text queries on natural language
documents.
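    A minimal sketch (the column name is illustrative)::
        Column("body_search", TSVECTOR)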
.. versionadded:: 0.9.0
.. seealso::
:ref:`postgresql_match`
"""
__visit_name__ = "TSVECTOR"
class ENUM(sqltypes.NativeForEmulated, sqltypes.Enum):
"""PostgreSQL ENUM type.
This is a subclass of :class:`_types.Enum` which includes
support for PG's ``CREATE TYPE`` and ``DROP TYPE``.
When the builtin type :class:`_types.Enum` is used and the
:paramref:`.Enum.native_enum` flag is left at its default of
True, the PostgreSQL backend will use a :class:`_postgresql.ENUM`
type as the implementation, so the special create/drop rules
will be used.
The create/drop behavior of ENUM is necessarily intricate, due to the
awkward relationship the ENUM type has in relationship to the
parent table, in that it may be "owned" by just a single table, or
may be shared among many tables.
When using :class:`_types.Enum` or :class:`_postgresql.ENUM`
in an "inline" fashion, the ``CREATE TYPE`` and ``DROP TYPE`` is emitted
corresponding to when the :meth:`_schema.Table.create` and
:meth:`_schema.Table.drop`
methods are called::
table = Table('sometable', metadata,
Column('some_enum', ENUM('a', 'b', 'c', name='myenum'))
)
table.create(engine) # will emit CREATE ENUM and CREATE TABLE
table.drop(engine) # will emit DROP TABLE and DROP ENUM
To use a common enumerated type between multiple tables, the best
practice is to declare the :class:`_types.Enum` or
:class:`_postgresql.ENUM` independently, and associate it with the
:class:`_schema.MetaData` object itself::
my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata)
t1 = Table('sometable_one', metadata,
Column('some_enum', myenum)
)
t2 = Table('sometable_two', metadata,
Column('some_enum', myenum)
)
When this pattern is used, care must still be taken at the level
of individual table creates. Emitting CREATE TABLE without also
specifying ``checkfirst=True`` will still cause issues::
t1.create(engine) # will fail: no such type 'myenum'
If we specify ``checkfirst=True``, the individual table-level create
operation will check for the ``ENUM`` and create if not exists::
# will check if enum exists, and emit CREATE TYPE if not
t1.create(engine, checkfirst=True)
When using a metadata-level ENUM type, the type will always be created
and dropped if either the metadata-wide create/drop is called::
metadata.create_all(engine) # will emit CREATE TYPE
metadata.drop_all(engine) # will emit DROP TYPE
The type can also be created and dropped directly::
my_enum.create(engine)
my_enum.drop(engine)
.. versionchanged:: 1.0.0 The PostgreSQL :class:`_postgresql.ENUM` type
now behaves more strictly with regards to CREATE/DROP. A metadata-level
ENUM type will only be created and dropped at the metadata level,
not the table level, with the exception of
``table.create(checkfirst=True)``.
The ``table.drop()`` call will now emit a DROP TYPE for a table-level
enumerated type.
"""
native_enum = True
def __init__(self, *enums, **kw):
"""Construct an :class:`_postgresql.ENUM`.
Arguments are the same as that of
:class:`_types.Enum`, but also including
the following parameters.
:param create_type: Defaults to True.
Indicates that ``CREATE TYPE`` should be
emitted, after optionally checking for the
presence of the type, when the parent
table is being created; and additionally
that ``DROP TYPE`` is called when the table
is dropped. When ``False``, no check
will be performed and no ``CREATE TYPE``
or ``DROP TYPE`` is emitted, unless
:meth:`~.postgresql.ENUM.create`
or :meth:`~.postgresql.ENUM.drop`
are called directly.
Setting to ``False`` is helpful
when invoking a creation scheme to a SQL file
without access to the actual database -
the :meth:`~.postgresql.ENUM.create` and
:meth:`~.postgresql.ENUM.drop` methods can
be used to emit SQL to a target bind.
"""
self.create_type = kw.pop("create_type", True)
super(ENUM, self).__init__(*enums, **kw)
@classmethod
def adapt_emulated_to_native(cls, impl, **kw):
"""Produce a PostgreSQL native :class:`_postgresql.ENUM` from plain
:class:`.Enum`.
"""
kw.setdefault("validate_strings", impl.validate_strings)
kw.setdefault("name", impl.name)
kw.setdefault("schema", impl.schema)
kw.setdefault("inherit_schema", impl.inherit_schema)
kw.setdefault("metadata", impl.metadata)
kw.setdefault("_create_events", False)
kw.setdefault("values_callable", impl.values_callable)
return cls(**kw)
def create(self, bind=None, checkfirst=True):
"""Emit ``CREATE TYPE`` for this
:class:`_postgresql.ENUM`.
If the underlying dialect does not support
PostgreSQL CREATE TYPE, no action is taken.
:param bind: a connectable :class:`_engine.Engine`,
:class:`_engine.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type does not exist already before
creating.
"""
if not bind.dialect.supports_native_enum:
return
if not checkfirst or not bind.dialect.has_type(
bind, self.name, schema=self.schema
):
bind.execute(CreateEnumType(self))
def drop(self, bind=None, checkfirst=True):
"""Emit ``DROP TYPE`` for this
:class:`_postgresql.ENUM`.
If the underlying dialect does not support
PostgreSQL DROP TYPE, no action is taken.
:param bind: a connectable :class:`_engine.Engine`,
:class:`_engine.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type actually exists before dropping.
"""
if not bind.dialect.supports_native_enum:
return
if not checkfirst or bind.dialect.has_type(
bind, self.name, schema=self.schema
):
bind.execute(DropEnumType(self))
def _check_for_name_in_memos(self, checkfirst, kw):
"""Look in the 'ddl runner' for 'memos', then
note our name in that collection.
        This is to ensure a particular named enum is operated
upon only once within any kind of create/drop
sequence without relying upon "checkfirst".
"""
if not self.create_type:
return True
if "_ddl_runner" in kw:
ddl_runner = kw["_ddl_runner"]
if "_pg_enums" in ddl_runner.memo:
pg_enums = ddl_runner.memo["_pg_enums"]
else:
pg_enums = ddl_runner.memo["_pg_enums"] = set()
present = (self.schema, self.name) in pg_enums
pg_enums.add((self.schema, self.name))
return present
else:
return False
def _on_table_create(self, target, bind, checkfirst=False, **kw):
if (
checkfirst
or (
not self.metadata
and not kw.get("_is_metadata_operation", False)
)
and not self._check_for_name_in_memos(checkfirst, kw)
):
self.create(bind=bind, checkfirst=checkfirst)
def _on_table_drop(self, target, bind, checkfirst=False, **kw):
if (
not self.metadata
and not kw.get("_is_metadata_operation", False)
and not self._check_for_name_in_memos(checkfirst, kw)
):
self.drop(bind=bind, checkfirst=checkfirst)
def _on_metadata_create(self, target, bind, checkfirst=False, **kw):
if not self._check_for_name_in_memos(checkfirst, kw):
self.create(bind=bind, checkfirst=checkfirst)
def _on_metadata_drop(self, target, bind, checkfirst=False, **kw):
if not self._check_for_name_in_memos(checkfirst, kw):
self.drop(bind=bind, checkfirst=checkfirst)
colspecs = {
sqltypes.ARRAY: _array.ARRAY,
sqltypes.Interval: INTERVAL,
sqltypes.Enum: ENUM,
sqltypes.JSON.JSONPathType: _json.JSONPathType,
sqltypes.JSON: _json.JSON,
}
ischema_names = {
"_array": _array.ARRAY,
"hstore": _hstore.HSTORE,
"json": _json.JSON,
"jsonb": _json.JSONB,
"int4range": _ranges.INT4RANGE,
"int8range": _ranges.INT8RANGE,
"numrange": _ranges.NUMRANGE,
"daterange": _ranges.DATERANGE,
"tsrange": _ranges.TSRANGE,
"tstzrange": _ranges.TSTZRANGE,
"integer": INTEGER,
"bigint": BIGINT,
"smallint": SMALLINT,
"character varying": VARCHAR,
"character": CHAR,
'"char"': sqltypes.String,
"name": sqltypes.String,
"text": TEXT,
"numeric": NUMERIC,
"float": FLOAT,
"real": REAL,
"inet": INET,
"cidr": CIDR,
"uuid": UUID,
"bit": BIT,
"bit varying": BIT,
"macaddr": MACADDR,
"money": MONEY,
"oid": OID,
"regclass": REGCLASS,
"double precision": DOUBLE_PRECISION,
"timestamp": TIMESTAMP,
"timestamp with time zone": TIMESTAMP,
"timestamp without time zone": TIMESTAMP,
"time with time zone": TIME,
"time without time zone": TIME,
"date": DATE,
"time": TIME,
"bytea": BYTEA,
"boolean": BOOLEAN,
"interval": INTERVAL,
"tsvector": TSVECTOR,
}
class PGCompiler(compiler.SQLCompiler):
def visit_array(self, element, **kw):
return "ARRAY[%s]" % self.visit_clauselist(element, **kw)
def visit_slice(self, element, **kw):
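        # renders the "start:stop" portion of a PostgreSQL array slice,
        # e.g. the 2:5 in some_array[2:5]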
return "%s:%s" % (
self.process(element.start, **kw),
self.process(element.stop, **kw),
)
def visit_json_getitem_op_binary(
self, binary, operator, _cast_applied=False, **kw
):
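        # "->" returns a JSON element; when the expression is being cast
        # to a non-JSON type, the cast is applied and "->>" is rendered
        # instead so the element is extracted as text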
if (
not _cast_applied
and binary.type._type_affinity is not sqltypes.JSON
):
kw["_cast_applied"] = True
return self.process(sql.cast(binary, binary.type), **kw)
kw["eager_grouping"] = True
return self._generate_generic_binary(
binary, " -> " if not _cast_applied else " ->> ", **kw
)
def visit_json_path_getitem_op_binary(
self, binary, operator, _cast_applied=False, **kw
):
if (
not _cast_applied
and binary.type._type_affinity is not sqltypes.JSON
):
kw["_cast_applied"] = True
return self.process(sql.cast(binary, binary.type), **kw)
kw["eager_grouping"] = True
return self._generate_generic_binary(
binary, " #> " if not _cast_applied else " #>> ", **kw
)
def visit_getitem_binary(self, binary, operator, **kw):
return "%s[%s]" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_aggregate_order_by(self, element, **kw):
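        # renders the inner "<expr> ORDER BY <criteria>" used within an
        # aggregate function call, e.g. array_agg(a ORDER BY b DESC)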
return "%s ORDER BY %s" % (
self.process(element.target, **kw),
self.process(element.order_by, **kw),
)
def visit_match_op_binary(self, binary, operator, **kw):
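        # renders "<left> @@ to_tsquery(<right>)"; a postgresql_regconfig
        # modifier, if present, selects the text search configuration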
if "postgresql_regconfig" in binary.modifiers:
regconfig = self.render_literal_value(
binary.modifiers["postgresql_regconfig"], sqltypes.STRINGTYPE
)
if regconfig:
return "%s @@ to_tsquery(%s, %s)" % (
self.process(binary.left, **kw),
regconfig,
self.process(binary.right, **kw),
)
return "%s @@ to_tsquery(%s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "%s ILIKE %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "%s NOT ILIKE %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_empty_set_expr(self, element_types):
# cast the empty set to the type we are comparing against. if
# we are comparing against the null type, pick an arbitrary
# datatype for the empty set
return "SELECT %s WHERE 1!=1" % (
", ".join(
"CAST(NULL AS %s)"
% self.dialect.type_compiler.process(
INTEGER() if type_._isnull else type_
)
for type_ in element_types or [INTEGER()]
),
)
def render_literal_value(self, value, type_):
value = super(PGCompiler, self).render_literal_value(value, type_)
if self.dialect._backslash_escapes:
value = value.replace("\\", "\\\\")
return value
def visit_sequence(self, seq, **kw):
return "nextval('%s')" % self.preparer.format_sequence(seq)
def limit_clause(self, select, **kw):
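        # when OFFSET appears without LIMIT, an explicit "LIMIT ALL" is
        # rendered ahead of the OFFSET clause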
text = ""
if select._limit_clause is not None:
text += " \n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += " \n LIMIT ALL"
text += " OFFSET " + self.process(select._offset_clause, **kw)
return text
def format_from_hint_text(self, sqltext, table, hint, iscrud):
if hint.upper() != "ONLY":
raise exc.CompileError("Unrecognized hint: %r" % hint)
return "ONLY " + sqltext
def get_select_precolumns(self, select, **kw):
if select._distinct is not False:
if select._distinct is True:
return "DISTINCT "
elif isinstance(select._distinct, (list, tuple)):
return (
"DISTINCT ON ("
+ ", ".join(
[self.process(col, **kw) for col in select._distinct]
)
+ ") "
)
else:
return (
"DISTINCT ON ("
+ self.process(select._distinct, **kw)
+ ") "
)
else:
return ""
def for_update_clause(self, select, **kw):
if select._for_update_arg.read:
if select._for_update_arg.key_share:
tmp = " FOR KEY SHARE"
else:
tmp = " FOR SHARE"
elif select._for_update_arg.key_share:
tmp = " FOR NO KEY UPDATE"
else:
tmp = " FOR UPDATE"
if select._for_update_arg.of:
tables = util.OrderedSet()
for c in select._for_update_arg.of:
tables.update(sql_util.surface_selectables_only(c))
tmp += " OF " + ", ".join(
self.process(table, ashint=True, use_schema=False, **kw)
for table in tables
)
if select._for_update_arg.nowait:
tmp += " NOWAIT"
if select._for_update_arg.skip_locked:
tmp += " SKIP LOCKED"
return tmp
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in expression._select_iterables(returning_cols)
]
return "RETURNING " + ", ".join(columns)
def visit_substring_func(self, func, **kw):
s = self.process(func.clauses.clauses[0], **kw)
start = self.process(func.clauses.clauses[1], **kw)
if len(func.clauses.clauses) > 2:
length = self.process(func.clauses.clauses[2], **kw)
return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
else:
return "SUBSTRING(%s FROM %s)" % (s, start)
def _on_conflict_target(self, clause, **kw):
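        # the conflict target is either a named constraint ("ON CONSTRAINT
        # name") or a parenthesized list of index expressions, optionally
        # followed by a partial-index WHERE clause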
if clause.constraint_target is not None:
target_text = "ON CONSTRAINT %s" % clause.constraint_target
elif clause.inferred_target_elements is not None:
target_text = "(%s)" % ", ".join(
(
self.preparer.quote(c)
if isinstance(c, util.string_types)
else self.process(c, include_table=False, use_schema=False)
)
for c in clause.inferred_target_elements
)
if clause.inferred_target_whereclause is not None:
target_text += " WHERE %s" % self.process(
clause.inferred_target_whereclause,
include_table=False,
use_schema=False,
)
else:
target_text = ""
return target_text
def visit_on_conflict_do_nothing(self, on_conflict, **kw):
target_text = self._on_conflict_target(on_conflict, **kw)
if target_text:
return "ON CONFLICT %s DO NOTHING" % target_text
else:
return "ON CONFLICT DO NOTHING"
def visit_on_conflict_do_update(self, on_conflict, **kw):
clause = on_conflict
target_text = self._on_conflict_target(on_conflict, **kw)
action_set_ops = []
set_parameters = dict(clause.update_values_to_set)
        # render a SET clause entry for each column of the target table
        # that has a value present in set_parameters
insert_statement = self.stack[-1]["selectable"]
cols = insert_statement.table.c
for c in cols:
col_key = c.key
if col_key in set_parameters:
value = set_parameters.pop(col_key)
if elements._is_literal(value):
value = elements.BindParameter(None, value, type_=c.type)
else:
if (
isinstance(value, elements.BindParameter)
and value.type._isnull
):
value = value._clone()
value.type = c.type
value_text = self.process(value.self_group(), use_schema=False)
key_text = self.preparer.quote(col_key)
action_set_ops.append("%s = %s" % (key_text, value_text))
# check for names that don't match columns
if set_parameters:
util.warn(
"Additional column names not matching "
"any column keys in table '%s': %s"
% (
self.statement.table.name,
(", ".join("'%s'" % c for c in set_parameters)),
)
)
for k, v in set_parameters.items():
key_text = (
self.preparer.quote(k)
if isinstance(k, util.string_types)
else self.process(k, use_schema=False)
)
value_text = self.process(
elements._literal_as_binds(v), use_schema=False
)
action_set_ops.append("%s = %s" % (key_text, value_text))
action_text = ", ".join(action_set_ops)
if clause.update_whereclause is not None:
action_text += " WHERE %s" % self.process(
clause.update_whereclause, include_table=True, use_schema=False
)
return "ON CONFLICT %s DO UPDATE SET %s" % (target_text, action_text)
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
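        # render the UPDATE .. FROM clause specific to PostgreSQL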
return "FROM " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in extra_froms
)
def delete_extra_from_clause(
self, delete_stmt, from_table, extra_froms, from_hints, **kw
):
"""Render the DELETE .. USING clause specific to PostgreSQL."""
return "USING " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in extra_froms
)
class PGDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column)
impl_type = column.type.dialect_impl(self.dialect)
if isinstance(impl_type, sqltypes.TypeDecorator):
impl_type = impl_type.impl
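        # an integer primary key column that is the table's autoincrement
        # column and has no explicit default (or only an optional Sequence)
        # is rendered as SERIAL / SMALLSERIAL / BIGSERIAL rather than with
        # an explicit type plus sequence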
if (
column.primary_key
and column is column.table._autoincrement_column
and (
self.dialect.supports_smallserial
or not isinstance(impl_type, sqltypes.SmallInteger)
)
and (
column.default is None
or (
isinstance(column.default, schema.Sequence)
and column.default.optional
)
)
):
if isinstance(impl_type, sqltypes.BigInteger):
colspec += " BIGSERIAL"
elif isinstance(impl_type, sqltypes.SmallInteger):
colspec += " SMALLSERIAL"
else:
colspec += " SERIAL"
else:
colspec += " " + self.dialect.type_compiler.process(
column.type,
type_expression=column,
identifier_preparer=self.preparer,
)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if column.computed is not None:
colspec += " " + self.process(column.computed)
if not column.nullable:
colspec += " NOT NULL"
return colspec
def visit_check_constraint(self, constraint):
if constraint._type_bound:
typ = list(constraint.columns)[0].type
if (
isinstance(typ, sqltypes.ARRAY)
and isinstance(typ.item_type, sqltypes.Enum)
and not typ.item_type.native_enum
):
raise exc.CompileError(
"PostgreSQL dialect cannot produce the CHECK constraint "
"for ARRAY of non-native ENUM; please specify "
"create_constraint=False on this Enum datatype."
)
return super(PGDDLCompiler, self).visit_check_constraint(constraint)
def visit_drop_table_comment(self, drop):
return "COMMENT ON TABLE %s IS NULL" % self.preparer.format_table(
drop.element
)
def visit_create_enum_type(self, create):
type_ = create.element
return "CREATE TYPE %s AS ENUM (%s)" % (
self.preparer.format_type(type_),
", ".join(
self.sql_compiler.process(sql.literal(e), literal_binds=True)
for e in type_.enums
),
)
def visit_drop_enum_type(self, drop):
type_ = drop.element
return "DROP TYPE %s" % (self.preparer.format_type(type_))
def visit_create_index(self, create):
preparer = self.preparer
index = create.element
self._verify_index_table(index)
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX "
if self.dialect._supports_create_index_concurrently:
concurrently = index.dialect_options["postgresql"]["concurrently"]
if concurrently:
text += "CONCURRENTLY "
text += "%s ON %s " % (
self._prepared_index_name(index, include_schema=False),
preparer.format_table(index.table),
)
using = index.dialect_options["postgresql"]["using"]
if using:
text += (
"USING %s "
% self.preparer.validate_sql_phrase(using, IDX_USING).lower()
)
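        # per-column operator classes, e.g.
        # postgresql_ops={"data": "text_pattern_ops"}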
ops = index.dialect_options["postgresql"]["ops"]
text += "(%s)" % (
", ".join(
[
self.sql_compiler.process(
expr.self_group()
if not isinstance(expr, expression.ColumnClause)
else expr,
include_table=False,
literal_binds=True,
)
+ (
(" " + ops[expr.key])
if hasattr(expr, "key") and expr.key in ops
else ""
)
for expr in index.expressions
]
)
)
withclause = index.dialect_options["postgresql"]["with"]
if withclause:
text += " WITH (%s)" % (
", ".join(
[
"%s = %s" % storage_parameter
for storage_parameter in withclause.items()
]
)
)
tablespace_name = index.dialect_options["postgresql"]["tablespace"]
if tablespace_name:
text += " TABLESPACE %s" % preparer.quote(tablespace_name)
whereclause = index.dialect_options["postgresql"]["where"]
if whereclause is not None:
where_compiled = self.sql_compiler.process(
whereclause, include_table=False, literal_binds=True
)
text += " WHERE " + where_compiled
return text
def visit_drop_index(self, drop):
index = drop.element
text = "\nDROP INDEX "
if self.dialect._supports_drop_index_concurrently:
concurrently = index.dialect_options["postgresql"]["concurrently"]
if concurrently:
text += "CONCURRENTLY "
text += self._prepared_index_name(index, include_schema=True)
return text
def visit_exclude_constraint(self, constraint, **kw):
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % self.preparer.format_constraint(
constraint
)
elements = []
for expr, name, op in constraint._render_exprs:
kw["include_table"] = False
elements.append(
"%s WITH %s" % (self.sql_compiler.process(expr, **kw), op)
)
text += "EXCLUDE USING %s (%s)" % (
self.preparer.validate_sql_phrase(
constraint.using, IDX_USING
).lower(),
", ".join(elements),
)
if constraint.where is not None:
text += " WHERE (%s)" % self.sql_compiler.process(
constraint.where, literal_binds=True
)
text += self.define_constraint_deferrability(constraint)
return text
def post_create_table(self, table):
table_opts = []
pg_opts = table.dialect_options["postgresql"]
inherits = pg_opts.get("inherits")
if inherits is not None:
if not isinstance(inherits, (list, tuple)):
inherits = (inherits,)
table_opts.append(
"\n INHERITS ( "
+ ", ".join(self.preparer.quote(name) for name in inherits)
+ " )"
)
if pg_opts["partition_by"]:
table_opts.append("\n PARTITION BY %s" % pg_opts["partition_by"])
if pg_opts["with_oids"] is True:
table_opts.append("\n WITH OIDS")
elif pg_opts["with_oids"] is False:
table_opts.append("\n WITHOUT OIDS")
if pg_opts["on_commit"]:
on_commit_options = pg_opts["on_commit"].replace("_", " ").upper()
table_opts.append("\n ON COMMIT %s" % on_commit_options)
if pg_opts["tablespace"]:
tablespace_name = pg_opts["tablespace"]
table_opts.append(
"\n TABLESPACE %s" % self.preparer.quote(tablespace_name)
)
return "".join(table_opts)
def visit_computed_column(self, generated):
if generated.persisted is False:
raise exc.CompileError(
"PostrgreSQL computed columns do not support 'virtual' "
"persistence; set the 'persisted' flag to None or True for "
"PostgreSQL support."
)
return "GENERATED ALWAYS AS (%s) STORED" % self.sql_compiler.process(
generated.sqltext, include_table=False, literal_binds=True
)
class PGTypeCompiler(compiler.GenericTypeCompiler):
def visit_TSVECTOR(self, type_, **kw):
return "TSVECTOR"
def visit_INET(self, type_, **kw):
return "INET"
def visit_CIDR(self, type_, **kw):
return "CIDR"
def visit_MACADDR(self, type_, **kw):
return "MACADDR"
def visit_MONEY(self, type_, **kw):
return "MONEY"
def visit_OID(self, type_, **kw):
return "OID"
def visit_REGCLASS(self, type_, **kw):
return "REGCLASS"
def visit_FLOAT(self, type_, **kw):
if not type_.precision:
return "FLOAT"
else:
return "FLOAT(%(precision)s)" % {"precision": type_.precision}
def visit_DOUBLE_PRECISION(self, type_, **kw):
return "DOUBLE PRECISION"
def visit_BIGINT(self, type_, **kw):
return "BIGINT"
def visit_HSTORE(self, type_, **kw):
return "HSTORE"
def visit_JSON(self, type_, **kw):
return "JSON"
def visit_JSONB(self, type_, **kw):
return "JSONB"
def visit_INT4RANGE(self, type_, **kw):
return "INT4RANGE"
def visit_INT8RANGE(self, type_, **kw):
return "INT8RANGE"
def visit_NUMRANGE(self, type_, **kw):
return "NUMRANGE"
def visit_DATERANGE(self, type_, **kw):
return "DATERANGE"
def visit_TSRANGE(self, type_, **kw):
return "TSRANGE"
def visit_TSTZRANGE(self, type_, **kw):
return "TSTZRANGE"
def visit_datetime(self, type_, **kw):
return self.visit_TIMESTAMP(type_, **kw)
def visit_enum(self, type_, **kw):
if not type_.native_enum or not self.dialect.supports_native_enum:
return super(PGTypeCompiler, self).visit_enum(type_, **kw)
else:
return self.visit_ENUM(type_, **kw)
def visit_ENUM(self, type_, identifier_preparer=None, **kw):
if identifier_preparer is None:
identifier_preparer = self.dialect.identifier_preparer
return identifier_preparer.format_type(type_)
def visit_TIMESTAMP(self, type_, **kw):
return "TIMESTAMP%s %s" % (
"(%d)" % type_.precision
if getattr(type_, "precision", None) is not None
else "",
(type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE",
)
def visit_TIME(self, type_, **kw):
return "TIME%s %s" % (
"(%d)" % type_.precision
if getattr(type_, "precision", None) is not None
else "",
(type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE",
)
def visit_INTERVAL(self, type_, **kw):
text = "INTERVAL"
if type_.fields is not None:
text += " " + type_.fields
if type_.precision is not None:
text += " (%d)" % type_.precision
return text
def visit_BIT(self, type_, **kw):
if type_.varying:
compiled = "BIT VARYING"
if type_.length is not None:
compiled += "(%d)" % type_.length
else:
compiled = "BIT(%d)" % type_.length
return compiled
def visit_UUID(self, type_, **kw):
return "UUID"
def visit_large_binary(self, type_, **kw):
return self.visit_BYTEA(type_, **kw)
def visit_BYTEA(self, type_, **kw):
return "BYTEA"
def visit_ARRAY(self, type_, **kw):
# TODO: pass **kw?
inner = self.process(type_.item_type)
return re.sub(
r"((?: COLLATE.*)?)$",
(
r"%s\1"
% (
"[]"
* (type_.dimensions if type_.dimensions is not None else 1)
)
),
inner,
count=1,
)
class PGIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def _unquote_identifier(self, value):
if value[0] == self.initial_quote:
value = value[1:-1].replace(
self.escape_to_quote, self.escape_quote
)
return value
def format_type(self, type_, use_schema=True):
if not type_.name:
raise exc.CompileError("PostgreSQL ENUM type requires a name.")
name = self.quote(type_.name)
effective_schema = self.schema_for_object(type_)
if (
not self.omit_schema
and use_schema
and effective_schema is not None
):
name = self.quote_schema(effective_schema) + "." + name
return name
class PGInspector(reflection.Inspector):
def __init__(self, conn):
reflection.Inspector.__init__(self, conn)
def get_table_oid(self, table_name, schema=None):
"""Return the OID for the given table name."""
return self.dialect.get_table_oid(
self.bind, table_name, schema, info_cache=self.info_cache
)
def get_enums(self, schema=None):
"""Return a list of ENUM objects.
Each member is a dictionary containing these fields:
* name - name of the enum
* schema - the schema name for the enum.
* visible - boolean, whether or not this enum is visible
in the default search path.
* labels - a list of string labels that apply to the enum.
:param schema: schema name. If None, the default schema
         (typically 'public') is used. May also be set to '*' to
         load enums for all schemas.
.. versionadded:: 1.0.0
"""
schema = schema or self.default_schema_name
return self.dialect._load_enums(self.bind, schema)
def get_foreign_table_names(self, schema=None):
"""Return a list of FOREIGN TABLE names.
Behavior is similar to that of
:meth:`_reflection.Inspector.get_table_names`,
except that the list is limited to those tables that report a
``relkind`` value of ``f``.
.. versionadded:: 1.0.0
"""
schema = schema or self.default_schema_name
return self.dialect._get_foreign_table_names(self.bind, schema)
def get_view_names(self, schema=None, include=("plain", "materialized")):
"""Return all view names in `schema`.
:param schema: Optional, retrieve names from a non-default schema.
For special quoting, use :class:`.quoted_name`.
:param include: specify which types of views to return. Passed
as a string value (for a single type) or a tuple (for any number
of types). Defaults to ``('plain', 'materialized')``.
.. versionadded:: 1.1
"""
return self.dialect.get_view_names(
self.bind, schema, info_cache=self.info_cache, include=include
)
class CreateEnumType(schema._CreateDropBase):
__visit_name__ = "create_enum_type"
class DropEnumType(schema._CreateDropBase):
__visit_name__ = "drop_enum_type"
class PGExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
return self._execute_scalar(
(
"select nextval('%s')"
% self.dialect.identifier_preparer.format_sequence(seq)
),
type_,
)
def get_insert_default(self, column):
if column.primary_key and column is column.table._autoincrement_column:
if column.server_default and column.server_default.has_argument:
# pre-execute passive defaults on primary key columns
return self._execute_scalar(
"select %s" % column.server_default.arg, column.type
)
elif column.default is None or (
column.default.is_sequence and column.default.optional
):
# execute the sequence associated with a SERIAL primary
            # key column. for non-primary-key SERIAL, the ID is
            # just generated server side.
try:
seq_name = column._postgresql_seq_name
except AttributeError:
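                # derive the implicit sequence name the way PostgreSQL
                # does, truncating the table and column names so that
                # "<table>_<column>_seq" stays within the identifier
                # length limit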
tab = column.table.name
col = column.name
tab = tab[0 : 29 + max(0, (29 - len(col)))]
col = col[0 : 29 + max(0, (29 - len(tab)))]
name = "%s_%s_seq" % (tab, col)
column._postgresql_seq_name = seq_name = name
if column.table is not None:
effective_schema = self.connection.schema_for_object(
column.table
)
else:
effective_schema = None
            if effective_schema is not None:
                stmt = 'select nextval(\'"%s"."%s"\')' % (
                    effective_schema,
                    seq_name,
                )
            else:
                stmt = "select nextval('\"%s\"')" % (seq_name,)
            return self._execute_scalar(stmt, column.type)
return super(PGExecutionContext, self).get_insert_default(column)
def should_autocommit_text(self, statement):
return AUTOCOMMIT_REGEXP.match(statement)
class PGDialect(default.DefaultDialect):
name = "postgresql"
supports_alter = True
max_identifier_length = 63
supports_sane_rowcount = True
supports_native_enum = True
supports_native_boolean = True
supports_smallserial = True
supports_sequences = True
sequences_optional = True
preexecute_autoincrement_sequences = True
postfetch_lastrowid = False
supports_comments = True
supports_default_values = True
supports_empty_insert = False
supports_multivalues_insert = True
default_paramstyle = "pyformat"
ischema_names = ischema_names
colspecs = colspecs
statement_compiler = PGCompiler
ddl_compiler = PGDDLCompiler
type_compiler = PGTypeCompiler
preparer = PGIdentifierPreparer
execution_ctx_cls = PGExecutionContext
inspector = PGInspector
isolation_level = None
construct_arguments = [
(
schema.Index,
{
"using": False,
"where": None,
"ops": {},
"concurrently": False,
"with": {},
"tablespace": None,
},
),
(
schema.Table,
{
"ignore_search_path": False,
"tablespace": None,
"partition_by": None,
"with_oids": None,
"on_commit": None,
"inherits": None,
},
),
]
reflection_options = ("postgresql_ignore_search_path",)
_backslash_escapes = True
_supports_create_index_concurrently = True
_supports_drop_index_concurrently = True
def __init__(
self,
isolation_level=None,
json_serializer=None,
json_deserializer=None,
**kwargs
):
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
self._json_deserializer = json_deserializer
self._json_serializer = json_serializer
def initialize(self, connection):
super(PGDialect, self).initialize(connection)
self.implicit_returning = self.server_version_info > (
8,
2,
) and self.__dict__.get("implicit_returning", True)
self.supports_native_enum = self.server_version_info >= (8, 3)
if not self.supports_native_enum:
self.colspecs = self.colspecs.copy()
# pop base Enum type
self.colspecs.pop(sqltypes.Enum, None)
# psycopg2, others may have placed ENUM here as well
self.colspecs.pop(ENUM, None)
# http://www.postgresql.org/docs/9.3/static/release-9-2.html#AEN116689
self.supports_smallserial = self.server_version_info >= (9, 2)
self._backslash_escapes = (
self.server_version_info < (8, 2)
or connection.scalar("show standard_conforming_strings") == "off"
)
self._supports_create_index_concurrently = (
self.server_version_info >= (8, 2)
)
self._supports_drop_index_concurrently = self.server_version_info >= (
9,
2,
)
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
_isolation_lookup = set(
[
"SERIALIZABLE",
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
]
)
def set_isolation_level(self, connection, level):
level = level.replace("_", " ")
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute(
"SET SESSION CHARACTERISTICS AS TRANSACTION "
"ISOLATION LEVEL %s" % level
)
cursor.execute("COMMIT")
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
cursor.execute("show transaction isolation level")
val = cursor.fetchone()[0]
cursor.close()
return val.upper()
def do_begin_twophase(self, connection, xid):
self.do_begin(connection.connection)
def do_prepare_twophase(self, connection, xid):
connection.execute("PREPARE TRANSACTION '%s'" % xid)
def do_rollback_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if is_prepared:
if recover:
# FIXME: ugly hack to get out of transaction
# context when committing recoverable transactions
# Must find out a way how to make the dbapi not
# open a transaction.
connection.execute("ROLLBACK")
connection.execute("ROLLBACK PREPARED '%s'" % xid)
connection.execute("BEGIN")
self.do_rollback(connection.connection)
else:
self.do_rollback(connection.connection)
def do_commit_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if is_prepared:
if recover:
connection.execute("ROLLBACK")
connection.execute("COMMIT PREPARED '%s'" % xid)
connection.execute("BEGIN")
self.do_rollback(connection.connection)
else:
self.do_commit(connection.connection)
def do_recover_twophase(self, connection):
resultset = connection.execute(
sql.text("SELECT gid FROM pg_prepared_xacts")
)
return [row[0] for row in resultset]
def _get_default_schema_name(self, connection):
return connection.scalar("select current_schema()")
def has_schema(self, connection, schema):
        query = (
            "select nspname from pg_namespace "
            "where lower(nspname)=:schema"
        )
cursor = connection.execute(
sql.text(query).bindparams(
sql.bindparam(
"schema",
util.text_type(schema.lower()),
type_=sqltypes.Unicode,
)
)
)
return bool(cursor.first())
def has_table(self, connection, table_name, schema=None):
# seems like case gets folded in pg_class...
if schema is None:
cursor = connection.execute(
sql.text(
"select relname from pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where "
"pg_catalog.pg_table_is_visible(c.oid) "
"and relname=:name"
).bindparams(
sql.bindparam(
"name",
util.text_type(table_name),
type_=sqltypes.Unicode,
)
)
)
else:
cursor = connection.execute(
sql.text(
"select relname from pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where n.nspname=:schema and "
"relname=:name"
).bindparams(
sql.bindparam(
"name",
util.text_type(table_name),
type_=sqltypes.Unicode,
),
sql.bindparam(
"schema",
util.text_type(schema),
type_=sqltypes.Unicode,
),
)
)
return bool(cursor.first())
def has_sequence(self, connection, sequence_name, schema=None):
if schema is None:
cursor = connection.execute(
sql.text(
"SELECT relname FROM pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where relkind='S' and "
"n.nspname=current_schema() "
"and relname=:name"
).bindparams(
sql.bindparam(
"name",
util.text_type(sequence_name),
type_=sqltypes.Unicode,
)
)
)
else:
cursor = connection.execute(
sql.text(
"SELECT relname FROM pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where relkind='S' and "
"n.nspname=:schema and relname=:name"
).bindparams(
sql.bindparam(
"name",
util.text_type(sequence_name),
type_=sqltypes.Unicode,
),
sql.bindparam(
"schema",
util.text_type(schema),
type_=sqltypes.Unicode,
),
)
)
return bool(cursor.first())
def has_type(self, connection, type_name, schema=None):
if schema is not None:
query = """
SELECT EXISTS (
SELECT * FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n
WHERE t.typnamespace = n.oid
AND t.typname = :typname
AND n.nspname = :nspname
)
"""
query = sql.text(query)
else:
query = """
SELECT EXISTS (
SELECT * FROM pg_catalog.pg_type t
WHERE t.typname = :typname
AND pg_type_is_visible(t.oid)
)
"""
query = sql.text(query)
query = query.bindparams(
sql.bindparam(
"typname", util.text_type(type_name), type_=sqltypes.Unicode
)
)
if schema is not None:
query = query.bindparams(
sql.bindparam(
"nspname", util.text_type(schema), type_=sqltypes.Unicode
)
)
cursor = connection.execute(query)
return bool(cursor.scalar())
def _get_server_version_info(self, connection):
v = connection.execute("select version()").scalar()
m = re.match(
r".*(?:PostgreSQL|EnterpriseDB) "
r"(\d+)\.?(\d+)?(?:\.(\d+))?(?:\.\d+)?(?:devel|beta)?",
v,
)
if not m:
raise AssertionError(
"Could not determine version from string '%s'" % v
)
return tuple([int(x) for x in m.group(1, 2, 3) if x is not None])
@reflection.cache
def get_table_oid(self, connection, table_name, schema=None, **kw):
"""Fetch the oid for schema.table_name.
Several reflection methods require the table oid. The idea for using
this method is that it can be fetched one time and cached for
subsequent calls.
"""
table_oid = None
if schema is not None:
schema_where_clause = "n.nspname = :schema"
else:
schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)"
query = (
"""
SELECT c.oid
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE (%s)
AND c.relname = :table_name AND c.relkind in
('r', 'v', 'm', 'f', 'p')
"""
% schema_where_clause
)
        # Since we're binding to unicode, table_name and schema must be
        # unicode.
table_name = util.text_type(table_name)
if schema is not None:
schema = util.text_type(schema)
s = sql.text(query).bindparams(table_name=sqltypes.Unicode)
s = s.columns(oid=sqltypes.Integer)
if schema:
s = s.bindparams(sql.bindparam("schema", type_=sqltypes.Unicode))
c = connection.execute(s, table_name=table_name, schema=schema)
table_oid = c.scalar()
if table_oid is None:
raise exc.NoSuchTableError(table_name)
return table_oid
@reflection.cache
def get_schema_names(self, connection, **kw):
result = connection.execute(
sql.text(
"SELECT nspname FROM pg_namespace "
"WHERE nspname NOT LIKE 'pg_%' "
"ORDER BY nspname"
).columns(nspname=sqltypes.Unicode)
)
return [name for name, in result]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
result = connection.execute(
sql.text(
"SELECT c.relname FROM pg_class c "
"JOIN pg_namespace n ON n.oid = c.relnamespace "
"WHERE n.nspname = :schema AND c.relkind in ('r', 'p')"
).columns(relname=sqltypes.Unicode),
schema=schema if schema is not None else self.default_schema_name,
)
return [name for name, in result]
@reflection.cache
def _get_foreign_table_names(self, connection, schema=None, **kw):
result = connection.execute(
sql.text(
"SELECT c.relname FROM pg_class c "
"JOIN pg_namespace n ON n.oid = c.relnamespace "
"WHERE n.nspname = :schema AND c.relkind = 'f'"
).columns(relname=sqltypes.Unicode),
schema=schema if schema is not None else self.default_schema_name,
)
return [name for name, in result]
@reflection.cache
def get_view_names(
self, connection, schema=None, include=("plain", "materialized"), **kw
):
include_kind = {"plain": "v", "materialized": "m"}
try:
kinds = [include_kind[i] for i in util.to_list(include)]
except KeyError:
raise ValueError(
"include %r unknown, needs to be a sequence containing "
"one or both of 'plain' and 'materialized'" % (include,)
)
if not kinds:
raise ValueError(
"empty include, needs to be a sequence containing "
"one or both of 'plain' and 'materialized'"
)
result = connection.execute(
sql.text(
"SELECT c.relname FROM pg_class c "
"JOIN pg_namespace n ON n.oid = c.relnamespace "
"WHERE n.nspname = :schema AND c.relkind IN (%s)"
% (", ".join("'%s'" % elem for elem in kinds))
).columns(relname=sqltypes.Unicode),
schema=schema if schema is not None else self.default_schema_name,
)
return [name for name, in result]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
view_def = connection.scalar(
sql.text(
"SELECT pg_get_viewdef(c.oid) view_def FROM pg_class c "
"JOIN pg_namespace n ON n.oid = c.relnamespace "
"WHERE n.nspname = :schema AND c.relname = :view_name "
"AND c.relkind IN ('v', 'm')"
).columns(view_def=sqltypes.Unicode),
schema=schema if schema is not None else self.default_schema_name,
view_name=view_name,
)
return view_def
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
generated = (
"a.attgenerated as generated"
if self.server_version_info >= (12,)
else "NULL as generated"
)
SQL_COLS = (
"""
SELECT a.attname,
pg_catalog.format_type(a.atttypid, a.atttypmod),
(SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid)
FROM pg_catalog.pg_attrdef d
WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum
AND a.atthasdef)
AS DEFAULT,
a.attnotnull, a.attnum, a.attrelid as table_oid,
pgd.description as comment,
%s
FROM pg_catalog.pg_attribute a
LEFT JOIN pg_catalog.pg_description pgd ON (
pgd.objoid = a.attrelid AND pgd.objsubid = a.attnum)
WHERE a.attrelid = :table_oid
AND a.attnum > 0 AND NOT a.attisdropped
ORDER BY a.attnum
"""
% generated
)
s = (
sql.text(SQL_COLS)
.bindparams(sql.bindparam("table_oid", type_=sqltypes.Integer))
.columns(attname=sqltypes.Unicode, default=sqltypes.Unicode)
)
c = connection.execute(s, table_oid=table_oid)
rows = c.fetchall()
# dictionary with (name, ) if default search path or (schema, name)
# as keys
domains = self._load_domains(connection)
# dictionary with (name, ) if default search path or (schema, name)
# as keys
enums = dict(
((rec["name"],), rec)
if rec["visible"]
else ((rec["schema"], rec["name"]), rec)
for rec in self._load_enums(connection, schema="*")
)
# format columns
columns = []
for (
name,
format_type,
default_,
notnull,
attnum,
table_oid,
comment,
generated,
) in rows:
column_info = self._get_column_info(
name,
format_type,
default_,
notnull,
domains,
enums,
schema,
comment,
generated,
)
columns.append(column_info)
return columns
def _get_column_info(
self,
name,
format_type,
default,
notnull,
domains,
enums,
schema,
comment,
generated,
):
def _handle_array_type(attype):
return (
# strip '[]' from integer[], etc.
re.sub(r"\[\]$", "", attype),
attype.endswith("[]"),
)
# strip (*) from character varying(5), timestamp(5)
# with time zone, geometry(POLYGON), etc.
attype = re.sub(r"\(.*\)", "", format_type)
# strip '[]' from integer[], etc. and check if an array
attype, is_array = _handle_array_type(attype)
# strip quotes from case sensitive enum or domain names
enum_or_domain_key = tuple(util.quoted_token_parser(attype))
nullable = not notnull
charlen = re.search(r"\(([\d,]+)\)", format_type)
if charlen:
charlen = charlen.group(1)
args = re.search(r"\((.*)\)", format_type)
if args and args.group(1):
args = tuple(re.split(r"\s*,\s*", args.group(1)))
else:
args = ()
kwargs = {}
if attype == "numeric":
if charlen:
prec, scale = charlen.split(",")
args = (int(prec), int(scale))
else:
args = ()
elif attype == "double precision":
args = (53,)
elif attype == "integer":
args = ()
elif attype in ("timestamp with time zone", "time with time zone"):
kwargs["timezone"] = True
if charlen:
kwargs["precision"] = int(charlen)
args = ()
elif attype in (
"timestamp without time zone",
"time without time zone",
"time",
):
kwargs["timezone"] = False
if charlen:
kwargs["precision"] = int(charlen)
args = ()
elif attype == "bit varying":
kwargs["varying"] = True
if charlen:
args = (int(charlen),)
else:
args = ()
elif attype.startswith("interval"):
field_match = re.match(r"interval (.+)", attype, re.I)
if charlen:
kwargs["precision"] = int(charlen)
if field_match:
kwargs["fields"] = field_match.group(1)
attype = "interval"
args = ()
elif charlen:
args = (int(charlen),)
while True:
# looping here to suit nested domains
if attype in self.ischema_names:
coltype = self.ischema_names[attype]
break
elif enum_or_domain_key in enums:
enum = enums[enum_or_domain_key]
coltype = ENUM
kwargs["name"] = enum["name"]
if not enum["visible"]:
kwargs["schema"] = enum["schema"]
args = tuple(enum["labels"])
break
elif enum_or_domain_key in domains:
domain = domains[enum_or_domain_key]
attype = domain["attype"]
attype, is_array = _handle_array_type(attype)
# strip quotes from case sensitive enum or domain names
enum_or_domain_key = tuple(util.quoted_token_parser(attype))
# A table can't override whether the domain is nullable.
nullable = domain["nullable"]
if domain["default"] and not default:
# It can, however, override the default
# value, but can't set it to null.
default = domain["default"]
continue
else:
coltype = None
break
if coltype:
coltype = coltype(*args, **kwargs)
if is_array:
coltype = self.ischema_names["_array"](coltype)
else:
util.warn(
"Did not recognize type '%s' of column '%s'" % (attype, name)
)
coltype = sqltypes.NULLTYPE
        # attgenerated is '' for a plain column and 's' for a stored
        # generated column. (Other values might be added in the future.)
if generated:
computed = dict(sqltext=default, persisted=generated == "s")
default = None
else:
computed = None
# adjust the default value
autoincrement = False
if default is not None:
match = re.search(r"""(nextval\(')([^']+)('.*$)""", default)
if match is not None:
if issubclass(coltype._type_affinity, sqltypes.Integer):
autoincrement = True
# the default is related to a Sequence
sch = schema
if "." not in match.group(2) and sch is not None:
# unconditionally quote the schema name. this could
# later be enhanced to obey quoting rules /
# "quote schema"
default = (
match.group(1)
+ ('"%s"' % sch)
+ "."
+ match.group(2)
+ match.group(3)
)
column_info = dict(
name=name,
type=coltype,
nullable=nullable,
default=default,
autoincrement=autoincrement,
comment=comment,
)
if computed is not None:
column_info["computed"] = computed
return column_info
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
if self.server_version_info < (8, 4):
PK_SQL = """
SELECT a.attname
FROM
pg_class t
join pg_index ix on t.oid = ix.indrelid
join pg_attribute a
on t.oid=a.attrelid AND %s
WHERE
t.oid = :table_oid and ix.indisprimary = 't'
ORDER BY a.attnum
""" % self._pg_index_any(
"a.attnum", "ix.indkey"
)
else:
# unnest() and generate_subscripts() both introduced in
# version 8.4
PK_SQL = """
SELECT a.attname
FROM pg_attribute a JOIN (
SELECT unnest(ix.indkey) attnum,
generate_subscripts(ix.indkey, 1) ord
FROM pg_index ix
WHERE ix.indrelid = :table_oid AND ix.indisprimary
) k ON a.attnum=k.attnum
WHERE a.attrelid = :table_oid
ORDER BY k.ord
"""
t = sql.text(PK_SQL).columns(attname=sqltypes.Unicode)
c = connection.execute(t, table_oid=table_oid)
cols = [r[0] for r in c.fetchall()]
PK_CONS_SQL = """
SELECT conname
FROM pg_catalog.pg_constraint r
WHERE r.conrelid = :table_oid AND r.contype = 'p'
ORDER BY 1
"""
t = sql.text(PK_CONS_SQL).columns(conname=sqltypes.Unicode)
c = connection.execute(t, table_oid=table_oid)
name = c.scalar()
return {"constrained_columns": cols, "name": name}
@reflection.cache
def get_foreign_keys(
self,
connection,
table_name,
schema=None,
postgresql_ignore_search_path=False,
**kw
):
preparer = self.identifier_preparer
table_oid = self.get_table_oid(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
FK_SQL = """
SELECT r.conname,
pg_catalog.pg_get_constraintdef(r.oid, true) as condef,
n.nspname as conschema
FROM pg_catalog.pg_constraint r,
pg_namespace n,
pg_class c
WHERE r.conrelid = :table AND
r.contype = 'f' AND
c.oid = confrelid AND
n.oid = c.relnamespace
ORDER BY 1
"""
# http://www.postgresql.org/docs/9.0/static/sql-createtable.html
FK_REGEX = re.compile(
r"FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)"
r"[\s]?(MATCH (FULL|PARTIAL|SIMPLE)+)?"
r"[\s]?(ON UPDATE "
r"(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?"
r"[\s]?(ON DELETE "
r"(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?"
r"[\s]?(DEFERRABLE|NOT DEFERRABLE)?"
r"[\s]?(INITIALLY (DEFERRED|IMMEDIATE)+)?"
)
t = sql.text(FK_SQL).columns(
conname=sqltypes.Unicode, condef=sqltypes.Unicode
)
c = connection.execute(t, table=table_oid)
fkeys = []
for conname, condef, conschema in c.fetchall():
m = re.search(FK_REGEX, condef).groups()
(
constrained_columns,
referred_schema,
referred_table,
referred_columns,
_,
match,
_,
onupdate,
_,
ondelete,
deferrable,
_,
initially,
) = m
if deferrable is not None:
deferrable = True if deferrable == "DEFERRABLE" else False
constrained_columns = [
preparer._unquote_identifier(x)
for x in re.split(r"\s*,\s*", constrained_columns)
]
if postgresql_ignore_search_path:
# when ignoring search path, we use the actual schema
# provided it isn't the "default" schema
if conschema != self.default_schema_name:
referred_schema = conschema
else:
referred_schema = schema
elif referred_schema:
# referred_schema is the schema that we regexp'ed from
# pg_get_constraintdef(). If the schema is in the search
# path, pg_get_constraintdef() will give us None.
referred_schema = preparer._unquote_identifier(referred_schema)
elif schema is not None and schema == conschema:
# If the actual schema matches the schema of the table
# we're reflecting, then we will use that.
referred_schema = schema
referred_table = preparer._unquote_identifier(referred_table)
referred_columns = [
preparer._unquote_identifier(x)
for x in re.split(r"\s*,\s", referred_columns)
]
fkey_d = {
"name": conname,
"constrained_columns": constrained_columns,
"referred_schema": referred_schema,
"referred_table": referred_table,
"referred_columns": referred_columns,
"options": {
"onupdate": onupdate,
"ondelete": ondelete,
"deferrable": deferrable,
"initially": initially,
"match": match,
},
}
fkeys.append(fkey_d)
return fkeys
def _pg_index_any(self, col, compare_to):
if self.server_version_info < (8, 1):
# http://www.postgresql.org/message-id/10279.1124395722@sss.pgh.pa.us
# "In CVS tip you could replace this with "attnum = ANY (indkey)".
# Unfortunately, most array support doesn't work on int2vector in
# pre-8.1 releases, so I think you're kinda stuck with the above
# for now.
# regards, tom lane"
return "(%s)" % " OR ".join(
"%s[%d] = %s" % (compare_to, ind, col) for ind in range(0, 10)
)
else:
return "%s = ANY(%s)" % (col, compare_to)
@reflection.cache
def get_indexes(self, connection, table_name, schema, **kw):
table_oid = self.get_table_oid(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
# cast indkey as varchar since it's an int2vector,
# returned as a list by some drivers such as pypostgresql
if self.server_version_info < (8, 5):
IDX_SQL = """
SELECT
i.relname as relname,
ix.indisunique, ix.indexprs, ix.indpred,
a.attname, a.attnum, NULL, ix.indkey%s,
%s, %s, am.amname,
NULL as indnkeyatts
FROM
pg_class t
join pg_index ix on t.oid = ix.indrelid
join pg_class i on i.oid = ix.indexrelid
left outer join
pg_attribute a
on t.oid = a.attrelid and %s
left outer join
pg_am am
on i.relam = am.oid
WHERE
t.relkind IN ('r', 'v', 'f', 'm')
and t.oid = :table_oid
and ix.indisprimary = 'f'
ORDER BY
t.relname,
i.relname
""" % (
            # the 8.3 cutoff here is based on observation: the cast
            # fails in PG 8.2.4 but works in 8.3.0; the PG changelogs
            # say nothing about the change.
"::varchar" if self.server_version_info >= (8, 3) else "",
"ix.indoption::varchar"
if self.server_version_info >= (8, 3)
else "NULL",
"i.reloptions"
if self.server_version_info >= (8, 2)
else "NULL",
self._pg_index_any("a.attnum", "ix.indkey"),
)
else:
IDX_SQL = """
SELECT
i.relname as relname,
ix.indisunique, ix.indexprs, ix.indpred,
a.attname, a.attnum, c.conrelid, ix.indkey::varchar,
ix.indoption::varchar, i.reloptions, am.amname,
%s as indnkeyatts
FROM
pg_class t
join pg_index ix on t.oid = ix.indrelid
join pg_class i on i.oid = ix.indexrelid
left outer join
pg_attribute a
on t.oid = a.attrelid and a.attnum = ANY(ix.indkey)
left outer join
pg_constraint c
on (ix.indrelid = c.conrelid and
ix.indexrelid = c.conindid and
c.contype in ('p', 'u', 'x'))
left outer join
pg_am am
on i.relam = am.oid
WHERE
t.relkind IN ('r', 'v', 'f', 'm', 'p')
and t.oid = :table_oid
and ix.indisprimary = 'f'
ORDER BY
t.relname,
i.relname
""" % (
"ix.indnkeyatts"
if self.server_version_info >= (11, 0)
else "NULL",
)
t = sql.text(IDX_SQL).columns(
relname=sqltypes.Unicode, attname=sqltypes.Unicode
)
c = connection.execute(t, table_oid=table_oid)
indexes = defaultdict(lambda: defaultdict(dict))
sv_idx_name = None
for row in c.fetchall():
(
idx_name,
unique,
expr,
prd,
col,
col_num,
conrelid,
idx_key,
idx_option,
options,
amname,
indnkeyatts,
) = row
if expr:
if idx_name != sv_idx_name:
util.warn(
"Skipped unsupported reflection of "
"expression-based index %s" % idx_name
)
sv_idx_name = idx_name
continue
if prd and not idx_name == sv_idx_name:
util.warn(
"Predicate of partial index %s ignored during reflection"
% idx_name
)
sv_idx_name = idx_name
has_idx = idx_name in indexes
index = indexes[idx_name]
if col is not None:
index["cols"][col_num] = col
if not has_idx:
idx_keys = idx_key.split()
# "The number of key columns in the index, not counting any
# included columns, which are merely stored and do not
# participate in the index semantics"
if indnkeyatts and idx_keys[indnkeyatts:]:
util.warn(
"INCLUDE columns for covering index %s "
"ignored during reflection" % (idx_name,)
)
idx_keys = idx_keys[:indnkeyatts]
index["key"] = [int(k.strip()) for k in idx_keys]
# (new in pg 8.3)
# "pg_index.indoption" is list of ints, one per column/expr.
# int acts as bitmask: 0x01=DESC, 0x02=NULLSFIRST
sorting = {}
for col_idx, col_flags in enumerate(
(idx_option or "").split()
):
col_flags = int(col_flags.strip())
col_sorting = ()
# try to set flags only if they differ from PG defaults...
if col_flags & 0x01:
col_sorting += ("desc",)
if not (col_flags & 0x02):
col_sorting += ("nullslast",)
else:
if col_flags & 0x02:
col_sorting += ("nullsfirst",)
if col_sorting:
sorting[col_idx] = col_sorting
if sorting:
index["sorting"] = sorting
index["unique"] = unique
if conrelid is not None:
index["duplicates_constraint"] = idx_name
if options:
index["options"] = dict(
[option.split("=") for option in options]
)
# it *might* be nice to include that this is 'btree' in the
# reflection info. But we don't want an Index object
# to have a ``postgresql_using`` in it that is just the
# default, so for the moment leaving this out.
if amname and amname != "btree":
index["amname"] = amname
result = []
for name, idx in indexes.items():
entry = {
"name": name,
"unique": idx["unique"],
"column_names": [idx["cols"][i] for i in idx["key"]],
}
if "duplicates_constraint" in idx:
entry["duplicates_constraint"] = idx["duplicates_constraint"]
if "sorting" in idx:
entry["column_sorting"] = dict(
(idx["cols"][idx["key"][i]], value)
for i, value in idx["sorting"].items()
)
if "options" in idx:
entry.setdefault("dialect_options", {})[
"postgresql_with"
] = idx["options"]
if "amname" in idx:
entry.setdefault("dialect_options", {})[
"postgresql_using"
] = idx["amname"]
result.append(entry)
return result
@reflection.cache
def get_unique_constraints(
self, connection, table_name, schema=None, **kw
):
table_oid = self.get_table_oid(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
UNIQUE_SQL = """
SELECT
cons.conname as name,
cons.conkey as key,
a.attnum as col_num,
a.attname as col_name
FROM
pg_catalog.pg_constraint cons
join pg_attribute a
on cons.conrelid = a.attrelid AND
a.attnum = ANY(cons.conkey)
WHERE
cons.conrelid = :table_oid AND
cons.contype = 'u'
"""
t = sql.text(UNIQUE_SQL).columns(col_name=sqltypes.Unicode)
c = connection.execute(t, table_oid=table_oid)
uniques = defaultdict(lambda: defaultdict(dict))
for row in c.fetchall():
uc = uniques[row.name]
uc["key"] = row.key
uc["cols"][row.col_num] = row.col_name
return [
{"name": name, "column_names": [uc["cols"][i] for i in uc["key"]]}
for name, uc in uniques.items()
]
@reflection.cache
def get_table_comment(self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
COMMENT_SQL = """
SELECT
pgd.description as table_comment
FROM
pg_catalog.pg_description pgd
WHERE
pgd.objsubid = 0 AND
pgd.objoid = :table_oid
"""
c = connection.execute(sql.text(COMMENT_SQL), table_oid=table_oid)
return {"text": c.scalar()}
@reflection.cache
def get_check_constraints(self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
CHECK_SQL = """
SELECT
cons.conname as name,
pg_get_constraintdef(cons.oid) as src
FROM
pg_catalog.pg_constraint cons
WHERE
cons.conrelid = :table_oid AND
cons.contype = 'c'
"""
c = connection.execute(sql.text(CHECK_SQL), table_oid=table_oid)
ret = []
for name, src in c:
# samples:
# "CHECK (((a > 1) AND (a < 5)))"
# "CHECK (((a = 1) OR ((a > 2) AND (a < 5))))"
# "CHECK (((a > 1) AND (a < 5))) NOT VALID"
# "CHECK (some_boolean_function(a))"
# "CHECK (((a\n < 1)\n OR\n (a\n >= 5))\n)"
m = re.match(
r"^CHECK *\((.+)\)( NOT VALID)?$", src, flags=re.DOTALL
)
if not m:
util.warn("Could not parse CHECK constraint text: %r" % src)
sqltext = ""
else:
sqltext = re.compile(
r"^[\s\n]*\((.+)\)[\s\n]*$", flags=re.DOTALL
).sub(r"\1", m.group(1))
entry = {"name": name, "sqltext": sqltext}
if m and m.group(2):
entry["dialect_options"] = {"not_valid": True}
ret.append(entry)
return ret
def _load_enums(self, connection, schema=None):
schema = schema or self.default_schema_name
if not self.supports_native_enum:
return {}
# Load data types for enums:
SQL_ENUMS = """
SELECT t.typname as "name",
-- no enum defaults in 8.4 at least
-- t.typdefault as "default",
pg_catalog.pg_type_is_visible(t.oid) as "visible",
n.nspname as "schema",
e.enumlabel as "label"
FROM pg_catalog.pg_type t
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid
WHERE t.typtype = 'e'
"""
if schema != "*":
SQL_ENUMS += "AND n.nspname = :schema "
# e.oid gives us label order within an enum
SQL_ENUMS += 'ORDER BY "schema", "name", e.oid'
s = sql.text(SQL_ENUMS).columns(
attname=sqltypes.Unicode, label=sqltypes.Unicode
)
if schema != "*":
s = s.bindparams(schema=schema)
c = connection.execute(s)
enums = []
enum_by_name = {}
for enum in c.fetchall():
key = (enum["schema"], enum["name"])
if key in enum_by_name:
enum_by_name[key]["labels"].append(enum["label"])
else:
enum_by_name[key] = enum_rec = {
"name": enum["name"],
"schema": enum["schema"],
"visible": enum["visible"],
"labels": [],
}
if enum["label"] is not None:
enum_rec["labels"].append(enum["label"])
enums.append(enum_rec)
return enums
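# illustrative (hypothetical) shape of the value returned above:
# [{"name": "mood", "schema": "public", "visible": True,
#   "labels": ["sad", "ok", "happy"]}]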
def _load_domains(self, connection):
# Load data types for domains:
SQL_DOMAINS = """
SELECT t.typname as "name",
pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype",
not t.typnotnull as "nullable",
t.typdefault as "default",
pg_catalog.pg_type_is_visible(t.oid) as "visible",
n.nspname as "schema"
FROM pg_catalog.pg_type t
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
WHERE t.typtype = 'd'
"""
s = sql.text(SQL_DOMAINS).columns(attname=sqltypes.Unicode)
c = connection.execute(s)
domains = {}
for domain in c.fetchall():
# strip (30) from character varying(30)
attype = re.search(r"([^\(]+)", domain["attype"]).group(1)
# 'visible' just means whether or not the domain is in a
# schema that's on the search path -- or not overridden by
# a schema with higher precedence. If it's not visible,
# it will be prefixed with the schema-name when it's used.
if domain["visible"]:
key = (domain["name"],)
else:
key = (domain["schema"], domain["name"])
domains[key] = {
"attype": attype,
"nullable": domain["nullable"],
"default": domain["default"],
}
return domains
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/ext.py | # postgresql/ext.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .array import ARRAY
from ...sql import elements
from ...sql import expression
from ...sql import functions
from ...sql.schema import ColumnCollectionConstraint
class aggregate_order_by(expression.ColumnElement):
"""Represent a PostgreSQL aggregate order by expression.
E.g.::
from sqlalchemy.dialects.postgresql import aggregate_order_by
expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
stmt = select([expr])
would represent the expression::
SELECT array_agg(a ORDER BY b DESC) FROM table;
Similarly::
expr = func.string_agg(
table.c.a,
aggregate_order_by(literal_column("','"), table.c.a)
)
stmt = select([expr])
Would represent::
SELECT string_agg(a, ',' ORDER BY a) FROM table;
.. versionadded:: 1.1
.. versionchanged:: 1.2.13 - the ORDER BY argument may be multiple terms
.. seealso::
:class:`_functions.array_agg`
"""
__visit_name__ = "aggregate_order_by"
def __init__(self, target, *order_by):
self.target = elements._literal_as_binds(target)
_lob = len(order_by)
if _lob == 0:
raise TypeError("at least one ORDER BY element is required")
elif _lob == 1:
self.order_by = elements._literal_as_binds(order_by[0])
else:
self.order_by = elements.ClauseList(
*order_by, _literal_as_text=elements._literal_as_binds
)
def self_group(self, against=None):
return self
def get_children(self, **kwargs):
return self.target, self.order_by
def _copy_internals(self, clone=elements._clone, **kw):
self.target = clone(self.target, **kw)
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self):
return self.target._from_objects + self.order_by._from_objects
class ExcludeConstraint(ColumnCollectionConstraint):
"""A table-level EXCLUDE constraint.
Defines an EXCLUDE constraint as described in the `postgres
documentation`__.
__ http://www.postgresql.org/docs/9.0/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
""" # noqa
__visit_name__ = "exclude_constraint"
where = None
@elements._document_text_coercion(
"where",
":class:`.ExcludeConstraint`",
":paramref:`.ExcludeConstraint.where`",
)
def __init__(self, *elements, **kw):
r"""
Create an :class:`.ExcludeConstraint` object.
E.g.::
const = ExcludeConstraint(
(Column('period'), '&&'),
(Column('group'), '='),
where=(Column('group') != 'some group')
)
The constraint is normally embedded into the :class:`_schema.Table`
construct
directly, or added later using :meth:`.append_constraint`::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('period', TSRANGE()),
Column('group', String)
)
some_table.append_constraint(
ExcludeConstraint(
(some_table.c.period, '&&'),
(some_table.c.group, '='),
where=some_table.c.group != 'some group',
name='some_table_excl_const'
)
)
:param \*elements:
A sequence of two tuples of the form ``(column, operator)`` where
"column" is a SQL expression element or a raw SQL string, most
typically a :class:`_schema.Column` object,
and "operator" is a string
containing the operator to use. In order to specify a column name
when a :class:`_schema.Column` object is not available,
while ensuring
that any necessary quoting rules take effect, an ad-hoc
:class:`_schema.Column` or :func:`_expression.column`
object should be
used.
:param name:
Optional, the in-database name of this constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param using:
Optional string. If set, emit USING <index_method> when issuing DDL
for this constraint. Defaults to 'gist'.
:param where:
Optional SQL expression construct or literal SQL string.
If set, emit WHERE <predicate> when issuing DDL
for this constraint.
"""
columns = []
render_exprs = []
self.operators = {}
expressions, operators = zip(*elements)
for (expr, column, strname, add_element), operator in zip(
self._extract_col_expression_collection(expressions), operators
):
if add_element is not None:
columns.append(add_element)
name = column.name if column is not None else strname
if name is not None:
# backwards compat
self.operators[name] = operator
expr = expression._literal_as_column(expr)
render_exprs.append((expr, name, operator))
self._render_exprs = render_exprs
ColumnCollectionConstraint.__init__(
self,
*columns,
name=kw.get("name"),
deferrable=kw.get("deferrable"),
initially=kw.get("initially")
)
self.using = kw.get("using", "gist")
where = kw.get("where")
if where is not None:
self.where = expression._literal_as_text(
where, allow_coercion_to_text=True
)
def copy(self, **kw):
elements = [(col, self.operators[col]) for col in self.columns.keys()]
c = self.__class__(
*elements,
name=self.name,
deferrable=self.deferrable,
initially=self.initially,
where=self.where,
using=self.using
)
c.dispatch._update(self.dispatch)
return c
def array_agg(*arg, **kw):
"""PostgreSQL-specific form of :class:`_functions.array_agg`, ensures
return type is :class:`_postgresql.ARRAY` and not
the plain :class:`_types.ARRAY`, unless an explicit ``type_``
is passed.
.. versionadded:: 1.1
"""
kw["_default_array_type"] = ARRAY
return functions.func.array_agg(*arg, **kw)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/hstore.py | # postgresql/hstore.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
from .array import ARRAY
from ... import types as sqltypes
from ... import util
from ...sql import functions as sqlfunc
from ...sql import operators
__all__ = ("HSTORE", "hstore")
idx_precedence = operators._PRECEDENCE[operators.json_getitem_op]
GETITEM = operators.custom_op(
"->",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
HAS_KEY = operators.custom_op(
"?",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
HAS_ALL = operators.custom_op(
"?&",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
HAS_ANY = operators.custom_op(
"?|",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
CONTAINS = operators.custom_op(
"@>",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
CONTAINED_BY = operators.custom_op(
"<@",
precedence=idx_precedence,
natural_self_precedent=True,
eager_grouping=True,
)
class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
"""Represent the PostgreSQL HSTORE type.
The :class:`.HSTORE` type stores dictionaries containing strings, e.g.::
data_table = Table('data_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', HSTORE)
)
with engine.connect() as conn:
conn.execute(
data_table.insert(),
data = {"key1": "value1", "key2": "value2"}
)
:class:`.HSTORE` provides for a wide range of operations, including:
* Index operations::
data_table.c.data['some key'] == 'some value'
* Containment operations::
data_table.c.data.has_key('some key')
data_table.c.data.has_all(['one', 'two', 'three'])
* Concatenation::
data_table.c.data + {"k1": "v1"}
For a full list of special methods see
:class:`.HSTORE.comparator_factory`.
For usage with the SQLAlchemy ORM, it may be desirable to combine
the usage of :class:`.HSTORE` with the :class:`.MutableDict` dictionary
type, now part of the :mod:`sqlalchemy.ext.mutable`
extension. This extension will allow "in-place" changes to the
dictionary, e.g. addition of new keys or replacement/removal of existing
keys to/from the current dictionary, to produce events which will be
detected by the unit of work::
from sqlalchemy.ext.mutable import MutableDict
class MyClass(Base):
__tablename__ = 'data_table'
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(HSTORE))
my_object = session.query(MyClass).one()
# in-place mutation, requires Mutable extension
# in order for the ORM to detect
my_object.data['some_key'] = 'some value'
session.commit()
When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM
will not be alerted to any changes to the contents of an existing
dictionary, unless that dictionary value is re-assigned to the
HSTORE-attribute itself, thus generating a change event.
.. seealso::
:class:`.hstore` - render the PostgreSQL ``hstore()`` function.
"""
__visit_name__ = "HSTORE"
hashable = False
text_type = sqltypes.Text()
def __init__(self, text_type=None):
"""Construct a new :class:`.HSTORE`.
:param text_type: the type that should be used for indexed values.
Defaults to :class:`_types.Text`.
.. versionadded:: 1.1.0
"""
if text_type is not None:
self.text_type = text_type
class Comparator(
sqltypes.Indexable.Comparator, sqltypes.Concatenable.Comparator
):
"""Define comparison operations for :class:`.HSTORE`."""
def has_key(self, other):
"""Boolean expression. Test for presence of a key. Note that the
key may be a SQLA expression.
"""
return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
def has_all(self, other):
"""Boolean expression. Test for presence of all keys in jsonb
"""
return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
def has_any(self, other):
"""Boolean expression. Test for presence of any key in jsonb
"""
return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
def contains(self, other, **kwargs):
"""Boolean expression. Test if keys (or array) are a superset
of/contained the keys of the argument jsonb expression.
"""
return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if keys are a proper subset of the
keys of the argument jsonb expression.
"""
return self.operate(
CONTAINED_BY, other, result_type=sqltypes.Boolean
)
def _setup_getitem(self, index):
return GETITEM, index, self.type.text_type
def defined(self, key):
"""Boolean expression. Test for presence of a non-NULL value for
the key. Note that the key may be a SQLA expression.
"""
return _HStoreDefinedFunction(self.expr, key)
def delete(self, key):
"""HStore expression. Returns the contents of this hstore with the
given key deleted. Note that the key may be a SQLA expression.
"""
if isinstance(key, dict):
key = _serialize_hstore(key)
return _HStoreDeleteFunction(self.expr, key)
def slice(self, array):
"""HStore expression. Returns a subset of an hstore defined by
array of keys.
"""
return _HStoreSliceFunction(self.expr, array)
def keys(self):
"""Text array expression. Returns array of keys."""
return _HStoreKeysFunction(self.expr)
def vals(self):
"""Text array expression. Returns array of values."""
return _HStoreValsFunction(self.expr)
def array(self):
"""Text array expression. Returns array of alternating keys and
values.
"""
return _HStoreArrayFunction(self.expr)
def matrix(self):
"""Text array expression. Returns array of [key, value] pairs."""
return _HStoreMatrixFunction(self.expr)
comparator_factory = Comparator
def bind_processor(self, dialect):
if util.py2k:
encoding = dialect.encoding
def process(value):
if isinstance(value, dict):
return _serialize_hstore(value).encode(encoding)
else:
return value
else:
def process(value):
if isinstance(value, dict):
return _serialize_hstore(value)
else:
return value
return process
def result_processor(self, dialect, coltype):
if util.py2k:
encoding = dialect.encoding
def process(value):
if value is not None:
return _parse_hstore(value.decode(encoding))
else:
return value
else:
def process(value):
if value is not None:
return _parse_hstore(value)
else:
return value
return process
class hstore(sqlfunc.GenericFunction):
"""Construct an hstore value within a SQL expression using the
PostgreSQL ``hstore()`` function.
The :class:`.hstore` function accepts one or two arguments as described
in the PostgreSQL documentation.
E.g.::
from sqlalchemy.dialects.postgresql import array, hstore
select([hstore('key1', 'value1')])
select([
hstore(
array(['key1', 'key2', 'key3']),
array(['value1', 'value2', 'value3'])
)
])
.. seealso::
:class:`.HSTORE` - the PostgreSQL ``HSTORE`` datatype.
"""
type = HSTORE
name = "hstore"
class _HStoreDefinedFunction(sqlfunc.GenericFunction):
type = sqltypes.Boolean
name = "defined"
class _HStoreDeleteFunction(sqlfunc.GenericFunction):
type = HSTORE
name = "delete"
class _HStoreSliceFunction(sqlfunc.GenericFunction):
type = HSTORE
name = "slice"
class _HStoreKeysFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
name = "akeys"
class _HStoreValsFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
name = "avals"
class _HStoreArrayFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
name = "hstore_to_array"
class _HStoreMatrixFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
name = "hstore_to_matrix"
#
# parsing. note that none of this is used with the psycopg2 backend,
# which provides its own native extensions.
#
# My best guess at the parsing rules of hstore literals, since no formal
# grammar is given. This is mostly reverse engineered from PG's input parser
# behavior.
HSTORE_PAIR_RE = re.compile(
r"""
(
"(?P<key> (\\ . | [^"])* )" # Quoted key
)
[ ]* => [ ]* # Pair operator, optional adjoining whitespace
(
(?P<value_null> NULL ) # NULL value
| "(?P<value> (\\ . | [^"])* )" # Quoted value
)
""",
re.VERBOSE,
)
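# illustrative strings matched by HSTORE_PAIR_RE (whitespace around
# "=>" is optional):
#   "key" => "value"
#   "key"=>NULL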
HSTORE_DELIMITER_RE = re.compile(
r"""
[ ]* , [ ]*
""",
re.VERBOSE,
)
def _parse_error(hstore_str, pos):
"""format an unmarshalling error."""
ctx = 20
hslen = len(hstore_str)
parsed_tail = hstore_str[max(pos - ctx - 1, 0) : min(pos, hslen)]
residual = hstore_str[min(pos, hslen) : min(pos + ctx + 1, hslen)]
if len(parsed_tail) > ctx:
parsed_tail = "[...]" + parsed_tail[1:]
if len(residual) > ctx:
residual = residual[:-1] + "[...]"
return "After %r, could not parse residual at position %d: %r" % (
parsed_tail,
pos,
residual,
)
def _parse_hstore(hstore_str):
"""Parse an hstore from its literal string representation.
Attempts to approximate PG's hstore input parsing rules as closely as
possible. Although currently this is not strictly necessary, since the
current implementation of hstore's output syntax is stricter than what it
accepts as input, the documentation makes no guarantee that this will always
be the case.
"""
result = {}
pos = 0
pair_match = HSTORE_PAIR_RE.match(hstore_str)
while pair_match is not None:
key = pair_match.group("key").replace(r"\"", '"').replace("\\\\", "\\")
if pair_match.group("value_null"):
value = None
else:
value = (
pair_match.group("value")
.replace(r"\"", '"')
.replace("\\\\", "\\")
)
result[key] = value
pos += pair_match.end()
delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:])
if delim_match is not None:
pos += delim_match.end()
pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:])
if pos != len(hstore_str):
raise ValueError(_parse_error(hstore_str, pos))
return result
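# illustrative round trip (hypothetical values):
#   _parse_hstore('"a"=>"1", "b"=>NULL') -> {'a': '1', 'b': None}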
def _serialize_hstore(val):
"""Serialize a dictionary into an hstore literal. Keys and values must
both be strings (except None for values).
"""
def esc(s, position):
if position == "value" and s is None:
return "NULL"
elif isinstance(s, util.string_types):
return '"%s"' % s.replace("\\", "\\\\").replace('"', r"\"")
else:
raise ValueError(
"%r in %s position is not a string." % (s, position)
)
return ", ".join(
"%s=>%s" % (esc(k, "key"), esc(v, "value")) for k, v in val.items()
)
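# illustrative output (hypothetical values):
#   _serialize_hstore({'a': '1', 'b': None}) -> '"a"=>"1", "b"=>NULL'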
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/oracle/zxjdbc.py | # oracle/zxjdbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: oracle+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: oracle+zxjdbc://user:pass@host/dbname
:driverurl: http://www.oracle.com/technetwork/database/features/jdbc/index-091264.html
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
""" # noqa
import collections
import decimal
import re
from .base import OracleCompiler
from .base import OracleDialect
from .base import OracleExecutionContext
from ... import sql
from ... import types as sqltypes
from ... import util
from ...connectors.zxJDBC import ZxJDBCConnector
from ...engine import result as _result
from ...sql import expression
SQLException = zxJDBC = None
class _ZxJDBCDate(sqltypes.Date):
def result_processor(self, dialect, coltype):
def process(value):
if value is None:
return None
else:
return value.date()
return process
class _ZxJDBCNumeric(sqltypes.Numeric):
def result_processor(self, dialect, coltype):
# XXX: does the dialect return Decimal or not???
# if it does (in all cases), we could use a None processor as well as
# the to_float generic processor
if self.asdecimal:
def process(value):
if isinstance(value, decimal.Decimal):
return value
else:
return decimal.Decimal(str(value))
else:
def process(value):
if isinstance(value, decimal.Decimal):
return float(value)
else:
return value
return process
class OracleCompiler_zxjdbc(OracleCompiler):
def returning_clause(self, stmt, returning_cols):
self.returning_cols = list(
expression._select_iterables(returning_cols)
)
# within_columns_clause=False so that labels (foo AS bar) don't render
columns = [
self.process(c, within_columns_clause=False)
for c in self.returning_cols
]
if not hasattr(self, "returning_parameters"):
self.returning_parameters = []
binds = []
for i, col in enumerate(self.returning_cols):
dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(
self.dialect.dbapi
)
self.returning_parameters.append((i + 1, dbtype))
bindparam = sql.bindparam(
"ret_%d" % i, value=ReturningParam(dbtype)
)
self.binds[bindparam.key] = bindparam
binds.append(
self.bindparam_string(self._truncate_bindparam(bindparam))
)
return "RETURNING " + ", ".join(columns) + " INTO " + ", ".join(binds)
class OracleExecutionContext_zxjdbc(OracleExecutionContext):
def pre_exec(self):
if hasattr(self.compiled, "returning_parameters"):
# prepare a zxJDBC statement so we can grab its underlying
# OraclePreparedStatement's getReturnResultSet later
self.statement = self.cursor.prepare(self.statement)
def get_result_proxy(self):
if hasattr(self.compiled, "returning_parameters"):
rrs = None
try:
try:
rrs = self.statement.__statement__.getReturnResultSet()
next(rrs)
except SQLException as sqle:
msg = "%s [SQLCode: %d]" % (
sqle.getMessage(),
sqle.getErrorCode(),
)
if sqle.getSQLState() is not None:
msg += " [SQLState: %s]" % sqle.getSQLState()
raise zxJDBC.Error(msg)
else:
row = tuple(
self.cursor.datahandler.getPyObject(rrs, index, dbtype)
for index, dbtype in self.compiled.returning_parameters
)
return ReturningResultProxy(self, row)
finally:
if rrs is not None:
try:
rrs.close()
except SQLException:
pass
self.statement.close()
return _result.ResultProxy(self)
def create_cursor(self):
cursor = self._dbapi_connection.cursor()
cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
return cursor
class ReturningResultProxy(_result.FullyBufferedResultProxy):
"""ResultProxy backed by the RETURNING ResultSet results."""
def __init__(self, context, returning_row):
self._returning_row = returning_row
super(ReturningResultProxy, self).__init__(context)
def _cursor_description(self):
ret = []
for c in self.context.compiled.returning_cols:
if hasattr(c, "name"):
ret.append((c.name, c.type))
else:
ret.append((c.anon_label, c.type))
return ret
def _buffer_rows(self):
return collections.deque([self._returning_row])
class ReturningParam(object):
"""A bindparam value representing a RETURNING parameter.
Specially handled by OracleReturningDataHandler.
"""
def __init__(self, type_):
self.type = type_
def __eq__(self, other):
if isinstance(other, ReturningParam):
return self.type == other.type
return NotImplemented
def __ne__(self, other):
if isinstance(other, ReturningParam):
return self.type != other.type
return NotImplemented
def __repr__(self):
kls = self.__class__
return "<%s.%s object at 0x%x type=%s>" % (
kls.__module__,
kls.__name__,
id(self),
self.type,
)
class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
jdbc_db_name = "oracle"
jdbc_driver_name = "oracle.jdbc.OracleDriver"
statement_compiler = OracleCompiler_zxjdbc
execution_ctx_cls = OracleExecutionContext_zxjdbc
colspecs = util.update_copy(
OracleDialect.colspecs,
{sqltypes.Date: _ZxJDBCDate, sqltypes.Numeric: _ZxJDBCNumeric},
)
def __init__(self, *args, **kwargs):
super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs)
global SQLException, zxJDBC
from java.sql import SQLException
from com.ziclix.python.sql import zxJDBC
from com.ziclix.python.sql.handler import OracleDataHandler
class OracleReturningDataHandler(OracleDataHandler):
"""zxJDBC DataHandler that specially handles ReturningParam."""
def setJDBCObject(self, statement, index, object_, dbtype=None):
if type(object_) is ReturningParam:
statement.registerReturnParameter(index, object_.type)
elif dbtype is None:
OracleDataHandler.setJDBCObject(
self, statement, index, object_
)
else:
OracleDataHandler.setJDBCObject(
self, statement, index, object_, dbtype
)
self.DataHandler = OracleReturningDataHandler
def initialize(self, connection):
super(OracleDialect_zxjdbc, self).initialize(connection)
self.implicit_returning = connection.connection.driverversion >= "10.2"
def _create_jdbc_url(self, url):
return "jdbc:oracle:thin:@%s:%s:%s" % (
url.host,
url.port or 1521,
url.database,
)
def _get_server_version_info(self, connection):
version = re.search(
r"Release ([\d\.]+)", connection.connection.dbversion
).group(1)
return tuple(int(x) for x in version.split("."))
dialect = OracleDialect_zxjdbc
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/oracle/__init__.py | # oracle/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base # noqa
from . import cx_oracle # noqa
from . import zxjdbc # noqa
from .base import BFILE
from .base import BINARY_DOUBLE
from .base import BINARY_FLOAT
from .base import BLOB
from .base import CHAR
from .base import CLOB
from .base import DATE
from .base import DOUBLE_PRECISION
from .base import FLOAT
from .base import INTERVAL
from .base import LONG
from .base import NCHAR
from .base import NCLOB
from .base import NUMBER
from .base import NVARCHAR
from .base import NVARCHAR2
from .base import RAW
from .base import ROWID
from .base import TIMESTAMP
from .base import VARCHAR
from .base import VARCHAR2
base.dialect = dialect = cx_oracle.dialect
__all__ = (
"VARCHAR",
"NVARCHAR",
"CHAR",
"NCHAR",
"DATE",
"NUMBER",
"BLOB",
"BFILE",
"CLOB",
"NCLOB",
"TIMESTAMP",
"RAW",
"FLOAT",
"DOUBLE_PRECISION",
"BINARY_DOUBLE",
"BINARY_FLOAT",
"LONG",
"dialect",
"INTERVAL",
"VARCHAR2",
"NVARCHAR2",
"ROWID",
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/oracle/provision.py | from ... import create_engine
from ... import exc
from ...engine import url as sa_url
from ...testing.provision import configure_follower
from ...testing.provision import create_db
from ...testing.provision import drop_db
from ...testing.provision import follower_url_from_main
from ...testing.provision import log
from ...testing.provision import run_reap_dbs
from ...testing.provision import temp_table_keyword_args
from ...testing.provision import update_db_opts
@create_db.for_db("oracle")
def _oracle_create_db(cfg, eng, ident):
# NOTE: make sure you've run "ALTER DATABASE default tablespace users" or
# similar, so that the default tablespace is not "system"; reflection will
# fail otherwise
with eng.connect() as conn:
conn.execute("create user %s identified by xe" % ident)
conn.execute("create user %s_ts1 identified by xe" % ident)
conn.execute("create user %s_ts2 identified by xe" % ident)
conn.execute("grant dba to %s" % (ident,))
conn.execute("grant unlimited tablespace to %s" % ident)
conn.execute("grant unlimited tablespace to %s_ts1" % ident)
conn.execute("grant unlimited tablespace to %s_ts2" % ident)
@configure_follower.for_db("oracle")
def _oracle_configure_follower(config, ident):
config.test_schema = "%s_ts1" % ident
config.test_schema_2 = "%s_ts2" % ident
def _ora_drop_ignore(conn, dbname):
try:
conn.execute("drop user %s cascade" % dbname)
log.info("Reaped db: %s", dbname)
return True
except exc.DatabaseError as err:
log.warning("couldn't drop db: %s", err)
return False
@drop_db.for_db("oracle")
def _oracle_drop_db(cfg, eng, ident):
with eng.connect() as conn:
# cx_Oracle seems to occasionally leak open connections when a large
# suite is run, even if we confirm we have zero references to
# connection objects.
# while there is a "kill session" command in Oracle,
# it unfortunately does not release the connection sufficiently.
_ora_drop_ignore(conn, ident)
_ora_drop_ignore(conn, "%s_ts1" % ident)
_ora_drop_ignore(conn, "%s_ts2" % ident)
@update_db_opts.for_db("oracle")
def _oracle_update_db_opts(db_url, db_opts):
pass
@run_reap_dbs.for_db("oracle")
def _reap_oracle_dbs(url, idents):
log.info("db reaper connecting to %r", url)
eng = create_engine(url)
with eng.connect() as conn:
log.info("identifiers in file: %s", ", ".join(idents))
to_reap = conn.execute(
"select u.username from all_users u where username "
"like 'TEST_%' and not exists (select username "
"from v$session where username=u.username)"
)
all_names = {username.lower() for (username,) in to_reap}
to_drop = set()
for name in all_names:
if name.endswith("_ts1") or name.endswith("_ts2"):
continue
elif name in idents:
to_drop.add(name)
if "%s_ts1" % name in all_names:
to_drop.add("%s_ts1" % name)
if "%s_ts2" % name in all_names:
to_drop.add("%s_ts2" % name)
dropped = total = 0
for total, username in enumerate(to_drop, 1):
if _ora_drop_ignore(conn, username):
dropped += 1
log.info(
"Dropped %d out of %d stale databases detected", dropped, total
)
@follower_url_from_main.for_db("oracle")
def _oracle_follower_url_from_main(url, ident):
url = sa_url.make_url(url)
url.username = ident
url.password = "xe"
return url
@temp_table_keyword_args.for_db("oracle")
def _oracle_temp_table_keyword_args(cfg, eng):
return {
"prefixes": ["GLOBAL TEMPORARY"],
"oracle_on_commit": "PRESERVE ROWS",
}
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/oracle/cx_oracle.py | # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: oracle+cx_oracle
:name: cx-Oracle
:dbapi: cx_oracle
:connectstring: oracle+cx_oracle://user:pass@host:port/dbname[?key=value&key=value...]
:url: https://oracle.github.io/python-cx_Oracle/
DSN vs. Hostname connections
-----------------------------
The dialect will connect to a DSN if no database name portion is presented,
such as::
engine = create_engine("oracle+cx_oracle://scott:tiger@oracle1120/?encoding=UTF-8&nencoding=UTF-8")
Above, ``oracle1120`` is passed to cx_Oracle as an Oracle datasource name.
Alternatively, if a database name is present, the ``cx_Oracle.makedsn()``
function is used to create an ad-hoc "datasource" name assuming host
and port::
engine = create_engine("oracle+cx_oracle://scott:tiger@hostname:1521/dbname?encoding=UTF-8&nencoding=UTF-8")
Above, the DSN would be created as follows::
>>> import cx_Oracle
>>> cx_Oracle.makedsn("hostname", 1521, sid="dbname")
'(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=hostname)(PORT=1521))(CONNECT_DATA=(SID=dbname)))'
The ``service_name`` parameter, also consumed by ``cx_Oracle.makedsn()``, may
be specified in the URL query string, e.g. ``?service_name=my_service``.
Passing cx_Oracle connect arguments
-----------------------------------
Additional connection arguments can usually be passed via the URL
query string; particular symbols like ``cx_Oracle.SYSDBA`` are intercepted
and converted to the correct symbol::
e = create_engine(
"oracle+cx_oracle://user:pass@dsn?encoding=UTF-8&nencoding=UTF-8&mode=SYSDBA&events=true")
.. versionchanged:: 1.3 the cx_oracle dialect now accepts all argument names
within the URL string itself, to be passed to the cx_Oracle DBAPI. As
was the case earlier but not correctly documented, the
:paramref:`_sa.create_engine.connect_args` parameter also accepts all
cx_Oracle DBAPI connect arguments.
To pass arguments directly to ``.connect()`` without using the query
string, use the :paramref:`_sa.create_engine.connect_args` dictionary.
Any cx_Oracle parameter value and/or constant may be passed, such as::
import cx_Oracle
e = create_engine(
"oracle+cx_oracle://user:pass@dsn",
connect_args={
"encoding": "UTF-8",
"nencoding": "UTF-8",
"mode": cx_Oracle.SYSDBA,
"events": True
}
)
Options consumed by the SQLAlchemy cx_Oracle dialect outside of the driver
--------------------------------------------------------------------------
There are also options that are consumed by the SQLAlchemy cx_oracle dialect
itself. These options are always passed directly to
:func:`_sa.create_engine`, such as::
e = create_engine(
"oracle+cx_oracle://user:pass@dsn", coerce_to_unicode=False)
The parameters accepted by the cx_oracle dialect are as follows (an
illustrative combined configuration follows the list):
* ``arraysize`` - set the cx_oracle.arraysize value on cursors, defaulted
to 50. This setting is significant with cx_Oracle as the contents of LOB
objects are only readable within a "live" row (e.g. within a batch of
50 rows).
* ``auto_convert_lobs`` - defaults to True; See :ref:`cx_oracle_lob`.
* ``coerce_to_unicode`` - see :ref:`cx_oracle_unicode` for detail.
* ``coerce_to_decimal`` - see :ref:`cx_oracle_numeric` for detail.
* ``encoding_errors`` - see :ref:`cx_oracle_unicode_encoding_errors` for detail.
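As a purely illustrative sketch, the above options may be combined in a
single :func:`_sa.create_engine` call (the DSN and values here are
hypothetical)::

    e = create_engine(
        "oracle+cx_oracle://user:pass@dsn",
        arraysize=100,
        auto_convert_lobs=False,
        coerce_to_decimal=False)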
.. _cx_oracle_unicode:
Unicode
-------
As is the case for all DBAPIs under Python 3, all strings are inherently
Unicode strings. Under Python 2, cx_Oracle also supports Python Unicode
objects directly. In all cases, however, the driver requires an explicit
encoding configuration.
Ensuring the Correct Client Encoding
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The long accepted standard for establishing client encoding for nearly all
Oracle related software is via the `NLS_LANG <https://www.oracle.com/database/technologies/faq-nls-lang.html>`_
environment variable. cx_Oracle like most other Oracle drivers will use
this environment variable as the source of its encoding configuration. The
format of this variable is idiosyncratic; a typical value would be
``AMERICAN_AMERICA.AL32UTF8``.
The cx_Oracle driver also supports a programmatic alternative which is to
pass the ``encoding`` and ``nencoding`` parameters directly to its
``.connect()`` function. These can be present in the URL as follows::
engine = create_engine("oracle+cx_oracle://scott:tiger@oracle1120/?encoding=UTF-8&nencoding=UTF-8")
For the meaning of the ``encoding`` and ``nencoding`` parameters, please
consult
`Characters Sets and National Language Support (NLS) <https://cx-oracle.readthedocs.io/en/latest/user_guide/globalization.html#globalization>`_.
.. seealso::
`Characters Sets and National Language Support (NLS) <https://cx-oracle.readthedocs.io/en/latest/user_guide/globalization.html#globalization>`_
- in the cx_Oracle documentation.
Unicode-specific Column datatypes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The Core expression language handles unicode data by use of the :class:`.Unicode`
and :class:`.UnicodeText`
datatypes. These types correspond to the VARCHAR2 and CLOB Oracle datatypes by
default. When using these datatypes with Unicode data, it is expected that
the Oracle database is configured with a Unicode-aware character set, as well
as that the ``NLS_LANG`` environment variable is set appropriately, so that
the VARCHAR2 and CLOB datatypes can accommodate the data.
In the case that the Oracle database is not configured with a Unicode character
set, the two options are to use the :class:`_types.NCHAR` and
:class:`_oracle.NCLOB` datatypes explicitly, or to pass the flag
``use_nchar_for_unicode=True`` to :func:`_sa.create_engine`,
which will cause the
SQLAlchemy dialect to use NCHAR/NCLOB for the :class:`.Unicode` /
:class:`.UnicodeText` datatypes instead of VARCHAR/CLOB.
.. versionchanged:: 1.3 The :class:`.Unicode` and :class:`.UnicodeText`
datatypes now correspond to the ``VARCHAR2`` and ``CLOB`` Oracle datatypes
unless the ``use_nchar_for_unicode=True`` is passed to the dialect
when :func:`_sa.create_engine` is called.
Unicode Coercion of result rows under Python 2
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When result sets are fetched that include strings, under Python 3 the cx_Oracle
DBAPI returns all strings as Python Unicode objects, since Python 3 only has a
Unicode string type. This occurs for data fetched from datatypes such as
VARCHAR2, CHAR, CLOB, NCHAR, NCLOB, etc. In order to provide
cross-compatibility under Python 2, the SQLAlchemy cx_Oracle dialect will add
Unicode-conversion to string data under Python 2 as well. Historically, this
made use of converters that were supplied by cx_Oracle but were found to be
non-performant; SQLAlchemy's own converters are used for the string to Unicode
conversion under Python 2. To disable the Python 2 Unicode conversion for
VARCHAR2, CHAR, and CLOB, the flag ``coerce_to_unicode=False`` can be passed to
:func:`_sa.create_engine`.
.. versionchanged:: 1.3 Unicode conversion is applied to all string values
by default under Python 2. The ``coerce_to_unicode`` flag now defaults to True
and can be set to False to disable the Unicode coercion of strings that are
delivered as VARCHAR2/CHAR/CLOB data.
.. _cx_oracle_unicode_encoding_errors:
Encoding Errors
^^^^^^^^^^^^^^^
For the unusual case that data in the Oracle database is present with a broken
encoding, the dialect accepts a parameter ``encoding_errors`` which will be
passed to Unicode decoding functions in order to affect how decoding errors are
handled. The value is ultimately consumed by the Python `decode
<https://docs.python.org/3/library/stdtypes.html#bytes.decode>`_ function, and
is passed both via cx_Oracle's ``encodingErrors`` parameter consumed by
``Cursor.var()``, as well as SQLAlchemy's own decoding function, as the
cx_Oracle dialect makes use of both under different circumstances.
.. versionadded:: 1.3.11
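For example, to substitute the Unicode replacement character for
undecodable bytes (an illustrative choice; ``"replace"`` is one of the
standard error handler names accepted by Python's ``decode()``)::

    engine = create_engine(
        "oracle+cx_oracle://scott:tiger@dsn",
        encoding_errors="replace")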
.. _cx_oracle_setinputsizes:
Fine grained control over cx_Oracle data binding performance with setinputsizes
-------------------------------------------------------------------------------
The cx_Oracle DBAPI has a deep and fundamental reliance upon the usage of the
DBAPI ``setinputsizes()`` call. The purpose of this call is to establish the
datatypes that are bound to a SQL statement for Python values being passed as
parameters. While virtually no other DBAPI assigns any use to the
``setinputsizes()`` call, the cx_Oracle DBAPI relies upon it heavily in its
interactions with the Oracle client interface, and in some scenarios it is not
possible for SQLAlchemy to know exactly how data should be bound, as some
settings can cause profoundly different performance characteristics, while
altering the type coercion behavior at the same time.
Users of the cx_Oracle dialect are **strongly encouraged** to read through
cx_Oracle's list of built-in datatype symbols at
http://cx-oracle.readthedocs.io/en/latest/module.html#types.
Note that in some cases, significant performance degradation can occur when
using these types vs. not, in particular when specifying ``cx_Oracle.CLOB``.
On the SQLAlchemy side, the :meth:`.DialectEvents.do_setinputsizes` event can
be used both for runtime visibility (e.g. logging) of the setinputsizes step as
well as to fully control how ``setinputsizes()`` is used on a per-statement
basis.
.. versionadded:: 1.2.9 Added :meth:`.DialectEvents.do_setinputsizes`
Example 1 - logging all setinputsizes calls
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following example illustrates how to log the intermediary values from a
SQLAlchemy perspective before they are converted to the raw ``setinputsizes()``
parameter dictionary. The keys of the dictionary are :class:`.BindParameter`
objects which have a ``.key`` and a ``.type`` attribute::
from sqlalchemy import create_engine, event
engine = create_engine("oracle+cx_oracle://scott:tiger@host/xe")
@event.listens_for(engine, "do_setinputsizes")
def _log_setinputsizes(inputsizes, cursor, statement, parameters, context):
for bindparam, dbapitype in inputsizes.items():
log.info(
"Bound parameter name: %s SQLAlchemy type: %r "
"DBAPI object: %s",
bindparam.key, bindparam.type, dbapitype)
Example 2 - remove all bindings to CLOB
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``CLOB`` datatype in cx_Oracle incurs a significant performance overhead,
however is set by default for the ``Text`` type within the SQLAlchemy 1.2
series. This setting can be modified as follows::
from sqlalchemy import create_engine, event
from cx_Oracle import CLOB
engine = create_engine("oracle+cx_oracle://scott:tiger@host/xe")
@event.listens_for(engine, "do_setinputsizes")
def _remove_clob(inputsizes, cursor, statement, parameters, context):
for bindparam, dbapitype in list(inputsizes.items()):
if dbapitype is CLOB:
del inputsizes[bindparam]
.. _cx_oracle_returning:
RETURNING Support
-----------------
The cx_Oracle dialect implements RETURNING using OUT parameters.
The dialect supports RETURNING; however, cx_Oracle 6 or above is
recommended for complete support.
.. _cx_oracle_lob:
LOB Objects
-----------
cx_oracle returns oracle LOBs using the cx_oracle.LOB object. SQLAlchemy
converts these to strings so that the interface of the Binary type is
consistent with that of other backends, which takes place within a cx_Oracle
outputtypehandler.
cx_Oracle prior to version 6 would require that LOB objects be read before
a new batch of rows would be read, as determined by the ``cursor.arraysize``.
As of the 6 series, this limitation has been lifted. Nevertheless, because
SQLAlchemy pre-reads these LOBs up front, this issue is avoided in any case.
To disable the auto "read()" feature of the dialect, the flag
``auto_convert_lobs=False`` may be passed to :func:`_sa.create_engine`. Under
the cx_Oracle 5 series, having this flag turned off means there is the chance
of reading from a stale LOB object if not read as it is fetched. With
cx_Oracle 6, this issue is resolved.
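An illustrative engine with this conversion disabled (the connection
string is hypothetical)::

    engine = create_engine(
        "oracle+cx_oracle://scott:tiger@dsn",
        auto_convert_lobs=False)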
.. versionchanged:: 1.2 the LOB handling system has been greatly simplified
internally to make use of outputtypehandlers, and no longer makes use
of alternate "buffered" result set objects.
Two Phase Transactions Not Supported
-------------------------------------
Two phase transactions are **not supported** under cx_Oracle due to poor
driver support. As of cx_Oracle 6.0b1, the interface for
two phase transactions has been changed to be more of a direct pass-through
to the underlying OCI layer with less automation. The additional logic
to support this system is not implemented in SQLAlchemy.
.. _cx_oracle_numeric:
Precision Numerics
------------------
SQLAlchemy's numeric types can handle receiving and returning values as Python
``Decimal`` objects or float objects. When a :class:`.Numeric` object, or a
subclass such as :class:`.Float`, :class:`_oracle.DOUBLE_PRECISION` etc. is in
use, the :paramref:`.Numeric.asdecimal` flag determines if values should be
coerced to ``Decimal`` upon return, or returned as float objects. To make
matters more complicated under Oracle, Oracle's ``NUMBER`` type can also
represent integer values if the "scale" is zero, so the Oracle-specific
:class:`_oracle.NUMBER` type takes this into account as well.
The cx_Oracle dialect makes extensive use of connection- and cursor-level
"outputtypehandler" callables in order to coerce numeric values as requested.
These callables are specific to the particular flavor of :class:`.Numeric` in
use, as well as to the case where no SQLAlchemy typing objects are present.
There are observed scenarios where Oracle may send incomplete or ambiguous
information
about the numeric types being returned, such as a query where the numeric types
are buried under multiple levels of subquery. The type handlers do their best
to make the right decision in all cases, deferring to the underlying cx_Oracle
DBAPI for all those cases where the driver can make the best decision.
When no typing objects are present, as when executing plain SQL strings, a
default "outputtypehandler" is present which will generally return numeric
values which specify precision and scale as Python ``Decimal`` objects. To
disable this coercion to decimal for performance reasons, pass the flag
``coerce_to_decimal=False`` to :func:`_sa.create_engine`::
engine = create_engine("oracle+cx_oracle://dsn", coerce_to_decimal=False)
The ``coerce_to_decimal`` flag only impacts the results of plain string
SQL statements that are not otherwise associated with a :class:`.Numeric`
SQLAlchemy type (or a subclass of such).
.. versionchanged:: 1.2 The numeric handling system for cx_Oracle has been
reworked to take advantage of newer cx_Oracle features as well
as better integration of outputtypehandlers.
""" # noqa
from __future__ import absolute_import
import collections
import decimal
import random
import re
from . import base as oracle
from .base import OracleCompiler
from .base import OracleDialect
from .base import OracleExecutionContext
from ... import exc
from ... import processors
from ... import types as sqltypes
from ... import util
from ...engine import result as _result
from ...util import compat
class _OracleInteger(sqltypes.Integer):
def get_dbapi_type(self, dbapi):
# see https://github.com/oracle/python-cx_Oracle/issues/
# 208#issuecomment-409715955
return int
def _cx_oracle_var(self, dialect, cursor):
cx_Oracle = dialect.dbapi
return cursor.var(
cx_Oracle.STRING, 255, arraysize=cursor.arraysize, outconverter=int
)
def _cx_oracle_outputtypehandler(self, dialect):
def handler(cursor, name, default_type, size, precision, scale):
return self._cx_oracle_var(dialect, cursor)
return handler
class _OracleNumeric(sqltypes.Numeric):
is_number = False
def bind_processor(self, dialect):
if self.scale == 0:
return None
elif self.asdecimal:
processor = processors.to_decimal_processor_factory(
decimal.Decimal, self._effective_decimal_return_scale
)
def process(value):
if isinstance(value, (int, float)):
return processor(value)
elif value is not None and value.is_infinite():
return float(value)
else:
return value
return process
else:
return processors.to_float
def result_processor(self, dialect, coltype):
return None
def _cx_oracle_outputtypehandler(self, dialect):
cx_Oracle = dialect.dbapi
is_cx_oracle_6 = dialect._is_cx_oracle_6
def handler(cursor, name, default_type, size, precision, scale):
outconverter = None
if precision:
if self.asdecimal:
if default_type == cx_Oracle.NATIVE_FLOAT:
# receiving float and doing Decimal after the fact
# allows for float("inf") to be handled
type_ = default_type
outconverter = decimal.Decimal
elif is_cx_oracle_6:
type_ = decimal.Decimal
else:
type_ = cx_Oracle.STRING
outconverter = dialect._to_decimal
else:
if self.is_number and scale == 0:
# integer. cx_Oracle is observed to handle the widest
# variety of ints when no directives are passed,
# from 5.2 to 7.0. See [ticket:4457]
return None
else:
type_ = cx_Oracle.NATIVE_FLOAT
else:
if self.asdecimal:
if default_type == cx_Oracle.NATIVE_FLOAT:
type_ = default_type
outconverter = decimal.Decimal
elif is_cx_oracle_6:
type_ = decimal.Decimal
else:
type_ = cx_Oracle.STRING
outconverter = dialect._to_decimal
else:
if self.is_number and scale == 0:
# integer. cx_Oracle is observed to handle the widest
# variety of ints when no directives are passed,
# from 5.2 to 7.0. See [ticket:4457]
return None
else:
type_ = cx_Oracle.NATIVE_FLOAT
return cursor.var(
type_,
255,
arraysize=cursor.arraysize,
outconverter=outconverter,
)
return handler
class _OracleBinaryFloat(_OracleNumeric):
def get_dbapi_type(self, dbapi):
return dbapi.NATIVE_FLOAT
class _OracleBINARY_FLOAT(_OracleBinaryFloat, oracle.BINARY_FLOAT):
pass
class _OracleBINARY_DOUBLE(_OracleBinaryFloat, oracle.BINARY_DOUBLE):
pass
class _OracleNUMBER(_OracleNumeric):
is_number = True
class _OracleDate(sqltypes.Date):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return value.date()
else:
return value
return process
# TODO: the names used across CHAR / VARCHAR / NCHAR / NVARCHAR
# here are inconsistent and not very good
class _OracleChar(sqltypes.CHAR):
def get_dbapi_type(self, dbapi):
return dbapi.FIXED_CHAR
class _OracleNChar(sqltypes.NCHAR):
def get_dbapi_type(self, dbapi):
return dbapi.FIXED_NCHAR
class _OracleUnicodeStringNCHAR(oracle.NVARCHAR2):
def get_dbapi_type(self, dbapi):
return dbapi.NCHAR
class _OracleUnicodeStringCHAR(sqltypes.Unicode):
def get_dbapi_type(self, dbapi):
return None
class _OracleUnicodeTextNCLOB(oracle.NCLOB):
def get_dbapi_type(self, dbapi):
return dbapi.NCLOB
class _OracleUnicodeTextCLOB(sqltypes.UnicodeText):
def get_dbapi_type(self, dbapi):
return dbapi.CLOB
class _OracleText(sqltypes.Text):
def get_dbapi_type(self, dbapi):
return dbapi.CLOB
class _OracleLong(oracle.LONG):
def get_dbapi_type(self, dbapi):
return dbapi.LONG_STRING
class _OracleString(sqltypes.String):
pass
class _OracleEnum(sqltypes.Enum):
def bind_processor(self, dialect):
enum_proc = sqltypes.Enum.bind_processor(self, dialect)
def process(value):
raw_str = enum_proc(value)
return raw_str
return process
class _OracleBinary(sqltypes.LargeBinary):
def get_dbapi_type(self, dbapi):
return dbapi.BLOB
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if not dialect.auto_convert_lobs:
return None
else:
return super(_OracleBinary, self).result_processor(
dialect, coltype
)
class _OracleInterval(oracle.INTERVAL):
def get_dbapi_type(self, dbapi):
return dbapi.INTERVAL
class _OracleRaw(oracle.RAW):
pass
class _OracleRowid(oracle.ROWID):
def get_dbapi_type(self, dbapi):
return dbapi.ROWID
class OracleCompiler_cx_oracle(OracleCompiler):
_oracle_cx_sql_compiler = True
def bindparam_string(self, name, **kw):
quote = getattr(name, "quote", None)
if (
quote is True
or quote is not False
and self.preparer._bindparam_requires_quotes(name)
):
if kw.get("expanding", False):
raise exc.CompileError(
"Can't use expanding feature with parameter name "
"%r on Oracle; it requires quoting which is not supported "
"in this context." % name
)
quoted_name = '"%s"' % name
self._quoted_bind_names[name] = quoted_name
return OracleCompiler.bindparam_string(self, quoted_name, **kw)
else:
return OracleCompiler.bindparam_string(self, name, **kw)
class OracleExecutionContext_cx_oracle(OracleExecutionContext):
out_parameters = None
def _setup_quoted_bind_names(self):
quoted_bind_names = self.compiled._quoted_bind_names
if quoted_bind_names:
for param in self.parameters:
for fromname, toname in quoted_bind_names.items():
param[toname] = param[fromname]
del param[fromname]
def _handle_out_parameters(self):
# if a single execute, check for outparams
if len(self.compiled_parameters) == 1:
quoted_bind_names = self.compiled._quoted_bind_names
for bindparam in self.compiled.binds.values():
if bindparam.isoutparam:
name = self.compiled.bind_names[bindparam]
type_impl = bindparam.type.dialect_impl(self.dialect)
if hasattr(type_impl, "_cx_oracle_var"):
self.out_parameters[name] = type_impl._cx_oracle_var(
self.dialect, self.cursor
)
else:
dbtype = type_impl.get_dbapi_type(self.dialect.dbapi)
if dbtype is None:
raise exc.InvalidRequestError(
"Cannot create out parameter for parameter "
"%r - its type %r is not supported by"
" cx_oracle" % (bindparam.key, bindparam.type)
)
self.out_parameters[name] = self.cursor.var(dbtype)
self.parameters[0][
quoted_bind_names.get(name, name)
] = self.out_parameters[name]
def _generate_cursor_outputtype_handler(self):
output_handlers = {}
for (keyname, name, objects, type_) in self.compiled._result_columns:
handler = type_._cached_custom_processor(
self.dialect,
"cx_oracle_outputtypehandler",
self._get_cx_oracle_type_handler,
)
if handler:
denormalized_name = self.dialect.denormalize_name(keyname)
output_handlers[denormalized_name] = handler
if output_handlers:
default_handler = self._dbapi_connection.outputtypehandler
def output_type_handler(
cursor, name, default_type, size, precision, scale
):
if name in output_handlers:
return output_handlers[name](
cursor, name, default_type, size, precision, scale
)
else:
return default_handler(
cursor, name, default_type, size, precision, scale
)
self.cursor.outputtypehandler = output_type_handler
def _get_cx_oracle_type_handler(self, impl):
if hasattr(impl, "_cx_oracle_outputtypehandler"):
return impl._cx_oracle_outputtypehandler(self.dialect)
else:
return None
def pre_exec(self):
if not getattr(self.compiled, "_oracle_cx_sql_compiler", False):
return
self.out_parameters = {}
if self.compiled._quoted_bind_names:
self._setup_quoted_bind_names()
self.set_input_sizes(
self.compiled._quoted_bind_names,
include_types=self.dialect._include_setinputsizes,
)
self._handle_out_parameters()
self._generate_cursor_outputtype_handler()
def create_cursor(self):
c = self._dbapi_connection.cursor()
if self.dialect.arraysize:
c.arraysize = self.dialect.arraysize
return c
def get_result_proxy(self):
if self.out_parameters and self.compiled.returning:
returning_params = [
self.dialect._returningval(self.out_parameters["ret_%d" % i])
for i in range(len(self.out_parameters))
]
return ReturningResultProxy(self, returning_params)
result = _result.ResultProxy(self)
if self.out_parameters:
if (
self.compiled_parameters is not None
and len(self.compiled_parameters) == 1
):
result.out_parameters = out_parameters = {}
for bind, name in self.compiled.bind_names.items():
if name in self.out_parameters:
type_ = bind.type
impl_type = type_.dialect_impl(self.dialect)
dbapi_type = impl_type.get_dbapi_type(
self.dialect.dbapi
)
result_processor = impl_type.result_processor(
self.dialect, dbapi_type
)
if result_processor is not None:
out_parameters[name] = result_processor(
self.dialect._paramval(
self.out_parameters[name]
)
)
else:
out_parameters[name] = self.dialect._paramval(
self.out_parameters[name]
)
else:
result.out_parameters = dict(
(k, self._dialect._paramval(v))
for k, v in self.out_parameters.items()
)
return result
class ReturningResultProxy(_result.FullyBufferedResultProxy):
"""Result proxy which stuffs the _returning clause + outparams
into the fetch."""
def __init__(self, context, returning_params):
self._returning_params = returning_params
super(ReturningResultProxy, self).__init__(context)
def _cursor_description(self):
returning = self.context.compiled.returning
return [
(getattr(col, "name", col.anon_label), None) for col in returning
]
def _buffer_rows(self):
return collections.deque([tuple(self._returning_params)])
class OracleDialect_cx_oracle(OracleDialect):
execution_ctx_cls = OracleExecutionContext_cx_oracle
statement_compiler = OracleCompiler_cx_oracle
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_unicode_statements = True
supports_unicode_binds = True
driver = "cx_oracle"
colspecs = {
sqltypes.Numeric: _OracleNumeric,
sqltypes.Float: _OracleNumeric,
oracle.BINARY_FLOAT: _OracleBINARY_FLOAT,
oracle.BINARY_DOUBLE: _OracleBINARY_DOUBLE,
sqltypes.Integer: _OracleInteger,
oracle.NUMBER: _OracleNUMBER,
sqltypes.Date: _OracleDate,
sqltypes.LargeBinary: _OracleBinary,
sqltypes.Boolean: oracle._OracleBoolean,
sqltypes.Interval: _OracleInterval,
oracle.INTERVAL: _OracleInterval,
sqltypes.Text: _OracleText,
sqltypes.String: _OracleString,
sqltypes.UnicodeText: _OracleUnicodeTextCLOB,
sqltypes.CHAR: _OracleChar,
sqltypes.NCHAR: _OracleNChar,
sqltypes.Enum: _OracleEnum,
oracle.LONG: _OracleLong,
oracle.RAW: _OracleRaw,
sqltypes.Unicode: _OracleUnicodeStringCHAR,
sqltypes.NVARCHAR: _OracleUnicodeStringNCHAR,
oracle.NCLOB: _OracleUnicodeTextNCLOB,
oracle.ROWID: _OracleRowid,
}
execute_sequence_format = list
_cx_oracle_threaded = None
@util.deprecated_params(
threaded=(
"1.3",
"The 'threaded' parameter to the cx_oracle dialect "
"is deprecated as a dialect-level argument, and will be removed "
"in a future release. As of version 1.3, it defaults to False "
"rather than True. The 'threaded' option can be passed to "
"cx_Oracle directly in the URL query string passed to "
":func:`_sa.create_engine`.",
)
)
def __init__(
self,
auto_convert_lobs=True,
coerce_to_unicode=True,
coerce_to_decimal=True,
arraysize=50,
encoding_errors=None,
threaded=None,
**kwargs
):
OracleDialect.__init__(self, **kwargs)
self.arraysize = arraysize
self.encoding_errors = encoding_errors
if threaded is not None:
self._cx_oracle_threaded = threaded
self.auto_convert_lobs = auto_convert_lobs
self.coerce_to_unicode = coerce_to_unicode
self.coerce_to_decimal = coerce_to_decimal
if self._use_nchar_for_unicode:
self.colspecs = self.colspecs.copy()
self.colspecs[sqltypes.Unicode] = _OracleUnicodeStringNCHAR
self.colspecs[sqltypes.UnicodeText] = _OracleUnicodeTextNCLOB
cx_Oracle = self.dbapi
if cx_Oracle is None:
self._include_setinputsizes = {}
self.cx_oracle_ver = (0, 0, 0)
else:
self.cx_oracle_ver = self._parse_cx_oracle_ver(cx_Oracle.version)
if self.cx_oracle_ver < (5, 2) and self.cx_oracle_ver > (0, 0, 0):
raise exc.InvalidRequestError(
"cx_Oracle version 5.2 and above are supported"
)
self._include_setinputsizes = {
cx_Oracle.DATETIME,
cx_Oracle.NCLOB,
cx_Oracle.CLOB,
cx_Oracle.LOB,
cx_Oracle.NCHAR,
cx_Oracle.FIXED_NCHAR,
cx_Oracle.BLOB,
cx_Oracle.FIXED_CHAR,
cx_Oracle.TIMESTAMP,
_OracleInteger,
_OracleBINARY_FLOAT,
_OracleBINARY_DOUBLE,
}
self._paramval = lambda value: value.getvalue()
# https://github.com/oracle/python-cx_Oracle/issues/176#issuecomment-386821291
# https://github.com/oracle/python-cx_Oracle/issues/224
self._values_are_lists = self.cx_oracle_ver >= (6, 3)
if self._values_are_lists:
cx_Oracle.__future__.dml_ret_array_val = True
def _returningval(value):
try:
return value.values[0][0]
except IndexError:
return None
self._returningval = _returningval
else:
self._returningval = self._paramval
self._is_cx_oracle_6 = self.cx_oracle_ver >= (6,)
@property
def _cursor_var_unicode_kwargs(self):
if self.encoding_errors:
if self.cx_oracle_ver >= (6, 4):
return {"encodingErrors": self.encoding_errors}
else:
util.warn(
"cx_oracle version %r does not support encodingErrors"
% (self.cx_oracle_ver,)
)
return {}
def _parse_cx_oracle_ver(self, version):
m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", version)
if m:
return tuple(int(x) for x in m.group(1, 2, 3) if x is not None)
else:
return (0, 0, 0)
@classmethod
def dbapi(cls):
import cx_Oracle
return cx_Oracle
def initialize(self, connection):
super(OracleDialect_cx_oracle, self).initialize(connection)
if self._is_oracle_8:
self.supports_unicode_binds = False
self._detect_decimal_char(connection)
def _detect_decimal_char(self, connection):
# we have the option to change this setting upon connect,
# or just look at what it is upon connect and convert.
# to minimize the chance of interference with changes to
# NLS_TERRITORY or formatting behavior of the DB, we opt
# to just look at it
self._decimal_char = connection.scalar(
"select value from nls_session_parameters "
"where parameter = 'NLS_NUMERIC_CHARACTERS'"
)[0]
if self._decimal_char != ".":
_detect_decimal = self._detect_decimal
_to_decimal = self._to_decimal
self._detect_decimal = lambda value: _detect_decimal(
value.replace(self._decimal_char, ".")
)
self._to_decimal = lambda value: _to_decimal(
value.replace(self._decimal_char, ".")
)
def _detect_decimal(self, value):
if "." in value:
return self._to_decimal(value)
else:
return int(value)
_to_decimal = decimal.Decimal
def _generate_connection_outputtype_handler(self):
"""establish the default outputtypehandler established at the
connection level.
"""
dialect = self
cx_Oracle = dialect.dbapi
number_handler = _OracleNUMBER(
asdecimal=True
)._cx_oracle_outputtypehandler(dialect)
float_handler = _OracleNUMBER(
asdecimal=False
)._cx_oracle_outputtypehandler(dialect)
def output_type_handler(
cursor, name, default_type, size, precision, scale
):
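            # NOTE: the identity check below guards against cx_Oracle
            # versions in which NATIVE_FLOAT compares equal to NUMBER
            # (inferred from this comparison; not a documented cx_Oracle
            # guarantee)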
if (
default_type == cx_Oracle.NUMBER
and default_type is not cx_Oracle.NATIVE_FLOAT
):
if not dialect.coerce_to_decimal:
return None
elif precision == 0 and scale in (0, -127):
# ambiguous type, this occurs when selecting
# numbers from deep subqueries
return cursor.var(
cx_Oracle.STRING,
255,
outconverter=dialect._detect_decimal,
arraysize=cursor.arraysize,
)
elif precision and scale > 0:
return number_handler(
cursor, name, default_type, size, precision, scale
)
else:
return float_handler(
cursor, name, default_type, size, precision, scale
)
# allow all strings to come back natively as Unicode
elif (
dialect.coerce_to_unicode
and default_type in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR,)
and default_type is not cx_Oracle.CLOB
and default_type is not cx_Oracle.NCLOB
):
if compat.py2k:
outconverter = processors.to_unicode_processor_factory(
dialect.encoding, errors=dialect.encoding_errors
)
return cursor.var(
cx_Oracle.STRING,
size,
cursor.arraysize,
outconverter=outconverter,
)
else:
return cursor.var(
util.text_type,
size,
cursor.arraysize,
**dialect._cursor_var_unicode_kwargs
)
elif dialect.auto_convert_lobs and default_type in (
cx_Oracle.CLOB,
cx_Oracle.NCLOB,
):
if compat.py2k:
outconverter = processors.to_unicode_processor_factory(
dialect.encoding, errors=dialect.encoding_errors
)
return cursor.var(
cx_Oracle.LONG_STRING,
size,
cursor.arraysize,
outconverter=outconverter,
)
else:
return cursor.var(
cx_Oracle.LONG_STRING,
size,
cursor.arraysize,
**dialect._cursor_var_unicode_kwargs
)
elif dialect.auto_convert_lobs and default_type in (
cx_Oracle.BLOB,
):
return cursor.var(
cx_Oracle.LONG_BINARY, size, cursor.arraysize,
)
return output_type_handler
def on_connect(self):
output_type_handler = self._generate_connection_outputtype_handler()
def on_connect(conn):
conn.outputtypehandler = output_type_handler
return on_connect
def create_connect_args(self, url):
opts = dict(url.query)
# deprecated in 1.3
for opt in ("use_ansi", "auto_convert_lobs"):
if opt in opts:
util.warn_deprecated(
"cx_oracle dialect option %r should only be passed to "
"create_engine directly, not within the URL string" % opt
)
util.coerce_kw_type(opts, opt, bool)
setattr(self, opt, opts.pop(opt))
database = url.database
service_name = opts.pop("service_name", None)
if database or service_name:
# if we have a database, then we have a remote host
port = url.port
if port:
port = int(port)
else:
port = 1521
if database and service_name:
raise exc.InvalidRequestError(
'"service_name" option shouldn\'t '
'be used with a "database" part of the url'
)
if database:
makedsn_kwargs = {"sid": database}
if service_name:
makedsn_kwargs = {"service_name": service_name}
dsn = self.dbapi.makedsn(url.host, port, **makedsn_kwargs)
else:
# we have a local tnsname
dsn = url.host
if dsn is not None:
opts["dsn"] = dsn
if url.password is not None:
opts["password"] = url.password
if url.username is not None:
opts["user"] = url.username
if self._cx_oracle_threaded is not None:
opts.setdefault("threaded", self._cx_oracle_threaded)
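        # URL query string values arrive as strings; the converter below
        # interprets each one either as an integer or as the name of a
        # cx_Oracle module-level constant (e.g. "SYSDBA" -> cx_Oracle.SYSDBA)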
def convert_cx_oracle_constant(value):
if isinstance(value, util.string_types):
try:
int_val = int(value)
except ValueError:
value = value.upper()
return getattr(self.dbapi, value)
else:
return int_val
else:
return value
util.coerce_kw_type(opts, "mode", convert_cx_oracle_constant)
util.coerce_kw_type(opts, "threaded", bool)
util.coerce_kw_type(opts, "events", bool)
util.coerce_kw_type(opts, "purity", convert_cx_oracle_constant)
return ([], opts)
def _get_server_version_info(self, connection):
return tuple(int(x) for x in connection.connection.version.split("."))
def is_disconnect(self, e, connection, cursor):
(error,) = e.args
if isinstance(
e, (self.dbapi.InterfaceError, self.dbapi.DatabaseError)
) and "not connected" in str(e):
return True
if hasattr(error, "code"):
# ORA-00028: your session has been killed
# ORA-03114: not connected to ORACLE
# ORA-03113: end-of-file on communication channel
# ORA-03135: connection lost contact
# ORA-01033: ORACLE initialization or shutdown in progress
# ORA-02396: exceeded maximum idle time, please connect again
# TODO: Others ?
return error.code in (28, 3114, 3113, 3135, 1033, 2396)
else:
return False
@util.deprecated(
"1.2",
"The create_xid() method of the cx_Oracle dialect is deprecated and "
"will be removed in a future release. "
"Two-phase transaction support is no longer functional "
"in SQLAlchemy's cx_Oracle dialect as of cx_Oracle 6.0b1, which no "
"longer supports the API that SQLAlchemy relied upon.",
)
def create_xid(self):
"""create a two-phase transaction ID.
this id will be passed to do_begin_twophase(), do_rollback_twophase(),
do_commit_twophase(). its format is unspecified.
"""
id_ = random.randint(0, 2 ** 128)
return (0x1234, "%032x" % id_, "%032x" % 9)
def do_executemany(self, cursor, statement, parameters, context=None):
if isinstance(parameters, tuple):
parameters = list(parameters)
cursor.executemany(statement, parameters)
def do_begin_twophase(self, connection, xid):
connection.connection.begin(*xid)
def do_prepare_twophase(self, connection, xid):
result = connection.connection.prepare()
connection.info["cx_oracle_prepared"] = result
def do_rollback_twophase(
self, connection, xid, is_prepared=True, recover=False
):
self.do_rollback(connection.connection)
def do_commit_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if not is_prepared:
self.do_commit(connection.connection)
else:
oci_prepared = connection.info["cx_oracle_prepared"]
if oci_prepared:
self.do_commit(connection.connection)
def do_recover_twophase(self, connection):
connection.info.pop("cx_oracle_prepared", None)
def set_isolation_level(self, connection, level):
if hasattr(connection, "connection"):
dbapi_connection = connection.connection
else:
dbapi_connection = connection
if level == "AUTOCOMMIT":
dbapi_connection.autocommit = True
else:
dbapi_connection.autocommit = False
super(OracleDialect_cx_oracle, self).set_isolation_level(
dbapi_connection, level
)
dialect = OracleDialect_cx_oracle
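# A minimal connection sketch for this dialect (the DSN, credentials and
# option values below are illustrative placeholders, not recommendations):
#
#     from sqlalchemy import create_engine
#
#     engine = create_engine(
#         "oracle+cx_oracle://scott:tiger@dsn",
#         arraysize=100,            # cursor.arraysize used for fetches
#         coerce_to_decimal=False,  # return NUMBER values as float
#     )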
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/oracle/base.py | # oracle/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: oracle
:name: Oracle
Oracle versions 8 through current (11g at the time of this writing) are
supported.
Auto Increment Behavior
-----------------------
SQLAlchemy Table objects which include integer primary keys are usually
assumed to have "autoincrementing" behavior, meaning they can generate their
own primary key values upon INSERT. Since Oracle has no "autoincrement"
feature, SQLAlchemy relies upon sequences to produce these values. With the
Oracle dialect, *a sequence must always be explicitly specified to enable
autoincrement*. This is divergent from the majority of documentation
examples which assume the usage of an autoincrement-capable database. To
specify sequences, use the sqlalchemy.schema.Sequence object which is passed
to a Column construct::
t = Table('mytable', metadata,
Column('id', Integer, Sequence('id_seq'), primary_key=True),
Column(...), ...
)
This step is also required when using table reflection, i.e. autoload=True::
t = Table('mytable', metadata,
Column('id', Integer, Sequence('id_seq'), primary_key=True),
autoload=True
)
Transaction Isolation Level / Autocommit
----------------------------------------
The Oracle database supports "READ COMMITTED" and "SERIALIZABLE" modes
of isolation, however the SQLAlchemy Oracle dialect currently only has
explicit support for "READ COMMITTED". It is possible to emit a
"SET TRANSACTION" statement on a connection in order to use SERIALIZABLE
isolation, however the SQLAlchemy dialect will remain unaware of this setting,
such as if the :meth:`_engine.Connection.get_isolation_level` method is used;
this method is hardcoded to return "READ COMMITTED" right now.
The AUTOCOMMIT isolation level is also supported by the cx_Oracle dialect.
To set this level, use per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="AUTOCOMMIT"
)
Valid values for ``isolation_level`` include:
* ``READ COMMITTED``
* ``AUTOCOMMIT``
.. versionadded:: 1.3.16 added support for AUTOCOMMIT to the cx_oracle dialect
   as well as the notion of a default isolation level, currently hardcoded
to "READ COMMITTED".
Identifier Casing
-----------------
In Oracle, the data dictionary represents all case insensitive identifier
names using UPPERCASE text. SQLAlchemy on the other hand considers an
all-lower case identifier name to be case insensitive. The Oracle dialect
converts all case insensitive identifiers to and from those two formats during
schema level communication, such as reflection of tables and indexes. Using
an UPPERCASE name on the SQLAlchemy side indicates a case sensitive
identifier, and SQLAlchemy will quote the name - this will cause mismatches
against data dictionary data received from Oracle, so unless identifier names
have been truly created as case sensitive (i.e. using quoted names), all
lowercase names should be used on the SQLAlchemy side.
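For example (an illustrative sketch; the table names are placeholders), a
lower case name round-trips transparently, while a name containing upper
case characters is quoted and treated as case sensitive::

    from sqlalchemy import Column, Integer, MetaData, Table

    metadata = MetaData()

    # case insensitive; stored as MY_TABLE in the data dictionary
    t1 = Table('my_table', metadata, Column('id', Integer))

    # quoted and case sensitive; will not match data dictionary rows
    # unless the table was actually created with a quoted name
    t2 = Table('SomeTable', metadata, Column('id', Integer))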
.. _oracle_max_identifier_lengths:
Max Identifier Lengths
----------------------
Oracle has changed the default max identifier length as of Oracle Server
version 12.2. Prior to this version, the length was 30, and for 12.2 and
greater it is now 128. This change impacts SQLAlchemy in the area of
generated SQL label names as well as the generation of constraint names,
particularly in the case where the constraint naming convention feature
described at :ref:`constraint_naming_conventions` is being used.
To assist with this change and others, Oracle includes the concept of a
"compatibility" version, which is a version number that is independent of the
actual server version in order to assist with migration of Oracle databases,
and may be configured within the Oracle server itself. This compatibility
version is retrieved using the query ``SELECT value FROM v$parameter WHERE
name = 'compatible';``. The SQLAlchemy Oracle dialect, when tasked with
determining the default max identifier length, will attempt to use this query
upon first connect in order to determine the effective compatibility version of
the server, which determines what the maximum allowed identifier length is for
the server. If this view is not available, the server version information is
used instead.
For the duration of the SQLAlchemy 1.3 series, the default max identifier
length will remain at 30, even if compatibility version 12.2 or greater is in
use. When the newer version is detected, a warning will be emitted upon first
connect, which refers the user to make use of the
:paramref:`_sa.create_engine.max_identifier_length`
parameter in order to assure
forwards compatibility with SQLAlchemy 1.4, which will be changing this value
to 128 when compatibility version 12.2 or greater is detected.
Using :paramref:`_sa.create_engine.max_identifier_length`,
the effective identifier
length used by the SQLAlchemy dialect will be used as given, overriding the
current default value of 30, so that when Oracle 12.2 or greater is used, the
newer identifier length may be taken advantage of::
engine = create_engine(
"oracle+cx_oracle://scott:tiger@oracle122",
max_identifier_length=128)
The maximum identifier length comes into play both when generating anonymized
SQL labels in SELECT statements, but more crucially when generating constraint
names from a naming convention. It is this area that has created the need for
SQLAlchemy to change this default conservatively. For example, the following
naming convention produces two very different constraint names based on the
identifier length::
from sqlalchemy import Column
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy.dialects import oracle
from sqlalchemy.schema import CreateIndex
m = MetaData(naming_convention={"ix": "ix_%(column_0N_name)s"})
t = Table(
"t",
m,
Column("some_column_name_1", Integer),
Column("some_column_name_2", Integer),
Column("some_column_name_3", Integer),
)
ix = Index(
None,
t.c.some_column_name_1,
t.c.some_column_name_2,
t.c.some_column_name_3,
)
oracle_dialect = oracle.dialect(max_identifier_length=30)
print(CreateIndex(ix).compile(dialect=oracle_dialect))
With an identifier length of 30, the above CREATE INDEX looks like::
CREATE INDEX ix_some_column_name_1s_70cd ON t
(some_column_name_1, some_column_name_2, some_column_name_3)
However with length=128, it becomes::
CREATE INDEX ix_some_column_name_1some_column_name_2some_column_name_3 ON t
(some_column_name_1, some_column_name_2, some_column_name_3)
The implication here is that by upgrading SQLAlchemy to version 1.4 on
an existing Oracle 12.2 or greater database, the generation of constraint
names will change, which can impact the behavior of database migrations.
A key example is a migration that wishes to "DROP CONSTRAINT" on a name that
was previously generated with the shorter length. This migration will fail
when the identifier length is changed without the name of the index or
constraint first being adjusted.
Therefore, applications are strongly advised to make use of
:paramref:`_sa.create_engine.max_identifier_length`
in order to maintain control
of the generation of truncated names, and to fully review and test all database
migrations in a staging environment when changing this value to ensure that the
impact of this change has been mitigated.
.. versionadded:: 1.3.9 Added the
:paramref:`_sa.create_engine.max_identifier_length` parameter; the Oracle
dialect now detects compatibility version 12.2 or greater and warns
   about upcoming max identifier length changes in SQLAlchemy 1.4.
LIMIT/OFFSET Support
--------------------
Oracle has no support for the LIMIT or OFFSET keywords. SQLAlchemy uses
a wrapped subquery approach in conjunction with ROWNUM. The exact methodology
is taken from
http://www.oracle.com/technetwork/issue-archive/2006/06-sep/o56asktom-086197.html .
There are two options which affect its behavior:
* the "FIRST ROWS()" optimization keyword is not used by default. To enable
the usage of this optimization directive, specify ``optimize_limits=True``
to :func:`_sa.create_engine`.
* the values passed for the limit/offset are sent as bound parameters. Some
users have observed that Oracle produces a poor query plan when the values
are sent as binds and not rendered literally. To render the limit/offset
values literally within the SQL statement, specify
``use_binds_for_limits=False`` to :func:`_sa.create_engine`.
Some users have reported better performance when the entirely different
approach of a window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to
provide LIMIT/OFFSET (note that the majority of users don't observe this).
To suit this case the method used for LIMIT/OFFSET can be replaced entirely.
See the recipe at
http://www.sqlalchemy.org/trac/wiki/UsageRecipes/WindowFunctionsByDefault
which installs a select compiler that overrides the generation of limit/offset
with a window function.
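A minimal sketch combining the two options above (the connection URL is a
placeholder)::

    from sqlalchemy import create_engine

    engine = create_engine(
        "oracle+cx_oracle://scott:tiger@dsn",
        optimize_limits=True,        # emit the FIRST_ROWS() hint
        use_binds_for_limits=False,  # render limit/offset literally
    )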
.. _oracle_returning:
RETURNING Support
-----------------
The Oracle database supports a limited form of RETURNING, in order to retrieve
result sets of matched rows from INSERT, UPDATE and DELETE statements.
Oracle's RETURNING..INTO syntax only supports one row being returned, as it
relies upon OUT parameters in order to function. In addition, supported
DBAPIs have further limitations (see :ref:`cx_oracle_returning`).
SQLAlchemy's "implicit returning" feature, which employs RETURNING within an
INSERT and sometimes an UPDATE statement in order to fetch newly generated
primary key values and other SQL defaults and expressions, is normally enabled
on the Oracle backend. By default, "implicit returning" typically only
fetches the value of a single ``nextval(some_seq)`` expression embedded into
an INSERT in order to increment a sequence within an INSERT statement and get
the value back at the same time. To disable this feature across the board,
specify ``implicit_returning=False`` to :func:`_sa.create_engine`::
engine = create_engine("oracle://scott:tiger@dsn",
implicit_returning=False)
Implicit returning can also be disabled on a table-by-table basis as a table
option::
# Core Table
my_table = Table("my_table", metadata, ..., implicit_returning=False)
# declarative
class MyClass(Base):
__tablename__ = 'my_table'
__table_args__ = {"implicit_returning": False}
.. seealso::
:ref:`cx_oracle_returning` - additional cx_oracle-specific restrictions on
implicit returning.
ON UPDATE CASCADE
-----------------
Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger-based
solution is available at
http://asktom.oracle.com/tkyte/update_cascade/index.html .
When using the SQLAlchemy ORM, the ORM has limited ability to manually issue
cascading updates - specify ForeignKey objects using the
"deferrable=True, initially='deferred'" keyword arguments,
and specify "passive_updates=False" on each relationship().
Oracle 8 Compatibility
----------------------
When Oracle 8 is detected, the dialect internally configures itself to the
following behaviors:
* the use_ansi flag is set to False. This has the effect of converting all
JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN
makes use of Oracle's (+) operator.
* the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when
the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are
  issued instead. This is because these types don't seem to work correctly on
Oracle 8 even though they are available. The
:class:`~sqlalchemy.types.NVARCHAR` and
:class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate
NVARCHAR2 and NCLOB.
* the "native unicode" mode is disabled when using cx_oracle, i.e. SQLAlchemy
encodes all Python unicode objects to "string" before passing in as bind
parameters.
Synonym/DBLINK Reflection
-------------------------
When using reflection with Table objects, the dialect can optionally search
for tables indicated by synonyms, either in local or remote schemas or
accessed over DBLINK, by passing the flag ``oracle_resolve_synonyms=True`` as
a keyword argument to the :class:`_schema.Table` construct::
some_table = Table('some_table', autoload=True,
autoload_with=some_engine,
oracle_resolve_synonyms=True)
When this flag is set, the given name (such as ``some_table`` above) will
be searched not just in the ``ALL_TABLES`` view, but also within the
``ALL_SYNONYMS`` view to see if this name is actually a synonym to another
name. If the synonym is located and refers to a DBLINK, the oracle dialect
knows how to locate the table's information using DBLINK syntax (e.g.
``@dblink``).
``oracle_resolve_synonyms`` is accepted wherever reflection arguments are
accepted, including methods such as :meth:`_schema.MetaData.reflect` and
:meth:`_reflection.Inspector.get_columns`.
If synonyms are not in use, this flag should be left disabled.
.. _oracle_constraint_reflection:
Constraint Reflection
---------------------
The Oracle dialect can return information about foreign key, unique, and
CHECK constraints, as well as indexes on tables.
Raw information regarding these constraints can be acquired using
:meth:`_reflection.Inspector.get_foreign_keys`,
:meth:`_reflection.Inspector.get_unique_constraints`,
:meth:`_reflection.Inspector.get_check_constraints`, and
:meth:`_reflection.Inspector.get_indexes`.
.. versionchanged:: 1.2 The Oracle dialect can now reflect UNIQUE and
CHECK constraints.
When using reflection at the :class:`_schema.Table` level, the
:class:`_schema.Table`
will also include these constraints.
Note the following caveats:
* When using the :meth:`_reflection.Inspector.get_check_constraints` method,
Oracle
builds a special "IS NOT NULL" constraint for columns that specify
"NOT NULL". This constraint is **not** returned by default; to include
the "IS NOT NULL" constraints, pass the flag ``include_all=True``::
from sqlalchemy import create_engine, inspect
engine = create_engine("oracle+cx_oracle://s:t@dsn")
inspector = inspect(engine)
all_check_constraints = inspector.get_check_constraints(
"some_table", include_all=True)
* in most cases, when reflecting a :class:`_schema.Table`,
a UNIQUE constraint will
**not** be available as a :class:`.UniqueConstraint` object, as Oracle
mirrors unique constraints with a UNIQUE index in most cases (the exception
seems to be when two or more unique constraints represent the same columns);
the :class:`_schema.Table` will instead represent these using
:class:`.Index`
with the ``unique=True`` flag set.
* Oracle creates an implicit index for the primary key of a table; this index
is **excluded** from all index results.
* the list of columns reflected for an index will not include column names
that start with SYS_NC.
Table names with SYSTEM/SYSAUX tablespaces
-------------------------------------------
The :meth:`_reflection.Inspector.get_table_names` and
:meth:`_reflection.Inspector.get_temp_table_names`
methods each return a list of table names for the current engine. These methods
are also part of the reflection which occurs within an operation such as
:meth:`_schema.MetaData.reflect`. By default,
these operations exclude the ``SYSTEM``
and ``SYSAUX`` tablespaces from the operation. In order to change this, the
default list of tablespaces excluded can be changed at the engine level using
the ``exclude_tablespaces`` parameter::
# exclude SYSAUX and SOME_TABLESPACE, but not SYSTEM
e = create_engine(
"oracle://scott:tiger@xe",
exclude_tablespaces=["SYSAUX", "SOME_TABLESPACE"])
.. versionadded:: 1.1
DateTime Compatibility
----------------------
Oracle has no datatype known as ``DATETIME``; it instead has only ``DATE``,
which can actually store a date and time value. For this reason, the Oracle
dialect provides a type :class:`_oracle.DATE` which is a subclass of
:class:`.DateTime`. This type has no special behavior, and is only
present as a "marker" for this type; additionally, when a database column
is reflected and the type is reported as ``DATE``, the time-supporting
:class:`_oracle.DATE` type is used.
.. versionchanged:: 0.9.4 Added :class:`_oracle.DATE` to subclass
:class:`.DateTime`. This is a change as previous versions
would reflect a ``DATE`` column as :class:`_types.DATE`, which subclasses
:class:`.Date`. The only significance here is for schemes that are
examining the type of column for use in special Python translations or
for migrating schemas to other database backends.
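For example (a minimal sketch), the Oracle-specific type may be declared
directly for a column that stores both a date and a time::

    from sqlalchemy import Column, MetaData, Table
    from sqlalchemy.dialects import oracle

    t = Table('some_table', MetaData(),
              Column('timestamp', oracle.DATE))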
.. _oracle_table_options:
Oracle Table Options
-------------------------
The CREATE TABLE phrase supports the following options with Oracle
in conjunction with the :class:`_schema.Table` construct:
* ``ON COMMIT``::
Table(
"some_table", metadata, ...,
prefixes=['GLOBAL TEMPORARY'], oracle_on_commit='PRESERVE ROWS')
.. versionadded:: 1.0.0
* ``COMPRESS``::
Table('mytable', metadata, Column('data', String(32)),
oracle_compress=True)
Table('mytable', metadata, Column('data', String(32)),
oracle_compress=6)
The ``oracle_compress`` parameter accepts either an integer compression
level, or ``True`` to use the default compression level.
.. versionadded:: 1.0.0
.. _oracle_index_options:
Oracle Specific Index Options
-----------------------------
Bitmap Indexes
~~~~~~~~~~~~~~
You can specify the ``oracle_bitmap`` parameter to create a bitmap index
instead of a B-tree index::
Index('my_index', my_table.c.data, oracle_bitmap=True)
Bitmap indexes cannot be unique and cannot be compressed. SQLAlchemy will not
check for such limitations; only the database will.
.. versionadded:: 1.0.0
Index compression
~~~~~~~~~~~~~~~~~
Oracle has a more efficient storage mode for indexes containing lots of
repeated values. Use the ``oracle_compress`` parameter to turn on key
compression::
Index('my_index', my_table.c.data, oracle_compress=True)
Index('my_index', my_table.c.data1, my_table.c.data2, unique=True,
oracle_compress=1)
The ``oracle_compress`` parameter accepts either an integer specifying the
number of prefix columns to compress, or ``True`` to use the default (all
columns for non-unique indexes, all but the last column for unique indexes).
.. versionadded:: 1.0.0
""" # noqa
from itertools import groupby
import re
from ... import Computed
from ... import exc
from ... import schema as sa_schema
from ... import sql
from ... import types as sqltypes
from ... import util
from ...engine import default
from ...engine import reflection
from ...sql import compiler
from ...sql import expression
from ...sql import util as sql_util
from ...sql import visitors
from ...types import BLOB
from ...types import CHAR
from ...types import CLOB
from ...types import FLOAT
from ...types import INTEGER
from ...types import NCHAR
from ...types import NVARCHAR
from ...types import TIMESTAMP
from ...types import VARCHAR
RESERVED_WORDS = set(
"SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN "
"DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED "
"ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE "
"ANY TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE "
"BY ASC REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES "
"AS IN VIEW EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS "
"NOT TRIGGER ELSE CREATE INTERSECT PCTFREE DISTINCT USER "
"CONNECT SET MODE OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR "
"DECIMAL UNION PUBLIC AND START UID COMMENT CURRENT LEVEL".split()
)
NO_ARG_FNS = set(
"UID CURRENT_DATE SYSDATE USER " "CURRENT_TIME CURRENT_TIMESTAMP".split()
)
class RAW(sqltypes._Binary):
__visit_name__ = "RAW"
OracleRaw = RAW
class NCLOB(sqltypes.Text):
__visit_name__ = "NCLOB"
class VARCHAR2(VARCHAR):
__visit_name__ = "VARCHAR2"
NVARCHAR2 = NVARCHAR
class NUMBER(sqltypes.Numeric, sqltypes.Integer):
__visit_name__ = "NUMBER"
def __init__(self, precision=None, scale=None, asdecimal=None):
if asdecimal is None:
asdecimal = bool(scale and scale > 0)
super(NUMBER, self).__init__(
precision=precision, scale=scale, asdecimal=asdecimal
)
def adapt(self, impltype):
ret = super(NUMBER, self).adapt(impltype)
# leave a hint for the DBAPI handler
ret._is_oracle_number = True
return ret
@property
def _type_affinity(self):
if bool(self.scale and self.scale > 0):
return sqltypes.Numeric
else:
return sqltypes.Integer
class DOUBLE_PRECISION(sqltypes.Float):
__visit_name__ = "DOUBLE_PRECISION"
class BINARY_DOUBLE(sqltypes.Float):
__visit_name__ = "BINARY_DOUBLE"
class BINARY_FLOAT(sqltypes.Float):
__visit_name__ = "BINARY_FLOAT"
class BFILE(sqltypes.LargeBinary):
__visit_name__ = "BFILE"
class LONG(sqltypes.Text):
__visit_name__ = "LONG"
class DATE(sqltypes.DateTime):
"""Provide the oracle DATE type.
This type has no special Python behavior, except that it subclasses
:class:`_types.DateTime`; this is to suit the fact that the Oracle
``DATE`` type supports a time value.
.. versionadded:: 0.9.4
"""
__visit_name__ = "DATE"
def _compare_type_affinity(self, other):
return other._type_affinity in (sqltypes.DateTime, sqltypes.Date)
class INTERVAL(sqltypes.TypeEngine):
__visit_name__ = "INTERVAL"
def __init__(self, day_precision=None, second_precision=None):
"""Construct an INTERVAL.
Note that only DAY TO SECOND intervals are currently supported.
This is due to a lack of support for YEAR TO MONTH intervals
within available DBAPIs (cx_oracle and zxjdbc).
:param day_precision: the day precision value. this is the number of
digits to store for the day field. Defaults to "2"
:param second_precision: the second precision value. this is the
number of digits to store for the fractional seconds field.
Defaults to "6".
"""
self.day_precision = day_precision
self.second_precision = second_precision
@classmethod
def _adapt_from_generic_interval(cls, interval):
return INTERVAL(
day_precision=interval.day_precision,
second_precision=interval.second_precision,
)
@property
def _type_affinity(self):
return sqltypes.Interval
class ROWID(sqltypes.TypeEngine):
"""Oracle ROWID type.
When used in a cast() or similar, generates ROWID.
"""
__visit_name__ = "ROWID"
class _OracleBoolean(sqltypes.Boolean):
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
colspecs = {
sqltypes.Boolean: _OracleBoolean,
sqltypes.Interval: INTERVAL,
sqltypes.DateTime: DATE,
}
ischema_names = {
"VARCHAR2": VARCHAR,
"NVARCHAR2": NVARCHAR,
"CHAR": CHAR,
"NCHAR": NCHAR,
"DATE": DATE,
"NUMBER": NUMBER,
"BLOB": BLOB,
"BFILE": BFILE,
"CLOB": CLOB,
"NCLOB": NCLOB,
"TIMESTAMP": TIMESTAMP,
"TIMESTAMP WITH TIME ZONE": TIMESTAMP,
"INTERVAL DAY TO SECOND": INTERVAL,
"RAW": RAW,
"FLOAT": FLOAT,
"DOUBLE PRECISION": DOUBLE_PRECISION,
"LONG": LONG,
"BINARY_DOUBLE": BINARY_DOUBLE,
"BINARY_FLOAT": BINARY_FLOAT,
}
class OracleTypeCompiler(compiler.GenericTypeCompiler):
# Note:
# Oracle DATE == DATETIME
# Oracle does not allow milliseconds in DATE
# Oracle does not support TIME columns
def visit_datetime(self, type_, **kw):
return self.visit_DATE(type_, **kw)
def visit_float(self, type_, **kw):
return self.visit_FLOAT(type_, **kw)
def visit_unicode(self, type_, **kw):
if self.dialect._use_nchar_for_unicode:
return self.visit_NVARCHAR2(type_, **kw)
else:
return self.visit_VARCHAR2(type_, **kw)
def visit_INTERVAL(self, type_, **kw):
return "INTERVAL DAY%s TO SECOND%s" % (
type_.day_precision is not None
and "(%d)" % type_.day_precision
or "",
type_.second_precision is not None
and "(%d)" % type_.second_precision
or "",
)
def visit_LONG(self, type_, **kw):
return "LONG"
def visit_TIMESTAMP(self, type_, **kw):
if type_.timezone:
return "TIMESTAMP WITH TIME ZONE"
else:
return "TIMESTAMP"
def visit_DOUBLE_PRECISION(self, type_, **kw):
return self._generate_numeric(type_, "DOUBLE PRECISION", **kw)
def visit_BINARY_DOUBLE(self, type_, **kw):
return self._generate_numeric(type_, "BINARY_DOUBLE", **kw)
def visit_BINARY_FLOAT(self, type_, **kw):
return self._generate_numeric(type_, "BINARY_FLOAT", **kw)
def visit_FLOAT(self, type_, **kw):
# don't support conversion between decimal/binary
# precision yet
kw["no_precision"] = True
return self._generate_numeric(type_, "FLOAT", **kw)
def visit_NUMBER(self, type_, **kw):
return self._generate_numeric(type_, "NUMBER", **kw)
def _generate_numeric(
self, type_, name, precision=None, scale=None, no_precision=False, **kw
):
if precision is None:
precision = type_.precision
if scale is None:
scale = getattr(type_, "scale", None)
if no_precision or precision is None:
return name
elif scale is None:
n = "%(name)s(%(precision)s)"
return n % {"name": name, "precision": precision}
else:
n = "%(name)s(%(precision)s, %(scale)s)"
return n % {"name": name, "precision": precision, "scale": scale}
def visit_string(self, type_, **kw):
return self.visit_VARCHAR2(type_, **kw)
def visit_VARCHAR2(self, type_, **kw):
return self._visit_varchar(type_, "", "2")
def visit_NVARCHAR2(self, type_, **kw):
return self._visit_varchar(type_, "N", "2")
visit_NVARCHAR = visit_NVARCHAR2
def visit_VARCHAR(self, type_, **kw):
return self._visit_varchar(type_, "", "")
def _visit_varchar(self, type_, n, num):
if not type_.length:
return "%(n)sVARCHAR%(two)s" % {"two": num, "n": n}
elif not n and self.dialect._supports_char_length:
varchar = "VARCHAR%(two)s(%(length)s CHAR)"
return varchar % {"length": type_.length, "two": num}
else:
varchar = "%(n)sVARCHAR%(two)s(%(length)s)"
return varchar % {"length": type_.length, "two": num, "n": n}
def visit_text(self, type_, **kw):
return self.visit_CLOB(type_, **kw)
def visit_unicode_text(self, type_, **kw):
if self.dialect._use_nchar_for_unicode:
return self.visit_NCLOB(type_, **kw)
else:
return self.visit_CLOB(type_, **kw)
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_, **kw)
def visit_big_integer(self, type_, **kw):
return self.visit_NUMBER(type_, precision=19, **kw)
def visit_boolean(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_RAW(self, type_, **kw):
if type_.length:
return "RAW(%(length)s)" % {"length": type_.length}
else:
return "RAW"
def visit_ROWID(self, type_, **kw):
return "ROWID"
class OracleCompiler(compiler.SQLCompiler):
"""Oracle compiler modifies the lexical structure of Select
statements to work under non-ANSI configured Oracle databases, if
the use_ansi flag is False.
"""
compound_keywords = util.update_copy(
compiler.SQLCompiler.compound_keywords,
{expression.CompoundSelect.EXCEPT: "MINUS"},
)
def __init__(self, *args, **kwargs):
self.__wheres = {}
self._quoted_bind_names = {}
super(OracleCompiler, self).__init__(*args, **kwargs)
def visit_mod_binary(self, binary, operator, **kw):
return "mod(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_char_length_func(self, fn, **kw):
return "LENGTH" + self.function_argspec(fn, **kw)
def visit_match_op_binary(self, binary, operator, **kw):
return "CONTAINS (%s, %s)" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_true(self, expr, **kw):
return "1"
def visit_false(self, expr, **kw):
return "0"
def get_cte_preamble(self, recursive):
return "WITH"
def get_select_hint_text(self, byfroms):
return " ".join("/*+ %s */" % text for table, text in byfroms.items())
def function_argspec(self, fn, **kw):
if len(fn.clauses) > 0 or fn.name.upper() not in NO_ARG_FNS:
return compiler.SQLCompiler.function_argspec(self, fn, **kw)
else:
return ""
def default_from(self):
"""Called when a ``SELECT`` statement has no froms,
and no ``FROM`` clause is to be appended.
The Oracle compiler tacks a "FROM DUAL" to the statement.
"""
return " FROM DUAL"
def visit_join(self, join, **kwargs):
if self.dialect.use_ansi:
return compiler.SQLCompiler.visit_join(self, join, **kwargs)
else:
kwargs["asfrom"] = True
if isinstance(join.right, expression.FromGrouping):
right = join.right.element
else:
right = join.right
return (
self.process(join.left, **kwargs)
+ ", "
+ self.process(right, **kwargs)
)
def _get_nonansi_join_whereclause(self, froms):
clauses = []
def visit_join(join):
if join.isouter:
# https://docs.oracle.com/database/121/SQLRF/queries006.htm#SQLRF52354
# "apply the outer join operator (+) to all columns of B in
# the join condition in the WHERE clause" - that is,
# unconditionally regardless of operator or the other side
def visit_binary(binary):
if isinstance(
binary.left, expression.ColumnClause
) and join.right.is_derived_from(binary.left.table):
binary.left = _OuterJoinColumn(binary.left)
elif isinstance(
binary.right, expression.ColumnClause
) and join.right.is_derived_from(binary.right.table):
binary.right = _OuterJoinColumn(binary.right)
clauses.append(
visitors.cloned_traverse(
join.onclause, {}, {"binary": visit_binary}
)
)
else:
clauses.append(join.onclause)
for j in join.left, join.right:
if isinstance(j, expression.Join):
visit_join(j)
elif isinstance(j, expression.FromGrouping):
visit_join(j.element)
for f in froms:
if isinstance(f, expression.Join):
visit_join(f)
if not clauses:
return None
else:
return sql.and_(*clauses)
def visit_outer_join_column(self, vc, **kw):
return self.process(vc.column, **kw) + "(+)"
def visit_sequence(self, seq, **kw):
return (
self.dialect.identifier_preparer.format_sequence(seq) + ".nextval"
)
def get_render_as_alias_suffix(self, alias_name_text):
"""Oracle doesn't like ``FROM table AS alias``"""
return " " + alias_name_text
def returning_clause(self, stmt, returning_cols):
columns = []
binds = []
for i, column in enumerate(
expression._select_iterables(returning_cols)
):
if self.isupdate and isinstance(column.server_default, Computed):
util.warn(
"Computed columns don't work with Oracle UPDATE "
"statements that use RETURNING; the value of the column "
"*before* the UPDATE takes place is returned. It is "
"advised to not use RETURNING with an Oracle computed "
"column. Consider setting implicit_returning to False on "
"the Table object in order to avoid implicit RETURNING "
"clauses from being generated for this Table."
)
if column.type._has_column_expression:
col_expr = column.type.column_expression(column)
else:
col_expr = column
outparam = sql.outparam("ret_%d" % i, type_=column.type)
self.binds[outparam.key] = outparam
binds.append(
self.bindparam_string(self._truncate_bindparam(outparam))
)
columns.append(self.process(col_expr, within_columns_clause=False))
self._add_to_result_map(
getattr(col_expr, "name", col_expr.anon_label),
getattr(col_expr, "name", col_expr.anon_label),
(
column,
getattr(column, "name", None),
getattr(column, "key", None),
),
column.type,
)
return "RETURNING " + ", ".join(columns) + " INTO " + ", ".join(binds)
def _TODO_visit_compound_select(self, select):
"""Need to determine how to get ``LIMIT``/``OFFSET`` into a
``UNION`` for Oracle.
"""
pass
def visit_select(self, select, **kwargs):
"""Look for ``LIMIT`` and OFFSET in a select statement, and if
so tries to wrap it in a subquery with ``rownum`` criterion.
"""
if not getattr(select, "_oracle_visit", None):
if not self.dialect.use_ansi:
froms = self._display_froms_for_select(
select, kwargs.get("asfrom", False)
)
whereclause = self._get_nonansi_join_whereclause(froms)
if whereclause is not None:
select = select.where(whereclause)
select._oracle_visit = True
limit_clause = select._limit_clause
offset_clause = select._offset_clause
if limit_clause is not None or offset_clause is not None:
# See http://www.oracle.com/technology/oramag/oracle/06-sep/\
# o56asktom.html
#
# Generalized form of an Oracle pagination query:
# select ... from (
# select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from
# ( select distinct ... where ... order by ...
# ) where ROWNUM <= :limit+:offset
# ) where ora_rn > :offset
# Outer select and "ROWNUM as ora_rn" can be dropped if
# limit=0
kwargs["select_wraps_for"] = select
select = select._generate()
select._oracle_visit = True
# Wrap the middle select and add the hint
limitselect = sql.select([c for c in select.c])
if (
limit_clause is not None
and self.dialect.optimize_limits
and select._simple_int_limit
):
limitselect = limitselect.prefix_with(
"/*+ FIRST_ROWS(%d) */" % select._limit
)
limitselect._oracle_visit = True
limitselect._is_wrapper = True
# add expressions to accommodate FOR UPDATE OF
for_update = select._for_update_arg
if for_update is not None and for_update.of:
for_update = for_update._clone()
for_update._copy_internals()
for elem in for_update.of:
select.append_column(elem)
adapter = sql_util.ClauseAdapter(select)
for_update.of = [
adapter.traverse(elem) for elem in for_update.of
]
# If needed, add the limiting clause
if limit_clause is not None:
if not self.dialect.use_binds_for_limits:
# use simple int limits, will raise an exception
# if the limit isn't specified this way
max_row = select._limit
if offset_clause is not None:
max_row += select._offset
max_row = sql.literal_column("%d" % max_row)
else:
max_row = limit_clause
if offset_clause is not None:
max_row = max_row + offset_clause
limitselect.append_whereclause(
sql.literal_column("ROWNUM") <= max_row
)
# If needed, add the ora_rn, and wrap again with offset.
if offset_clause is None:
limitselect._for_update_arg = for_update
select = limitselect
else:
limitselect = limitselect.column(
sql.literal_column("ROWNUM").label("ora_rn")
)
limitselect._oracle_visit = True
limitselect._is_wrapper = True
offsetselect = sql.select(
[c for c in limitselect.c if c.key != "ora_rn"]
)
offsetselect._oracle_visit = True
offsetselect._is_wrapper = True
if for_update is not None and for_update.of:
for elem in for_update.of:
if limitselect.corresponding_column(elem) is None:
limitselect.append_column(elem)
if not self.dialect.use_binds_for_limits:
offset_clause = sql.literal_column(
"%d" % select._offset
)
offsetselect.append_whereclause(
sql.literal_column("ora_rn") > offset_clause
)
offsetselect._for_update_arg = for_update
select = offsetselect
return compiler.SQLCompiler.visit_select(self, select, **kwargs)
def limit_clause(self, select, **kw):
return ""
def visit_empty_set_expr(self, type_):
return "SELECT 1 FROM DUAL WHERE 1!=1"
def for_update_clause(self, select, **kw):
if self.is_subquery():
return ""
tmp = " FOR UPDATE"
if select._for_update_arg.of:
tmp += " OF " + ", ".join(
self.process(elem, **kw) for elem in select._for_update_arg.of
)
if select._for_update_arg.nowait:
tmp += " NOWAIT"
if select._for_update_arg.skip_locked:
tmp += " SKIP LOCKED"
return tmp
def visit_is_distinct_from_binary(self, binary, operator, **kw):
return "DECODE(%s, %s, 0, 1) = 1" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_isnot_distinct_from_binary(self, binary, operator, **kw):
return "DECODE(%s, %s, 0, 1) = 0" % (
self.process(binary.left),
self.process(binary.right),
)
class OracleDDLCompiler(compiler.DDLCompiler):
def define_constraint_cascades(self, constraint):
text = ""
if constraint.ondelete is not None:
text += " ON DELETE %s" % constraint.ondelete
# oracle has no ON UPDATE CASCADE -
        # it's only available via triggers
# http://asktom.oracle.com/tkyte/update_cascade/index.html
if constraint.onupdate is not None:
util.warn(
"Oracle does not contain native UPDATE CASCADE "
"functionality - onupdates will not be rendered for foreign "
"keys. Consider using deferrable=True, initially='deferred' "
"or triggers."
)
return text
def visit_drop_table_comment(self, drop):
return "COMMENT ON TABLE %s IS ''" % self.preparer.format_table(
drop.element
)
def visit_create_index(self, create):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
if index.dialect_options["oracle"]["bitmap"]:
text += "BITMAP "
text += "INDEX %s ON %s (%s)" % (
self._prepared_index_name(index, include_schema=True),
preparer.format_table(index.table, use_schema=True),
", ".join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
),
)
if index.dialect_options["oracle"]["compress"] is not False:
if index.dialect_options["oracle"]["compress"] is True:
text += " COMPRESS"
else:
text += " COMPRESS %d" % (
index.dialect_options["oracle"]["compress"]
)
return text
def post_create_table(self, table):
table_opts = []
opts = table.dialect_options["oracle"]
if opts["on_commit"]:
on_commit_options = opts["on_commit"].replace("_", " ").upper()
table_opts.append("\n ON COMMIT %s" % on_commit_options)
if opts["compress"]:
if opts["compress"] is True:
table_opts.append("\n COMPRESS")
else:
table_opts.append("\n COMPRESS FOR %s" % (opts["compress"]))
return "".join(table_opts)
def visit_computed_column(self, generated):
text = "GENERATED ALWAYS AS (%s)" % self.sql_compiler.process(
generated.sqltext, include_table=False, literal_binds=True
)
if generated.persisted is True:
raise exc.CompileError(
"Oracle computed columns do not support 'stored' persistence; "
"set the 'persisted' flag to None or False for Oracle support."
)
elif generated.persisted is False:
text += " VIRTUAL"
return text
class OracleIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = {x.lower() for x in RESERVED_WORDS}
illegal_initial_characters = {str(dig) for dig in range(0, 10)}.union(
["_", "$"]
)
def _bindparam_requires_quotes(self, value):
"""Return True if the given identifier requires quoting."""
lc_value = value.lower()
return (
lc_value in self.reserved_words
or value[0] in self.illegal_initial_characters
or not self.legal_characters.match(util.text_type(value))
)
def format_savepoint(self, savepoint):
name = savepoint.ident.lstrip("_")
return super(OracleIdentifierPreparer, self).format_savepoint(
savepoint, name
)
class OracleExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
return self._execute_scalar(
"SELECT "
+ self.dialect.identifier_preparer.format_sequence(seq)
+ ".nextval FROM DUAL",
type_,
)
class OracleDialect(default.DefaultDialect):
name = "oracle"
supports_alter = True
supports_unicode_statements = False
supports_unicode_binds = False
max_identifier_length = 30
supports_simple_order_by_label = False
cte_follows_insert = True
supports_sequences = True
sequences_optional = False
postfetch_lastrowid = False
default_paramstyle = "named"
colspecs = colspecs
ischema_names = ischema_names
requires_name_normalize = True
supports_comments = True
supports_default_values = False
supports_empty_insert = False
statement_compiler = OracleCompiler
ddl_compiler = OracleDDLCompiler
type_compiler = OracleTypeCompiler
preparer = OracleIdentifierPreparer
execution_ctx_cls = OracleExecutionContext
reflection_options = ("oracle_resolve_synonyms",)
_use_nchar_for_unicode = False
construct_arguments = [
(
sa_schema.Table,
{"resolve_synonyms": False, "on_commit": None, "compress": False},
),
(sa_schema.Index, {"bitmap": False, "compress": False}),
]
def __init__(
self,
use_ansi=True,
optimize_limits=False,
use_binds_for_limits=True,
use_nchar_for_unicode=False,
exclude_tablespaces=("SYSTEM", "SYSAUX"),
**kwargs
):
default.DefaultDialect.__init__(self, **kwargs)
self._use_nchar_for_unicode = use_nchar_for_unicode
self.use_ansi = use_ansi
self.optimize_limits = optimize_limits
self.use_binds_for_limits = use_binds_for_limits
self.exclude_tablespaces = exclude_tablespaces
def initialize(self, connection):
super(OracleDialect, self).initialize(connection)
self.implicit_returning = self.__dict__.get(
"implicit_returning", self.server_version_info > (10,)
)
if self._is_oracle_8:
self.colspecs = self.colspecs.copy()
self.colspecs.pop(sqltypes.Interval)
self.use_ansi = False
def _get_effective_compat_server_version_info(self, connection):
# dialect does not need compat levels below 12.2, so don't query
# in those cases
if self.server_version_info < (12, 2):
return self.server_version_info
try:
compat = connection.execute(
"SELECT value FROM v$parameter WHERE name = 'compatible'"
).scalar()
except exc.DBAPIError:
compat = None
if compat:
try:
return tuple(int(x) for x in compat.split("."))
            except ValueError:
return self.server_version_info
else:
return self.server_version_info
@property
def _is_oracle_8(self):
return self.server_version_info and self.server_version_info < (9,)
@property
def _supports_table_compression(self):
return self.server_version_info and self.server_version_info >= (10, 1)
@property
def _supports_table_compress_for(self):
return self.server_version_info and self.server_version_info >= (11,)
@property
def _supports_char_length(self):
return not self._is_oracle_8
def do_release_savepoint(self, connection, name):
# Oracle does not support RELEASE SAVEPOINT
pass
def _check_max_identifier_length(self, connection):
if self._get_effective_compat_server_version_info(connection) >= (
12,
2,
):
util.warn(
"Oracle version %r is known to have a maximum "
"identifier length of 128, rather than the historical default "
"of 30. SQLAlchemy 1.4 will use 128 for this "
"database; please set max_identifier_length=128 "
"in create_engine() in order to "
"test the application with this new length, or set to 30 in "
"order to assure that 30 continues to be used. "
"In particular, pay close attention to the behavior of "
"database migrations as dynamically generated names may "
"change. See the section 'Max Identifier Lengths' in the "
"SQLAlchemy Oracle dialect documentation for background."
% ((self.server_version_info,))
)
# use the default
return None
def _check_unicode_returns(self, connection):
additional_tests = [
expression.cast(
expression.literal_column("'test nvarchar2 returns'"),
sqltypes.NVARCHAR(60),
)
]
return super(OracleDialect, self)._check_unicode_returns(
connection, additional_tests
)
_isolation_lookup = ["READ COMMITTED"]
def get_isolation_level(self, connection):
return "READ COMMITTED"
def set_isolation_level(self, connection, level):
# prior to adding AUTOCOMMIT support for cx_Oracle, the Oracle dialect
# had no notion of setting the isolation level. As Oracle
# does not have a straightforward way of getting the isolation level
# if a server-side transaction is not yet in progress, we currently
# hardcode to only support "READ COMMITTED" and "AUTOCOMMIT" at the
# cx_oracle level. See #5200.
pass
def has_table(self, connection, table_name, schema=None):
if not schema:
schema = self.default_schema_name
cursor = connection.execute(
sql.text(
"SELECT table_name FROM all_tables "
"WHERE table_name = :name AND owner = :schema_name"
),
name=self.denormalize_name(table_name),
schema_name=self.denormalize_name(schema),
)
return cursor.first() is not None
def has_sequence(self, connection, sequence_name, schema=None):
if not schema:
schema = self.default_schema_name
cursor = connection.execute(
sql.text(
"SELECT sequence_name FROM all_sequences "
"WHERE sequence_name = :name AND "
"sequence_owner = :schema_name"
),
name=self.denormalize_name(sequence_name),
schema_name=self.denormalize_name(schema),
)
return cursor.first() is not None
def _get_default_schema_name(self, connection):
return self.normalize_name(
connection.execute("SELECT USER FROM DUAL").scalar()
)
def _resolve_synonym(
self,
connection,
desired_owner=None,
desired_synonym=None,
desired_table=None,
):
"""search for a local synonym matching the given desired owner/name.
if desired_owner is None, attempts to locate a distinct owner.
returns the actual name, owner, dblink name, and synonym name if
found.
"""
q = (
"SELECT owner, table_owner, table_name, db_link, "
"synonym_name FROM all_synonyms WHERE "
)
clauses = []
params = {}
if desired_synonym:
clauses.append("synonym_name = :synonym_name")
params["synonym_name"] = desired_synonym
if desired_owner:
clauses.append("owner = :desired_owner")
params["desired_owner"] = desired_owner
if desired_table:
clauses.append("table_name = :tname")
params["tname"] = desired_table
q += " AND ".join(clauses)
result = connection.execute(sql.text(q), **params)
if desired_owner:
row = result.first()
if row:
return (
row["table_name"],
row["table_owner"],
row["db_link"],
row["synonym_name"],
)
else:
return None, None, None, None
else:
rows = result.fetchall()
if len(rows) > 1:
raise AssertionError(
"There are multiple tables visible to the schema, you "
"must specify owner"
)
elif len(rows) == 1:
row = rows[0]
return (
row["table_name"],
row["table_owner"],
row["db_link"],
row["synonym_name"],
)
else:
return None, None, None, None
@reflection.cache
def _prepare_reflection_args(
self,
connection,
table_name,
schema=None,
resolve_synonyms=False,
dblink="",
**kw
):
if resolve_synonyms:
actual_name, owner, dblink, synonym = self._resolve_synonym(
connection,
desired_owner=self.denormalize_name(schema),
desired_synonym=self.denormalize_name(table_name),
)
else:
actual_name, owner, dblink, synonym = None, None, None, None
if not actual_name:
actual_name = self.denormalize_name(table_name)
if dblink:
# using user_db_links here since all_db_links appears
# to have more restricted permissions.
# http://docs.oracle.com/cd/B28359_01/server.111/b28310/ds_admin005.htm
# will need to hear from more users if we are doing
# the right thing here. See [ticket:2619]
owner = connection.scalar(
sql.text(
"SELECT username FROM user_db_links " "WHERE db_link=:link"
),
link=dblink,
)
dblink = "@" + dblink
elif not owner:
owner = self.denormalize_name(schema or self.default_schema_name)
return (actual_name, owner, dblink or "", synonym)
@reflection.cache
def get_schema_names(self, connection, **kw):
s = "SELECT username FROM all_users ORDER BY username"
cursor = connection.execute(s)
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
schema = self.denormalize_name(schema or self.default_schema_name)
# note that table_names() isn't loading DBLINKed or synonym'ed tables
if schema is None:
schema = self.default_schema_name
sql_str = "SELECT table_name FROM all_tables WHERE "
if self.exclude_tablespaces:
sql_str += (
"nvl(tablespace_name, 'no tablespace') "
"NOT IN (%s) AND "
% (", ".join(["'%s'" % ts for ts in self.exclude_tablespaces]))
)
sql_str += (
"OWNER = :owner " "AND IOT_NAME IS NULL " "AND DURATION IS NULL"
)
cursor = connection.execute(sql.text(sql_str), owner=schema)
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_temp_table_names(self, connection, **kw):
schema = self.denormalize_name(self.default_schema_name)
sql_str = "SELECT table_name FROM all_tables WHERE "
if self.exclude_tablespaces:
sql_str += (
"nvl(tablespace_name, 'no tablespace') "
"NOT IN (%s) AND "
% (", ".join(["'%s'" % ts for ts in self.exclude_tablespaces]))
)
sql_str += (
"OWNER = :owner "
"AND IOT_NAME IS NULL "
"AND DURATION IS NOT NULL"
)
cursor = connection.execute(sql.text(sql_str), owner=schema)
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
schema = self.denormalize_name(schema or self.default_schema_name)
s = sql.text("SELECT view_name FROM all_views WHERE owner = :owner")
cursor = connection.execute(s, owner=self.denormalize_name(schema))
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_table_options(self, connection, table_name, schema=None, **kw):
options = {}
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
params = {"table_name": table_name}
columns = ["table_name"]
if self._supports_table_compression:
columns.append("compression")
if self._supports_table_compress_for:
columns.append("compress_for")
text = (
"SELECT %(columns)s "
"FROM ALL_TABLES%(dblink)s "
"WHERE table_name = :table_name"
)
if schema is not None:
params["owner"] = schema
text += " AND owner = :owner "
text = text % {"dblink": dblink, "columns": ", ".join(columns)}
result = connection.execute(sql.text(text), **params)
enabled = dict(DISABLED=False, ENABLED=True)
row = result.first()
if row:
if "compression" in row and enabled.get(row.compression, False):
if "compress_for" in row:
options["oracle_compress"] = row.compress_for
else:
options["oracle_compress"] = True
return options
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
"""
kw arguments can be:
oracle_resolve_synonyms
dblink
"""
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
columns = []
if self._supports_char_length:
char_length_col = "char_length"
else:
char_length_col = "data_length"
params = {"table_name": table_name}
text = """
SELECT col.column_name, col.data_type, col.%(char_length_col)s,
col.data_precision, col.data_scale, col.nullable,
col.data_default, com.comments, col.virtual_column\
FROM all_tab_cols%(dblink)s col
LEFT JOIN all_col_comments%(dblink)s com
ON col.table_name = com.table_name
AND col.column_name = com.column_name
AND col.owner = com.owner
WHERE col.table_name = :table_name
AND col.hidden_column = 'NO'
"""
if schema is not None:
params["owner"] = schema
text += " AND col.owner = :owner "
text += " ORDER BY col.column_id"
text = text % {"dblink": dblink, "char_length_col": char_length_col}
c = connection.execute(sql.text(text), **params)
for row in c:
colname = self.normalize_name(row[0])
orig_colname = row[0]
coltype = row[1]
length = row[2]
precision = row[3]
scale = row[4]
nullable = row[5] == "Y"
default = row[6]
comment = row[7]
generated = row[8]
if coltype == "NUMBER":
if precision is None and scale == 0:
coltype = INTEGER()
else:
coltype = NUMBER(precision, scale)
elif coltype == "FLOAT":
# TODO: support "precision" here as "binary_precision"
coltype = FLOAT()
elif coltype in ("VARCHAR2", "NVARCHAR2", "CHAR", "NCHAR"):
coltype = self.ischema_names.get(coltype)(length)
elif "WITH TIME ZONE" in coltype:
coltype = TIMESTAMP(timezone=True)
else:
coltype = re.sub(r"\(\d+\)", "", coltype)
try:
coltype = self.ischema_names[coltype]
except KeyError:
util.warn(
"Did not recognize type '%s' of column '%s'"
% (coltype, colname)
)
coltype = sqltypes.NULLTYPE
if generated == "YES":
computed = dict(sqltext=default)
default = None
else:
computed = None
cdict = {
"name": colname,
"type": coltype,
"nullable": nullable,
"default": default,
"autoincrement": "auto",
"comment": comment,
}
if orig_colname.lower() == orig_colname:
cdict["quote"] = True
if computed is not None:
cdict["computed"] = computed
columns.append(cdict)
return columns
@reflection.cache
def get_table_comment(
self,
connection,
table_name,
schema=None,
resolve_synonyms=False,
dblink="",
**kw
):
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
if not schema:
schema = self.default_schema_name
COMMENT_SQL = """
SELECT comments
FROM all_tab_comments
WHERE table_name = :table_name AND owner = :schema_name
"""
c = connection.execute(
sql.text(COMMENT_SQL), table_name=table_name, schema_name=schema
)
return {"text": c.scalar()}
@reflection.cache
def get_indexes(
self,
connection,
table_name,
schema=None,
resolve_synonyms=False,
dblink="",
**kw
):
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
indexes = []
params = {"table_name": table_name}
text = (
"SELECT a.index_name, a.column_name, "
"\nb.index_type, b.uniqueness, b.compression, b.prefix_length "
"\nFROM ALL_IND_COLUMNS%(dblink)s a, "
"\nALL_INDEXES%(dblink)s b "
"\nWHERE "
"\na.index_name = b.index_name "
"\nAND a.table_owner = b.table_owner "
"\nAND a.table_name = b.table_name "
"\nAND a.table_name = :table_name "
)
if schema is not None:
params["schema"] = schema
text += "AND a.table_owner = :schema "
text += "ORDER BY a.index_name, a.column_position"
text = text % {"dblink": dblink}
q = sql.text(text)
rp = connection.execute(q, **params)
indexes = []
last_index_name = None
pk_constraint = self.get_pk_constraint(
connection,
table_name,
schema,
resolve_synonyms=resolve_synonyms,
dblink=dblink,
info_cache=kw.get("info_cache"),
)
uniqueness = dict(NONUNIQUE=False, UNIQUE=True)
enabled = dict(DISABLED=False, ENABLED=True)
oracle_sys_col = re.compile(r"SYS_NC\d+\$", re.IGNORECASE)
index = None
for rset in rp:
index_name_normalized = self.normalize_name(rset.index_name)
# skip primary key index. This is refined as of
            # [ticket:5421]. Note that ALL_INDEXES.GENERATED will be "Y"
# if the name of this index was generated by Oracle, however
# if a named primary key constraint was created then this flag
# is false.
if (
pk_constraint
and index_name_normalized == pk_constraint["name"]
):
continue
if rset.index_name != last_index_name:
index = dict(
name=index_name_normalized,
column_names=[],
dialect_options={},
)
indexes.append(index)
index["unique"] = uniqueness.get(rset.uniqueness, False)
if rset.index_type in ("BITMAP", "FUNCTION-BASED BITMAP"):
index["dialect_options"]["oracle_bitmap"] = True
if enabled.get(rset.compression, False):
index["dialect_options"][
"oracle_compress"
] = rset.prefix_length
# filter out Oracle SYS_NC names. could also do an outer join
# to the all_tab_columns table and check for real col names there.
if not oracle_sys_col.match(rset.column_name):
index["column_names"].append(
self.normalize_name(rset.column_name)
)
last_index_name = rset.index_name
return indexes
@reflection.cache
def _get_constraint_data(
self, connection, table_name, schema=None, dblink="", **kw
):
params = {"table_name": table_name}
text = (
"SELECT"
"\nac.constraint_name," # 0
"\nac.constraint_type," # 1
"\nloc.column_name AS local_column," # 2
"\nrem.table_name AS remote_table," # 3
"\nrem.column_name AS remote_column," # 4
"\nrem.owner AS remote_owner," # 5
"\nloc.position as loc_pos," # 6
"\nrem.position as rem_pos," # 7
"\nac.search_condition," # 8
"\nac.delete_rule" # 9
"\nFROM all_constraints%(dblink)s ac,"
"\nall_cons_columns%(dblink)s loc,"
"\nall_cons_columns%(dblink)s rem"
"\nWHERE ac.table_name = :table_name"
"\nAND ac.constraint_type IN ('R','P', 'U', 'C')"
)
if schema is not None:
params["owner"] = schema
text += "\nAND ac.owner = :owner"
text += (
"\nAND ac.owner = loc.owner"
"\nAND ac.constraint_name = loc.constraint_name"
"\nAND ac.r_owner = rem.owner(+)"
"\nAND ac.r_constraint_name = rem.constraint_name(+)"
"\nAND (rem.position IS NULL or loc.position=rem.position)"
"\nORDER BY ac.constraint_name, loc.position"
)
text = text % {"dblink": dblink}
rp = connection.execute(sql.text(text), **params)
constraint_data = rp.fetchall()
return constraint_data
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
pkeys = []
constraint_name = None
constraint_data = self._get_constraint_data(
connection,
table_name,
schema,
dblink,
info_cache=kw.get("info_cache"),
)
for row in constraint_data:
(
cons_name,
cons_type,
local_column,
remote_table,
remote_column,
remote_owner,
) = row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
if cons_type == "P":
if constraint_name is None:
constraint_name = self.normalize_name(cons_name)
pkeys.append(local_column)
return {"constrained_columns": pkeys, "name": constraint_name}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
"""
kw arguments can be:
oracle_resolve_synonyms
dblink
"""
requested_schema = schema # to check later on
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
constraint_data = self._get_constraint_data(
connection,
table_name,
schema,
dblink,
info_cache=kw.get("info_cache"),
)
def fkey_rec():
return {
"name": None,
"constrained_columns": [],
"referred_schema": None,
"referred_table": None,
"referred_columns": [],
"options": {},
}
fkeys = util.defaultdict(fkey_rec)
for row in constraint_data:
(
cons_name,
cons_type,
local_column,
remote_table,
remote_column,
remote_owner,
) = row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
cons_name = self.normalize_name(cons_name)
if cons_type == "R":
if remote_table is None:
# ticket 363
util.warn(
(
"Got 'None' querying 'table_name' from "
"all_cons_columns%(dblink)s - does the user have "
"proper rights to the table?"
)
% {"dblink": dblink}
)
continue
rec = fkeys[cons_name]
rec["name"] = cons_name
local_cols, remote_cols = (
rec["constrained_columns"],
rec["referred_columns"],
)
if not rec["referred_table"]:
if resolve_synonyms:
(
ref_remote_name,
ref_remote_owner,
ref_dblink,
ref_synonym,
) = self._resolve_synonym(
connection,
desired_owner=self.denormalize_name(remote_owner),
desired_table=self.denormalize_name(remote_table),
)
if ref_synonym:
remote_table = self.normalize_name(ref_synonym)
remote_owner = self.normalize_name(
ref_remote_owner
)
rec["referred_table"] = remote_table
if (
requested_schema is not None
or self.denormalize_name(remote_owner) != schema
):
rec["referred_schema"] = remote_owner
if row[9] != "NO ACTION":
rec["options"]["ondelete"] = row[9]
local_cols.append(local_column)
remote_cols.append(remote_column)
return list(fkeys.values())
@reflection.cache
def get_unique_constraints(
self, connection, table_name, schema=None, **kw
):
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
constraint_data = self._get_constraint_data(
connection,
table_name,
schema,
dblink,
info_cache=kw.get("info_cache"),
)
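        # constraint rows are (name, type, column, ...); keep only the
        # UNIQUE ("U") rows and group them by constraint name in order to
        # collect each constraint's full list of columns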
unique_keys = filter(lambda x: x[1] == "U", constraint_data)
uniques_group = groupby(unique_keys, lambda x: x[0])
index_names = {
ix["name"]
for ix in self.get_indexes(connection, table_name, schema=schema)
}
return [
{
"name": name,
"column_names": cols,
"duplicates_index": name if name in index_names else None,
}
for name, cols in [
[
self.normalize_name(i[0]),
[self.normalize_name(x[2]) for x in i[1]],
]
for i in uniques_group
]
]
@reflection.cache
def get_view_definition(
self,
connection,
view_name,
schema=None,
resolve_synonyms=False,
dblink="",
**kw
):
info_cache = kw.get("info_cache")
(view_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
view_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
params = {"view_name": view_name}
text = "SELECT text FROM all_views WHERE view_name=:view_name"
if schema is not None:
text += " AND owner = :schema"
params["schema"] = schema
rp = connection.execute(sql.text(text), **params).scalar()
if rp:
if util.py2k:
rp = rp.decode(self.encoding)
return rp
else:
return None
@reflection.cache
def get_check_constraints(
self, connection, table_name, schema=None, include_all=False, **kw
):
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
constraint_data = self._get_constraint_data(
connection,
table_name,
schema,
dblink,
info_cache=kw.get("info_cache"),
)
check_constraints = filter(lambda x: x[1] == "C", constraint_data)
return [
{"name": self.normalize_name(cons[0]), "sqltext": cons[8]}
for cons in check_constraints
if include_all or not re.match(r"..+?. IS NOT NULL$", cons[8])
]
class _OuterJoinColumn(sql.ClauseElement):
__visit_name__ = "outer_join_column"
def __init__(self, column):
self.column = column
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/sqlite/pysqlcipher.py | # sqlite/pysqlcipher.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sqlite+pysqlcipher
:name: pysqlcipher
:dbapi: pysqlcipher
:connectstring: sqlite+pysqlcipher://:passphrase/file_path[?kdf_iter=<iter>]
:url: https://pypi.python.org/pypi/pysqlcipher
``pysqlcipher`` is a fork of the standard ``pysqlite`` driver to make
use of the `SQLCipher <https://www.zetetic.net/sqlcipher>`_ backend.
``pysqlcipher3`` is a fork of ``pysqlcipher`` for Python 3. This dialect
will attempt to import it if ``pysqlcipher`` is not present.
.. versionadded:: 1.1.4 - added fallback import for pysqlcipher3
.. versionadded:: 0.9.9 - added pysqlcipher dialect
Driver
------
The driver here is the
`pysqlcipher <https://pypi.python.org/pypi/pysqlcipher>`_
driver, which makes use of the SQLCipher engine. This system essentially
introduces new PRAGMA commands to SQLite which allow the setting of a
passphrase and other encryption parameters, allowing the database
file to be encrypted.
``pysqlcipher3`` is a fork of ``pysqlcipher`` with support for Python 3;
the driver is otherwise used in the same way.
Connect Strings
---------------
The format of the connect string is in every way the same as that
of the :mod:`~sqlalchemy.dialects.sqlite.pysqlite` driver, except that the
"password" field is now accepted, which should contain a passphrase::
e = create_engine('sqlite+pysqlcipher://:testing@/foo.db')
For an absolute file path, two leading slashes should be used for the
database name::
e = create_engine('sqlite+pysqlcipher://:testing@//path/to/foo.db')
A selection of additional encryption-related pragmas supported by SQLCipher
as documented at https://www.zetetic.net/sqlcipher/sqlcipher-api/ can be passed
in the query string, and will result in that PRAGMA being called for each
new connection. Currently, ``cipher``, ``kdf_iter``,
``cipher_page_size``, and ``cipher_use_hmac`` are supported::
e = create_engine('sqlite+pysqlcipher://:testing@/foo.db?cipher=aes-256-cfb&kdf_iter=64000')
Pooling Behavior
----------------
The driver makes a change to the default pool behavior of pysqlite
as described in :ref:`pysqlite_threading_pooling`. The pysqlcipher driver
has been observed to be significantly slower on connection than the
pysqlite driver, most likely due to the encryption overhead, so the
dialect here defaults to using the :class:`.SingletonThreadPool`
implementation,
instead of the :class:`.NullPool` pool used by pysqlite. As always, the pool
implementation is entirely configurable using the
:paramref:`_sa.create_engine.poolclass` parameter; the :class:`.StaticPool`
may
be more feasible for single-threaded use, or :class:`.NullPool` may be used
to prevent unencrypted connections from being held open for long periods of
time, at the expense of slower startup time for new connections.
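
As a sketch, assuming a hypothetical passphrase and database file, the pool
implementation is swapped like that of any other dialect::

    from sqlalchemy import create_engine
    from sqlalchemy.pool import NullPool

    e = create_engine(
        "sqlite+pysqlcipher://:testing@/foo.db",
        poolclass=NullPool,
    )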
""" # noqa
from __future__ import absolute_import
from .pysqlite import SQLiteDialect_pysqlite
from ... import pool
from ...engine import url as _url
class SQLiteDialect_pysqlcipher(SQLiteDialect_pysqlite):
driver = "pysqlcipher"
pragmas = ("kdf_iter", "cipher", "cipher_page_size", "cipher_use_hmac")
@classmethod
def dbapi(cls):
try:
from pysqlcipher import dbapi2 as sqlcipher
except ImportError as e:
try:
from pysqlcipher3 import dbapi2 as sqlcipher
except ImportError:
raise e
return sqlcipher
@classmethod
def get_pool_class(cls, url):
return pool.SingletonThreadPool
def connect(self, *cargs, **cparams):
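        # the encryption passphrase must be established via "PRAGMA key"
        # before any other access to the database occurs; the remaining
        # SQLCipher pragmas are applied immediately afterwards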
passphrase = cparams.pop("passphrase", "")
pragmas = dict((key, cparams.pop(key, None)) for key in self.pragmas)
conn = super(SQLiteDialect_pysqlcipher, self).connect(
*cargs, **cparams
)
conn.execute('pragma key="%s"' % passphrase)
for prag, value in pragmas.items():
if value is not None:
conn.execute('pragma %s="%s"' % (prag, value))
return conn
def create_connect_args(self, url):
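        # rebuild the URL without the password so that the passphrase is not
        # interpreted by the underlying pysqlite dialect; it is instead
        # delivered to connect() via the "passphrase" connect argument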
super_url = _url.URL(
url.drivername,
username=url.username,
host=url.host,
database=url.database,
query=url.query,
)
c_args, opts = super(
SQLiteDialect_pysqlcipher, self
).create_connect_args(super_url)
opts["passphrase"] = url.password
return c_args, opts
dialect = SQLiteDialect_pysqlcipher
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/sqlite/__init__.py | # sqlite/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base # noqa
from . import pysqlcipher # noqa
from . import pysqlite # noqa
from .base import BLOB
from .base import BOOLEAN
from .base import CHAR
from .base import DATE
from .base import DATETIME
from .base import DECIMAL
from .base import FLOAT
from .base import INTEGER
from .base import JSON
from .base import NUMERIC
from .base import REAL
from .base import SMALLINT
from .base import TEXT
from .base import TIME
from .base import TIMESTAMP
from .base import VARCHAR
# default dialect
base.dialect = dialect = pysqlite.dialect
__all__ = (
"BLOB",
"BOOLEAN",
"CHAR",
"DATE",
"DATETIME",
"DECIMAL",
"FLOAT",
"INTEGER",
"JSON",
"NUMERIC",
"SMALLINT",
"TEXT",
"TIME",
"TIMESTAMP",
"VARCHAR",
"REAL",
"dialect",
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/sqlite/provision.py | import os
from ...engine import url as sa_url
from ...testing.provision import create_db
from ...testing.provision import drop_db
from ...testing.provision import follower_url_from_main
from ...testing.provision import log
from ...testing.provision import post_configure_engine
from ...testing.provision import run_reap_dbs
from ...testing.provision import temp_table_keyword_args
@follower_url_from_main.for_db("sqlite")
def _sqlite_follower_url_from_main(url, ident):
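    # in-memory databases are returned unchanged; file-based databases get
    # a distinct per-follower database file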
url = sa_url.make_url(url)
if not url.database or url.database == ":memory:":
return url
else:
return sa_url.make_url("sqlite:///%s.db" % ident)
@post_configure_engine.for_db("sqlite")
def _sqlite_post_configure_engine(url, engine, follower_ident):
from sqlalchemy import event
@event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_record):
        # use file DBs in all cases; in-memory databases act strangely
        # when used as an attached database
if not follower_ident:
# note this test_schema.db gets created for all test runs.
# there's not any dedicated cleanup step for it. it in some
# ways corresponds to the "test.test_schema" schema that's
# expected to be already present, so for now it just stays
# in a given checkout directory.
dbapi_connection.execute(
'ATTACH DATABASE "test_schema.db" AS test_schema'
)
else:
dbapi_connection.execute(
'ATTACH DATABASE "%s_test_schema.db" AS test_schema'
% follower_ident
)
@create_db.for_db("sqlite")
def _sqlite_create_db(cfg, eng, ident):
pass
@drop_db.for_db("sqlite")
def _sqlite_drop_db(cfg, eng, ident):
for path in ["%s.db" % ident, "%s_test_schema.db" % ident]:
if os.path.exists(path):
log.info("deleting SQLite database file: %s" % path)
os.remove(path)
@temp_table_keyword_args.for_db("sqlite")
def _sqlite_temp_table_keyword_args(cfg, eng):
return {"prefixes": ["TEMPORARY"]}
@run_reap_dbs.for_db("sqlite")
def _reap_sqlite_dbs(url, idents):
log.info("db reaper connecting to %r", url)
log.info("identifiers in file: %s", ", ".join(idents))
for ident in idents:
# we don't have a config so we can't call _sqlite_drop_db due to the
# decorator
for path in ["%s.db" % ident, "%s_test_schema.db" % ident]:
if os.path.exists(path):
log.info("deleting SQLite database file: %s" % path)
os.remove(path)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/sqlite/json.py | from ... import types as sqltypes
class JSON(sqltypes.JSON):
"""SQLite JSON type.
SQLite supports JSON as of version 3.9 through its JSON1_ extension. Note
that JSON1_ is a
`loadable extension <https://www.sqlite.org/loadext.html>`_ and as such
may not be available, or may require run-time loading.
The :class:`_sqlite.JSON` type supports persistence of JSON values
as well as the core index operations provided by :class:`_types.JSON`
datatype, by adapting the operations to render the ``JSON_EXTRACT``
function wrapped in the ``JSON_QUOTE`` function at the database level.
Extracted values are quoted in order to ensure that the results are
always JSON string values.
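
    E.g., a sketch of an indexed access (hypothetical table and column
    names)::

        from sqlalchemy import Column, MetaData, Table
        from sqlalchemy.dialects.sqlite import JSON

        t = Table("t", MetaData(), Column("data", JSON))

        # renders JSON_QUOTE(JSON_EXTRACT(t.data, '$."some_key"'))
        expr = t.c.data["some_key"]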
.. versionadded:: 1.3
.. seealso::
JSON1_
.. _JSON1: https://www.sqlite.org/json1.html
"""
# Note: these objects currently match exactly those of MySQL; however, since
# they are not generalizable to all JSON implementations, they remain
# separately implemented for each dialect.
class _FormatTypeMixin(object):
def _format_value(self, value):
raise NotImplementedError()
def bind_processor(self, dialect):
super_proc = self.string_bind_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
def literal_processor(self, dialect):
super_proc = self.string_literal_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
def _format_value(self, value):
if isinstance(value, int):
value = "$[%s]" % value
else:
value = '$."%s"' % value
return value
class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
def _format_value(self, value):
return "$%s" % (
"".join(
[
"[%s]" % elem if isinstance(elem, int) else '."%s"' % elem
for elem in value
]
)
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/sqlite/base.py | # sqlite/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: sqlite
:name: SQLite
.. _sqlite_datetime:
Date and Time Types
-------------------
SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does
not provide out-of-the-box functionality for translating values between Python
`datetime` objects and a SQLite-supported format. SQLAlchemy's own
:class:`~sqlalchemy.types.DateTime` and related types provide date formatting
and parsing functionality when SQLite is used. The implementation classes are
:class:`_sqlite.DATETIME`, :class:`_sqlite.DATE` and :class:`_sqlite.TIME`.
These types represent dates and times as ISO formatted strings, which also
nicely support ordering. There's no reliance on typical "libc" internals for
these functions so historical dates are fully supported.
Ensuring Text affinity
^^^^^^^^^^^^^^^^^^^^^^
The DDL rendered for these types is the standard ``DATE``, ``TIME``
and ``DATETIME`` indicators. However, custom storage formats can also be
applied to these types. When the
storage format is detected as containing no alpha characters, the DDL for
these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``,
so that the column continues to have textual affinity.
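
For example, a purely numeric storage format (a sketch; the format and
regexp shown here are illustrative only)::

    from sqlalchemy.dialects.sqlite import DATE

    d = DATE(
        storage_format="%(year)04d%(month)02d%(day)02d",
        regexp=r"(\d{4})(\d{2})(\d{2})",
    )

Since this format renders only digits, the DDL for the above type is
emitted as ``DATE_CHAR``.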
.. seealso::
`Type Affinity <http://www.sqlite.org/datatype3.html#affinity>`_ -
in the SQLite documentation
.. _sqlite_autoincrement:
SQLite Auto Incrementing Behavior
----------------------------------
Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html
Key concepts:
* SQLite has an implicit "auto increment" feature that takes place for any
non-composite primary-key column that is specifically created using
"INTEGER PRIMARY KEY" for the type + primary key.
* SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not**
equivalent to the implicit autoincrement feature; this keyword is not
recommended for general use. SQLAlchemy does not render this keyword
unless a special SQLite-specific directive is used (see below). However,
it still requires that the column's type is named "INTEGER".
Using the AUTOINCREMENT Keyword
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To specifically render the AUTOINCREMENT keyword on the primary key column
when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
construct::
Table('sometable', metadata,
Column('id', Integer, primary_key=True),
sqlite_autoincrement=True)
Allowing autoincrement behavior for SQLAlchemy types other than Integer/INTEGER
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
SQLite's typing model is based on naming conventions. Among other things, this
means that any type name which contains the substring ``"INT"`` will be
determined to be of "integer affinity". A type named ``"BIGINT"``,
``"SPECIAL_INT"`` or even ``"XYZINTQPR"``, will be considered by SQLite to be
of "integer" affinity. However, **the SQLite autoincrement feature, whether
implicitly or explicitly enabled, requires that the name of the column's type
is exactly the string "INTEGER"**. Therefore, if an application uses a type
like :class:`.BigInteger` for a primary key, on SQLite this type will need to
be rendered as the name ``"INTEGER"`` when emitting the initial ``CREATE
TABLE`` statement in order for the autoincrement behavior to be available.
One approach to achieve this is to use :class:`.Integer` on SQLite
only using :meth:`.TypeEngine.with_variant`::
table = Table(
"my_table", metadata,
Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True)
)
Another is to use a subclass of :class:`.BigInteger` that overrides its DDL
name to be ``INTEGER`` when compiled against SQLite::
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
class SLBigInteger(BigInteger):
pass
@compiles(SLBigInteger, 'sqlite')
def bi_c(element, compiler, **kw):
return "INTEGER"
@compiles(SLBigInteger)
def bi_c(element, compiler, **kw):
return compiler.visit_BIGINT(element, **kw)
table = Table(
"my_table", metadata,
Column("id", SLBigInteger(), primary_key=True)
)
.. seealso::
:meth:`.TypeEngine.with_variant`
:ref:`sqlalchemy.ext.compiler_toplevel`
`Datatypes In SQLite Version 3 <http://sqlite.org/datatype3.html>`_
.. _sqlite_concurrency:
Database Locking Behavior / Concurrency
---------------------------------------
SQLite is not designed for a high level of write concurrency. The database
itself, being a file, is locked completely during write operations within
transactions, meaning exactly one "connection" (in reality a file handle)
has exclusive access to the database during this period - all other
"connections" will be blocked during this time.
The Python DBAPI specification also calls for a connection model that is
always in a transaction; there is no ``connection.begin()`` method,
only ``connection.commit()`` and ``connection.rollback()``, upon which a
new transaction is to be begun immediately. This may seem to imply
that the SQLite driver would in theory allow only a single filehandle on a
particular database file at any time; however, there are several
factors both within SQLite itself as well as within the pysqlite driver
which loosen this restriction significantly.
However, no matter what locking modes are used, SQLite will still always
lock the database file once a transaction is started and DML (e.g. INSERT,
UPDATE, DELETE) has at least been emitted, and this will block
other transactions at least at the point that they also attempt to emit DML.
By default, the length of time on this block is very short before it times out
with an error.
This behavior becomes more critical when used in conjunction with the
SQLAlchemy ORM. SQLAlchemy's :class:`.Session` object by default runs
within a transaction, and with its autoflush model, may emit DML preceding
any SELECT statement. This may lead to a SQLite database that locks
more quickly than is expected. The locking mode of SQLite and the pysqlite
driver can be manipulated to some degree, however it should be noted that
achieving a high degree of write-concurrency with SQLite is a losing battle.
For more information on SQLite's lack of write concurrency by design, please
see
`Situations Where Another RDBMS May Work Better - High Concurrency
<http://www.sqlite.org/whentouse.html>`_ near the bottom of the page.
The following subsections introduce areas that are impacted by SQLite's
file-based architecture and additionally will usually require workarounds to
work when using the pysqlite driver.
.. _sqlite_isolation_level:
Transaction Isolation Level / Autocommit
----------------------------------------
SQLite supports "transaction isolation" in a non-standard way, along two
axes. One is that of the
`PRAGMA read_uncommitted <http://www.sqlite.org/pragma.html#pragma_read_uncommitted>`_
instruction. This setting can essentially switch SQLite between its
default mode of ``SERIALIZABLE`` isolation, and a "dirty read" isolation
mode normally referred to as ``READ UNCOMMITTED``.
SQLAlchemy ties into this PRAGMA statement using the
:paramref:`_sa.create_engine.isolation_level` parameter of
:func:`_sa.create_engine`.
Valid values for this parameter when used with SQLite are ``"SERIALIZABLE"``
and ``"READ UNCOMMITTED"`` corresponding to a value of 0 and 1, respectively.
SQLite defaults to ``SERIALIZABLE``, however its behavior is impacted by
the pysqlite driver's default behavior.
When using the pysqlite driver, the ``"AUTOCOMMIT"`` isolation level is also
available, which will alter the pysqlite connection using the ``.isolation_level``
attribute on the DBAPI connection and set it to None for the duration
of the setting.
.. versionadded:: 1.3.16 added support for SQLite AUTOCOMMIT isolation level
when using the pysqlite / sqlite3 SQLite driver.
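
For example, a minimal sketch selecting the "dirty read" mode::

    from sqlalchemy import create_engine

    eng = create_engine("sqlite://", isolation_level="READ UNCOMMITTED")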
The other axis along which SQLite's transactional locking is impacted is
via the nature of the ``BEGIN`` statement used. The three varieties
are "deferred", "immediate", and "exclusive", as described at
`BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_. A straight
``BEGIN`` statement uses the "deferred" mode, where the database file is
not locked until the first read or write operation, and read access remains
open to other transactions until the first write operation. But again,
it is critical to note that the pysqlite driver interferes with this behavior
by *not even emitting BEGIN* until the first write operation.
.. warning::
SQLite's transactional scope is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
SAVEPOINT Support
----------------------------
SQLite supports SAVEPOINTs, which only function once a transaction is
begun. SQLAlchemy's SAVEPOINT support is available using the
:meth:`_engine.Connection.begin_nested` method at the Core level, and
:meth:`.Session.begin_nested` at the ORM level. However, SAVEPOINTs
won't work at all with pysqlite unless workarounds are taken.
.. warning::
SQLite's SAVEPOINT feature is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
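
A minimal sketch of SAVEPOINT use at the Core level, assuming an ``engine``
where the pysqlite workarounds referenced above are already in place::

    with engine.connect() as conn:
        trans = conn.begin()
        savepoint = conn.begin_nested()  # emits SAVEPOINT
        # ... statements that may be rolled back independently ...
        savepoint.rollback()             # emits ROLLBACK TO SAVEPOINT
        trans.commit()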
Transactional DDL
----------------------------
The SQLite database supports transactional :term:`DDL` as well.
In this case, the pysqlite driver is not only failing to start transactions,
it also ends any existing transaction when DDL is detected, so again,
workarounds are required.
.. warning::
SQLite's transactional DDL is impacted by unresolved issues
in the pysqlite driver, which fails to emit BEGIN and additionally
forces a COMMIT to cancel any transaction when DDL is encountered.
See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
.. _sqlite_foreign_keys:
Foreign Key Support
-------------------
SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables;
however, by default these constraints have no effect on the operation of the
table.
Constraint checking on SQLite has three prerequisites:
* At least version 3.6.19 of SQLite must be in use
* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
or SQLITE_OMIT_TRIGGER symbols enabled.
* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all
connections before use.
SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for
new connections through the usage of events::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
.. warning::
When SQLite foreign keys are enabled, it is **not possible**
to emit CREATE or DROP statements for tables that contain
mutually-dependent foreign key constraints;
to emit the DDL for these tables requires that ALTER TABLE be used to
create or drop these constraints separately, for which SQLite has
no support.
.. seealso::
`SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_
- on the SQLite web site.
:ref:`event_toplevel` - SQLAlchemy event API.
:ref:`use_alter` - more information on SQLAlchemy's facilities for handling
mutually-dependent foreign key constraints.
.. _sqlite_on_conflict_ddl:
ON CONFLICT support for constraints
-----------------------------------
SQLite supports a non-standard clause known as ON CONFLICT which can be applied
to primary key, unique, check, and not null constraints. In DDL, it is
rendered either within the "CONSTRAINT" clause or within the column definition
itself depending on the location of the target constraint. To render this
clause within DDL, the extension parameter ``sqlite_on_conflict`` can be
specified with a string conflict resolution algorithm within the
:class:`.PrimaryKeyConstraint`, :class:`.UniqueConstraint`,
:class:`.CheckConstraint` objects. Within the :class:`_schema.Column` object,
there
are individual parameters ``sqlite_on_conflict_not_null``,
``sqlite_on_conflict_primary_key``, ``sqlite_on_conflict_unique`` which each
correspond to the three types of relevant constraint types that can be
indicated from a :class:`_schema.Column` object.
.. seealso::
`ON CONFLICT <https://www.sqlite.org/lang_conflict.html>`_ - in the SQLite
documentation
.. versionadded:: 1.3
The ``sqlite_on_conflict`` parameters accept a string argument which is just
the resolution name to be chosen, which on SQLite can be one of ROLLBACK,
ABORT, FAIL, IGNORE, and REPLACE. For example, to add a UNIQUE constraint
that specifies the IGNORE algorithm::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer),
UniqueConstraint('id', 'data', sqlite_on_conflict='IGNORE')
)
The above renders CREATE TABLE DDL as::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER,
PRIMARY KEY (id),
UNIQUE (id, data) ON CONFLICT IGNORE
)
When using the :paramref:`_schema.Column.unique`
flag to add a UNIQUE constraint
to a single column, the ``sqlite_on_conflict_unique`` parameter can
be added to the :class:`_schema.Column` as well, which will be added to the
UNIQUE constraint in the DDL::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer, unique=True,
sqlite_on_conflict_unique='IGNORE')
)
rendering::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER,
PRIMARY KEY (id),
UNIQUE (data) ON CONFLICT IGNORE
)
To apply the FAIL algorithm for a NOT NULL constraint,
``sqlite_on_conflict_not_null`` is used::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer, nullable=False,
sqlite_on_conflict_not_null='FAIL')
)
this renders the column inline ON CONFLICT phrase::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER NOT NULL ON CONFLICT FAIL,
PRIMARY KEY (id)
)
Similarly, for an inline primary key, use ``sqlite_on_conflict_primary_key``::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True,
sqlite_on_conflict_primary_key='FAIL')
)
SQLAlchemy renders the PRIMARY KEY constraint separately, so the conflict
resolution algorithm is applied to the constraint itself::
CREATE TABLE some_table (
id INTEGER NOT NULL,
PRIMARY KEY (id) ON CONFLICT FAIL
)
.. _sqlite_type_reflection:
Type Reflection
---------------
SQLite types are unlike those of most other database backends, in that
the string name of the type usually does not correspond to a "type" in a
one-to-one fashion. Instead, SQLite links per-column typing behavior
to one of five so-called "type affinities" based on a string matching
pattern for the type.
SQLAlchemy's reflection process, when inspecting types, uses a simple
lookup table to link the keywords returned to provided SQLAlchemy types.
This lookup table is present within the SQLite dialect as it is for all
other dialects. However, the SQLite dialect has a different "fallback"
routine for when a particular type name is not located in the lookup map;
it instead implements the SQLite "type affinity" scheme located at
http://www.sqlite.org/datatype3.html section 2.1.
The provided typemap will make direct associations from an exact string
name match for the following types:
:class:`_types.BIGINT`, :class:`_types.BLOB`,
:class:`_types.BOOLEAN`, :class:`_types.BOOLEAN`,
:class:`_types.CHAR`, :class:`_types.DATE`,
:class:`_types.DATETIME`, :class:`_types.FLOAT`,
:class:`_types.DECIMAL`, :class:`_types.FLOAT`,
:class:`_types.INTEGER`, :class:`_types.INTEGER`,
:class:`_types.NUMERIC`, :class:`_types.REAL`,
:class:`_types.SMALLINT`, :class:`_types.TEXT`,
:class:`_types.TIME`, :class:`_types.TIMESTAMP`,
:class:`_types.VARCHAR`, :class:`_types.NVARCHAR`,
:class:`_types.NCHAR`
When a type name does not match one of the above types, the "type affinity"
lookup is used instead:
* :class:`_types.INTEGER` is returned if the type name includes the
string ``INT``
* :class:`_types.TEXT` is returned if the type name includes the
string ``CHAR``, ``CLOB`` or ``TEXT``
* :class:`_types.NullType` is returned if the type name includes the
string ``BLOB``
* :class:`_types.REAL` is returned if the type name includes the string
``REAL``, ``FLOA`` or ``DOUB``.
* Otherwise, the :class:`_types.NUMERIC` type is used.
.. versionadded:: 0.9.3 Support for SQLite type affinity rules when reflecting
columns.
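
As an illustrative sketch (hypothetical table and column names)::

    from sqlalchemy import create_engine, inspect

    eng = create_engine("sqlite://")
    eng.execute("CREATE TABLE t (x XYZINTQPR, y SOMEDOUBLE)")

    insp = inspect(eng)
    # "XYZINTQPR" contains "INT" -> INTEGER affinity;
    # "SOMEDOUBLE" contains "DOUB" -> REAL affinity
    print([col["type"] for col in insp.get_columns("t")])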
.. _sqlite_partial_index:
Partial Indexes
---------------
A partial index, e.g. one which uses a WHERE clause, can be specified
with the DDL system using the argument ``sqlite_where``::
tbl = Table('testtbl', m, Column('data', Integer))
idx = Index('test_idx1', tbl.c.data,
sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10))
The index will be rendered at create time as::
CREATE INDEX test_idx1 ON testtbl (data)
WHERE data > 5 AND data < 10
.. versionadded:: 0.9.9
.. _sqlite_dotted_column_names:
Dotted Column Names
-------------------
Using table or column names that explicitly have periods in them is
**not recommended**. While this is a bad idea for relational
databases in general, as the dot is a syntactically significant character,
the SQLite driver prior to version **3.10.0** of SQLite has a bug which
requires that SQLAlchemy filter out these dots in result sets.
.. versionchanged:: 1.1
The following SQLite issue has been resolved as of version 3.10.0
of SQLite. SQLAlchemy as of **1.1** automatically disables its internal
workarounds based on detection of this version.
The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
import sqlite3
assert sqlite3.sqlite_version_info < (3, 10, 0), "bug is fixed in this version"
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("create table x (a integer, b integer)")
cursor.execute("insert into x (a, b) values (1, 1)")
cursor.execute("insert into x (a, b) values (2, 2)")
cursor.execute("select x.a, x.b from x")
assert [c[0] for c in cursor.description] == ['a', 'b']
cursor.execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert [c[0] for c in cursor.description] == ['a', 'b'], \
[c[0] for c in cursor.description]
The second assertion fails::
Traceback (most recent call last):
File "test.py", line 19, in <module>
[c[0] for c in cursor.description]
AssertionError: ['x.a', 'x.b']
Where above, the driver incorrectly reports the names of the columns
including the name of the table, which is entirely inconsistent vs.
when the UNION is not present.
SQLAlchemy relies upon column names being predictable in how they match
to the original statement, so the SQLAlchemy dialect has no choice but
to filter these out::
from sqlalchemy import create_engine
eng = create_engine("sqlite://")
conn = eng.connect()
conn.execute("create table x (a integer, b integer)")
conn.execute("insert into x (a, b) values (1, 1)")
conn.execute("insert into x (a, b) values (2, 2)")
result = conn.execute("select x.a, x.b from x")
assert result.keys() == ["a", "b"]
result = conn.execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["a", "b"]
Note that above, even though SQLAlchemy filters out the dots, *both
names are still addressable*::
>>> row = result.first()
>>> row["a"]
1
>>> row["x.a"]
1
>>> row["b"]
1
>>> row["x.b"]
1
Therefore, the workaround applied by SQLAlchemy only impacts
:meth:`_engine.ResultProxy.keys` and :meth:`.RowProxy.keys()`
in the public API. In
the very specific case where an application is forced to use column names that
contain dots, and the functionality of :meth:`_engine.ResultProxy.keys` and
:meth:`.RowProxy.keys()` is required to return these dotted names unmodified,
the ``sqlite_raw_colnames`` execution option may be provided, either on a
per-:class:`_engine.Connection` basis::
result = conn.execution_options(sqlite_raw_colnames=True).execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["x.a", "x.b"]
or on a per-:class:`_engine.Engine` basis::
engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True})
When using the per-:class:`_engine.Engine` execution option, note that
**Core and ORM queries that use UNION may not function properly**.
""" # noqa
import datetime
import numbers
import re
from .json import JSON
from .json import JSONIndexType
from .json import JSONPathType
from ... import exc
from ... import processors
from ... import schema as sa_schema
from ... import sql
from ... import types as sqltypes
from ... import util
from ...engine import default
from ...engine import reflection
from ...sql import ColumnElement
from ...sql import compiler
from ...types import BLOB # noqa
from ...types import BOOLEAN # noqa
from ...types import CHAR # noqa
from ...types import DECIMAL # noqa
from ...types import FLOAT # noqa
from ...types import INTEGER # noqa
from ...types import NUMERIC # noqa
from ...types import REAL # noqa
from ...types import SMALLINT # noqa
from ...types import TEXT # noqa
from ...types import TIMESTAMP # noqa
from ...types import VARCHAR # noqa
class _SQliteJson(JSON):
def result_processor(self, dialect, coltype):
default_processor = super(_SQliteJson, self).result_processor(
dialect, coltype
)
def process(value):
try:
return default_processor(value)
except TypeError:
if isinstance(value, numbers.Number):
return value
else:
raise
return process
class _DateTimeMixin(object):
_reg = None
_storage_format = None
def __init__(self, storage_format=None, regexp=None, **kw):
super(_DateTimeMixin, self).__init__(**kw)
if regexp is not None:
self._reg = re.compile(regexp)
if storage_format is not None:
self._storage_format = storage_format
@property
def format_is_text_affinity(self):
"""return True if the storage format will automatically imply
a TEXT affinity.
If the storage format contains no non-numeric characters,
it will imply a NUMERIC storage format on SQLite; in this case,
the type will generate its DDL as DATE_CHAR, DATETIME_CHAR,
TIME_CHAR.
.. versionadded:: 1.0.0
"""
spec = self._storage_format % {
"year": 0,
"month": 0,
"day": 0,
"hour": 0,
"minute": 0,
"second": 0,
"microsecond": 0,
}
return bool(re.search(r"[^0-9]", spec))
def adapt(self, cls, **kw):
if issubclass(cls, _DateTimeMixin):
if self._storage_format:
kw["storage_format"] = self._storage_format
if self._reg:
kw["regexp"] = self._reg
return super(_DateTimeMixin, self).adapt(cls, **kw)
def literal_processor(self, dialect):
bp = self.bind_processor(dialect)
def process(value):
return "'%s'" % bp(value)
return process
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
r"""Represent a Python datetime object in SQLite using a string.
The default string storage format is::
"%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
e.g.::
2011-03-15 12:05:57.10558
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATETIME
dt = DATETIME(storage_format="%(year)04d/%(month)02d/%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d",
regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)"
)
:param storage_format: format string which will be applied to the dict
with keys year, month, day, hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows. If the regexp contains named groups, the resulting match dict is
applied to the Python datetime() constructor as keyword arguments.
Otherwise, if positional groups are used, the datetime() constructor
is called with positional arguments via
``*map(int, match_obj.groups(0))``.
""" # noqa
_storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
)
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop("truncate_microseconds", False)
super(DATETIME, self).__init__(*args, **kwargs)
if truncate_microseconds:
assert "storage_format" not in kwargs, (
"You can specify only "
"one of truncate_microseconds or storage_format."
)
assert "regexp" not in kwargs, (
"You can specify only one of "
"truncate_microseconds or regexp."
)
self._storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d"
)
def bind_processor(self, dialect):
datetime_datetime = datetime.datetime
datetime_date = datetime.date
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_datetime):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
"hour": value.hour,
"minute": value.minute,
"second": value.second,
"microsecond": value.microsecond,
}
elif isinstance(value, datetime_date):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
"hour": 0,
"minute": 0,
"second": 0,
"microsecond": 0,
}
else:
raise TypeError(
"SQLite DateTime type only accepts Python "
"datetime and date objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.datetime
)
else:
return processors.str_to_datetime
class DATE(_DateTimeMixin, sqltypes.Date):
r"""Represent a Python date object in SQLite using a string.
The default string storage format is::
"%(year)04d-%(month)02d-%(day)02d"
e.g.::
2011-03-15
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATE
d = DATE(
storage_format="%(month)02d/%(day)02d/%(year)04d",
regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)")
)
:param storage_format: format string which will be applied to the
dict with keys year, month, and day.
:param regexp: regular expression which will be applied to
incoming result rows. If the regexp contains named groups, the
resulting match dict is applied to the Python date() constructor
as keyword arguments. Otherwise, if positional groups are used, the
date() constructor is called with positional arguments via
``*map(int, match_obj.groups(0))``.
"""
_storage_format = "%(year)04d-%(month)02d-%(day)02d"
def bind_processor(self, dialect):
datetime_date = datetime.date
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_date):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
}
else:
raise TypeError(
"SQLite Date type only accepts Python "
"date objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.date
)
else:
return processors.str_to_date
class TIME(_DateTimeMixin, sqltypes.Time):
r"""Represent a Python time object in SQLite using a string.
The default string storage format is::
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
e.g.::
12:05:57.10558
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import TIME
t = TIME(storage_format="%(hour)02d-%(minute)02d-"
"%(second)02d-%(microsecond)06d",
regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?")
)
:param storage_format: format string which will be applied to the dict
with keys hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows. If the regexp contains named groups, the resulting match dict is
applied to the Python time() constructor as keyword arguments. Otherwise,
if positional groups are used, the time() constructor is called with
positional arguments via ``*map(int, match_obj.groups(0))``.
"""
_storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop("truncate_microseconds", False)
super(TIME, self).__init__(*args, **kwargs)
if truncate_microseconds:
assert "storage_format" not in kwargs, (
"You can specify only "
"one of truncate_microseconds or storage_format."
)
assert "regexp" not in kwargs, (
"You can specify only one of "
"truncate_microseconds or regexp."
)
self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d"
def bind_processor(self, dialect):
datetime_time = datetime.time
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_time):
return format_ % {
"hour": value.hour,
"minute": value.minute,
"second": value.second,
"microsecond": value.microsecond,
}
else:
raise TypeError(
"SQLite Time type only accepts Python "
"time objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.time
)
else:
return processors.str_to_time
colspecs = {
sqltypes.Date: DATE,
sqltypes.DateTime: DATETIME,
sqltypes.JSON: _SQliteJson,
sqltypes.JSON.JSONIndexType: JSONIndexType,
sqltypes.JSON.JSONPathType: JSONPathType,
sqltypes.Time: TIME,
}
ischema_names = {
"BIGINT": sqltypes.BIGINT,
"BLOB": sqltypes.BLOB,
"BOOL": sqltypes.BOOLEAN,
"BOOLEAN": sqltypes.BOOLEAN,
"CHAR": sqltypes.CHAR,
"DATE": sqltypes.DATE,
"DATE_CHAR": sqltypes.DATE,
"DATETIME": sqltypes.DATETIME,
"DATETIME_CHAR": sqltypes.DATETIME,
"DOUBLE": sqltypes.FLOAT,
"DECIMAL": sqltypes.DECIMAL,
"FLOAT": sqltypes.FLOAT,
"INT": sqltypes.INTEGER,
"INTEGER": sqltypes.INTEGER,
"JSON": JSON,
"NUMERIC": sqltypes.NUMERIC,
"REAL": sqltypes.REAL,
"SMALLINT": sqltypes.SMALLINT,
"TEXT": sqltypes.TEXT,
"TIME": sqltypes.TIME,
"TIME_CHAR": sqltypes.TIME,
"TIMESTAMP": sqltypes.TIMESTAMP,
"VARCHAR": sqltypes.VARCHAR,
"NVARCHAR": sqltypes.NVARCHAR,
"NCHAR": sqltypes.NCHAR,
}
class SQLiteCompiler(compiler.SQLCompiler):
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
"month": "%m",
"day": "%d",
"year": "%Y",
"second": "%S",
"hour": "%H",
"doy": "%j",
"minute": "%M",
"epoch": "%s",
"dow": "%w",
"week": "%W",
},
)
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_localtimestamp_func(self, func, **kw):
return 'DATETIME(CURRENT_TIMESTAMP, "localtime")'
def visit_true(self, expr, **kw):
return "1"
def visit_false(self, expr, **kw):
return "0"
def visit_char_length_func(self, fn, **kw):
return "length%s" % self.function_argspec(fn)
def visit_cast(self, cast, **kwargs):
if self.dialect.supports_cast:
return super(SQLiteCompiler, self).visit_cast(cast, **kwargs)
else:
return self.process(cast.clause, **kwargs)
def visit_extract(self, extract, **kw):
try:
return "CAST(STRFTIME('%s', %s) AS INTEGER)" % (
self.extract_map[extract.field],
self.process(extract.expr, **kw),
)
except KeyError as err:
util.raise_(
exc.CompileError(
"%s is not a valid extract argument." % extract.field
),
replace_context=err,
)
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
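                # SQLite requires a LIMIT clause in order to render OFFSET;
                # per the SQLite documentation, LIMIT -1 means "no limit"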
text += "\n LIMIT " + self.process(sql.literal(-1))
text += " OFFSET " + self.process(select._offset_clause, **kw)
else:
text += " OFFSET " + self.process(sql.literal(0), **kw)
return text
def for_update_clause(self, select, **kw):
# sqlite has no "FOR UPDATE" AFAICT
return ""
def visit_is_distinct_from_binary(self, binary, operator, **kw):
return "%s IS NOT %s" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_isnot_distinct_from_binary(self, binary, operator, **kw):
return "%s IS %s" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_json_getitem_op_binary(self, binary, operator, **kw):
if binary.type._type_affinity is sqltypes.JSON:
expr = "JSON_QUOTE(JSON_EXTRACT(%s, %s))"
else:
expr = "JSON_EXTRACT(%s, %s)"
return expr % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
if binary.type._type_affinity is sqltypes.JSON:
expr = "JSON_QUOTE(JSON_EXTRACT(%s, %s))"
else:
expr = "JSON_EXTRACT(%s, %s)"
return expr % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_empty_set_expr(self, element_types):
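        # render a SELECT guaranteed to return zero rows, with one column
        # per given element type (defaulting to a single INTEGER column)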
return "SELECT %s FROM (SELECT %s) WHERE 1!=1" % (
", ".join("1" for type_ in element_types or [INTEGER()]),
", ".join("1" for type_ in element_types or [INTEGER()]),
)
class SQLiteDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
coltype = self.dialect.type_compiler.process(
column.type, type_expression=column
)
colspec = self.preparer.format_column(column) + " " + coltype
default = self.get_column_default_string(column)
if default is not None:
if isinstance(column.server_default.arg, ColumnElement):
default = "(" + default + ")"
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
on_conflict_clause = column.dialect_options["sqlite"][
"on_conflict_not_null"
]
if on_conflict_clause is not None:
colspec += " ON CONFLICT " + on_conflict_clause
if column.primary_key:
if (
column.autoincrement is True
and len(column.table.primary_key.columns) != 1
):
raise exc.CompileError(
"SQLite does not support autoincrement for "
"composite primary keys"
)
if (
column.table.dialect_options["sqlite"]["autoincrement"]
and len(column.table.primary_key.columns) == 1
and issubclass(column.type._type_affinity, sqltypes.Integer)
and not column.foreign_keys
):
colspec += " PRIMARY KEY"
on_conflict_clause = column.dialect_options["sqlite"][
"on_conflict_primary_key"
]
if on_conflict_clause is not None:
colspec += " ON CONFLICT " + on_conflict_clause
colspec += " AUTOINCREMENT"
if column.computed is not None:
colspec += " " + self.process(column.computed)
return colspec
def visit_primary_key_constraint(self, constraint):
# for columns with sqlite_autoincrement=True,
# the PRIMARY KEY constraint can only be inline
# with the column itself.
if len(constraint.columns) == 1:
c = list(constraint)[0]
if (
c.primary_key
and c.table.dialect_options["sqlite"]["autoincrement"]
and issubclass(c.type._type_affinity, sqltypes.Integer)
and not c.foreign_keys
):
return None
text = super(SQLiteDDLCompiler, self).visit_primary_key_constraint(
constraint
)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
]
if on_conflict_clause is None and len(constraint.columns) == 1:
on_conflict_clause = list(constraint)[0].dialect_options["sqlite"][
"on_conflict_primary_key"
]
if on_conflict_clause is not None:
text += " ON CONFLICT " + on_conflict_clause
return text
def visit_unique_constraint(self, constraint):
text = super(SQLiteDDLCompiler, self).visit_unique_constraint(
constraint
)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
]
if on_conflict_clause is None and len(constraint.columns) == 1:
on_conflict_clause = list(constraint)[0].dialect_options["sqlite"][
"on_conflict_unique"
]
if on_conflict_clause is not None:
text += " ON CONFLICT " + on_conflict_clause
return text
def visit_check_constraint(self, constraint):
text = super(SQLiteDDLCompiler, self).visit_check_constraint(
constraint
)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
]
if on_conflict_clause is not None:
text += " ON CONFLICT " + on_conflict_clause
return text
def visit_column_check_constraint(self, constraint):
text = super(SQLiteDDLCompiler, self).visit_column_check_constraint(
constraint
)
if constraint.dialect_options["sqlite"]["on_conflict"] is not None:
raise exc.CompileError(
"SQLite does not support on conflict clause for "
"column check constraint"
)
return text
def visit_foreign_key_constraint(self, constraint):
local_table = constraint.elements[0].parent.table
remote_table = constraint.elements[0].column.table
if local_table.schema != remote_table.schema:
return None
else:
return super(SQLiteDDLCompiler, self).visit_foreign_key_constraint(
constraint
)
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
return preparer.format_table(table, use_schema=False)
def visit_create_index(
self, create, include_schema=False, include_table_schema=True
):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX %s ON %s (%s)" % (
self._prepared_index_name(index, include_schema=True),
preparer.format_table(index.table, use_schema=False),
", ".join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
),
)
whereclause = index.dialect_options["sqlite"]["where"]
if whereclause is not None:
where_compiled = self.sql_compiler.process(
whereclause, include_table=False, literal_binds=True
)
text += " WHERE " + where_compiled
return text
class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_)
def visit_DATETIME(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_DATETIME(type_)
else:
return "DATETIME_CHAR"
def visit_DATE(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_DATE(type_)
else:
return "DATE_CHAR"
def visit_TIME(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_TIME(type_)
else:
return "TIME_CHAR"
def visit_JSON(self, type_, **kw):
# note this name provides NUMERIC affinity, not TEXT.
# should not be an issue unless the JSON value consists of a single
# numeric value. JSONTEXT can be used if this case is required.
return "JSON"
class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = set(
[
"add",
"after",
"all",
"alter",
"analyze",
"and",
"as",
"asc",
"attach",
"autoincrement",
"before",
"begin",
"between",
"by",
"cascade",
"case",
"cast",
"check",
"collate",
"column",
"commit",
"conflict",
"constraint",
"create",
"cross",
"current_date",
"current_time",
"current_timestamp",
"database",
"default",
"deferrable",
"deferred",
"delete",
"desc",
"detach",
"distinct",
"drop",
"each",
"else",
"end",
"escape",
"except",
"exclusive",
"exists",
"explain",
"false",
"fail",
"for",
"foreign",
"from",
"full",
"glob",
"group",
"having",
"if",
"ignore",
"immediate",
"in",
"index",
"indexed",
"initially",
"inner",
"insert",
"instead",
"intersect",
"into",
"is",
"isnull",
"join",
"key",
"left",
"like",
"limit",
"match",
"natural",
"not",
"notnull",
"null",
"of",
"offset",
"on",
"or",
"order",
"outer",
"plan",
"pragma",
"primary",
"query",
"raise",
"references",
"reindex",
"rename",
"replace",
"restrict",
"right",
"rollback",
"row",
"select",
"set",
"table",
"temp",
"temporary",
"then",
"to",
"transaction",
"trigger",
"true",
"union",
"unique",
"update",
"using",
"vacuum",
"values",
"view",
"virtual",
"when",
"where",
]
)
class SQLiteExecutionContext(default.DefaultExecutionContext):
@util.memoized_property
def _preserve_raw_colnames(self):
return (
not self.dialect._broken_dotted_colnames
or self.execution_options.get("sqlite_raw_colnames", False)
)
def _translate_colname(self, colname):
# TODO: detect SQLite version 3.10.0 or greater;
# see [ticket:3633]
# adjust for dotted column names. SQLite
# in the case of UNION may store col names as
# "tablename.colname", or if using an attached database,
# "database.tablename.colname", in cursor.description
if not self._preserve_raw_colnames and "." in colname:
return colname.split(".")[-1], colname
else:
return colname, None
class SQLiteDialect(default.DefaultDialect):
name = "sqlite"
supports_alter = False
supports_unicode_statements = True
supports_unicode_binds = True
supports_default_values = True
supports_empty_insert = False
supports_cast = True
supports_multivalues_insert = True
tuple_in_values = True
default_paramstyle = "qmark"
execution_ctx_cls = SQLiteExecutionContext
statement_compiler = SQLiteCompiler
ddl_compiler = SQLiteDDLCompiler
type_compiler = SQLiteTypeCompiler
preparer = SQLiteIdentifierPreparer
ischema_names = ischema_names
colspecs = colspecs
isolation_level = None
construct_arguments = [
(sa_schema.Table, {"autoincrement": False}),
(sa_schema.Index, {"where": None}),
(
sa_schema.Column,
{
"on_conflict_primary_key": None,
"on_conflict_not_null": None,
"on_conflict_unique": None,
},
),
(sa_schema.Constraint, {"on_conflict": None}),
]
_broken_fk_pragma_quotes = False
_broken_dotted_colnames = False
@util.deprecated_params(
_json_serializer=(
"1.3.7",
"The _json_serializer argument to the SQLite dialect has "
"been renamed to the correct name of json_serializer. The old "
"argument name will be removed in a future release.",
),
_json_deserializer=(
"1.3.7",
"The _json_deserializer argument to the SQLite dialect has "
"been renamed to the correct name of json_deserializer. The old "
"argument name will be removed in a future release.",
),
)
def __init__(
self,
isolation_level=None,
native_datetime=False,
json_serializer=None,
json_deserializer=None,
_json_serializer=None,
_json_deserializer=None,
**kwargs
):
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
if _json_serializer:
json_serializer = _json_serializer
if _json_deserializer:
json_deserializer = _json_deserializer
self._json_serializer = json_serializer
self._json_deserializer = json_deserializer
# this flag used by pysqlite dialect, and perhaps others in the
# future, to indicate the driver is handling date/timestamp
# conversions (and perhaps datetime/time as well on some hypothetical
# driver ?)
self.native_datetime = native_datetime
if self.dbapi is not None:
self.supports_right_nested_joins = (
self.dbapi.sqlite_version_info >= (3, 7, 16)
)
self._broken_dotted_colnames = self.dbapi.sqlite_version_info < (
3,
10,
0,
)
self.supports_default_values = self.dbapi.sqlite_version_info >= (
3,
3,
8,
)
self.supports_cast = self.dbapi.sqlite_version_info >= (3, 2, 3)
self.supports_multivalues_insert = (
# http://www.sqlite.org/releaselog/3_7_11.html
self.dbapi.sqlite_version_info
>= (3, 7, 11)
)
# see http://www.sqlalchemy.org/trac/ticket/2568
# as well as http://www.sqlite.org/src/info/600482d161
self._broken_fk_pragma_quotes = self.dbapi.sqlite_version_info < (
3,
6,
14,
)
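    # SQLite exposes only two isolation behaviors through
    # "PRAGMA read_uncommitted": 0 -> SERIALIZABLE (the default),
    # 1 -> READ UNCOMMITTED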
_isolation_lookup = {"READ UNCOMMITTED": 1, "SERIALIZABLE": 0}
def set_isolation_level(self, connection, level):
try:
isolation_level = self._isolation_lookup[level.replace("_", " ")]
except KeyError as err:
util.raise_(
exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
),
replace_context=err,
)
cursor = connection.cursor()
cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level)
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
cursor.execute("PRAGMA read_uncommitted")
res = cursor.fetchone()
if res:
value = res[0]
else:
# http://www.sqlite.org/changes.html#version_3_3_3
# "Optional READ UNCOMMITTED isolation (instead of the
# default isolation level of SERIALIZABLE) and
# table level locking when database connections
# share a common cache.""
# pre-SQLite 3.3.0 default to 0
value = 0
cursor.close()
if value == 0:
return "SERIALIZABLE"
elif value == 1:
return "READ UNCOMMITTED"
else:
assert False, "Unknown isolation level %s" % value
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
@reflection.cache
def get_schema_names(self, connection, **kw):
s = "PRAGMA database_list"
dl = connection.execute(s)
return [db[1] for db in dl if db[1] != "temp"]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
else:
master = "sqlite_master"
s = ("SELECT name FROM %s " "WHERE type='table' ORDER BY name") % (
master,
)
rs = connection.execute(s)
return [row[0] for row in rs]
@reflection.cache
def get_temp_table_names(self, connection, **kw):
s = (
"SELECT name FROM sqlite_temp_master "
"WHERE type='table' ORDER BY name "
)
rs = connection.execute(s)
return [row[0] for row in rs]
@reflection.cache
def get_temp_view_names(self, connection, **kw):
s = (
"SELECT name FROM sqlite_temp_master "
"WHERE type='view' ORDER BY name "
)
rs = connection.execute(s)
return [row[0] for row in rs]
def has_table(self, connection, table_name, schema=None):
info = self._get_table_pragma(
connection, "table_info", table_name, schema=schema
)
return bool(info)
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
else:
master = "sqlite_master"
s = ("SELECT name FROM %s " "WHERE type='view' ORDER BY name") % (
master,
)
rs = connection.execute(s)
return [row[0] for row in rs]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
s = ("SELECT sql FROM %s WHERE name = '%s'" "AND type='view'") % (
master,
view_name,
)
rs = connection.execute(s)
else:
try:
s = (
"SELECT sql FROM "
" (SELECT * FROM sqlite_master UNION ALL "
" SELECT * FROM sqlite_temp_master) "
"WHERE name = '%s' "
"AND type='view'"
) % view_name
rs = connection.execute(s)
except exc.DBAPIError:
s = (
"SELECT sql FROM sqlite_master WHERE name = '%s' "
"AND type='view'"
) % view_name
rs = connection.execute(s)
result = rs.fetchall()
if result:
return result[0].sql
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
pragma = "table_info"
        # computed columns are treated as hidden; they require table_xinfo
if self.server_version_info >= (3, 31):
pragma = "table_xinfo"
info = self._get_table_pragma(
connection, pragma, table_name, schema=schema
)
columns = []
tablesql = None
for row in info:
name = row[1]
type_ = row[2].upper()
nullable = not row[3]
default = row[4]
primary_key = row[5]
hidden = row[6] if pragma == "table_xinfo" else 0
# hidden has value 0 for normal columns, 1 for hidden columns,
# 2 for computed virtual columns and 3 for computed stored columns
# https://www.sqlite.org/src/info/069351b85f9a706f60d3e98fbc8aaf40c374356b967c0464aede30ead3d9d18b
if hidden == 1:
continue
generated = bool(hidden)
persisted = hidden == 3
if tablesql is None and generated:
tablesql = self._get_table_sql(
connection, table_name, schema, **kw
)
columns.append(
self._get_column_info(
name,
type_,
nullable,
default,
primary_key,
generated,
persisted,
tablesql,
)
)
return columns
def _get_column_info(
self,
name,
type_,
nullable,
default,
primary_key,
generated,
persisted,
tablesql,
):
if generated:
# the type of a column "cc INTEGER GENERATED ALWAYS AS (1 + 42)"
# somehow is "INTEGER GENERATED ALWAYS"
type_ = re.sub("generated", "", type_, flags=re.IGNORECASE)
type_ = re.sub("always", "", type_, flags=re.IGNORECASE).strip()
coltype = self._resolve_type_affinity(type_)
if default is not None:
default = util.text_type(default)
colspec = {
"name": name,
"type": coltype,
"nullable": nullable,
"default": default,
"autoincrement": "auto",
"primary_key": primary_key,
}
if generated:
sqltext = ""
if tablesql:
pattern = r"[^,]*\s+AS\s+\(([^,]*)\)\s*(?:virtual|stored)?"
match = re.search(
re.escape(name) + pattern, tablesql, re.IGNORECASE
)
if match:
sqltext = match.group(1)
colspec["computed"] = {"sqltext": sqltext, "persisted": persisted}
return colspec
def _resolve_type_affinity(self, type_):
"""Return a data type from a reflected column, using affinity tules.
SQLite's goal for universal compatibility introduces some complexity
during reflection, as a column's defined type might not actually be a
        type that SQLite understands - or indeed, may not be defined *at all*.
Internally, SQLite handles this with a 'data type affinity' for each
column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER',
'REAL', or 'NONE' (raw bits). The algorithm that determines this is
listed in http://www.sqlite.org/datatype3.html section 2.1.
This method allows SQLAlchemy to support that algorithm, while still
        providing access to smarter reflection utilities by recognizing
column definitions that SQLite only supports through affinity (like
DATE and DOUBLE).
"""
match = re.match(r"([\w ]+)(\(.*?\))?", type_)
if match:
coltype = match.group(1)
args = match.group(2)
else:
coltype = ""
args = ""
if coltype in self.ischema_names:
coltype = self.ischema_names[coltype]
elif "INT" in coltype:
coltype = sqltypes.INTEGER
elif "CHAR" in coltype or "CLOB" in coltype or "TEXT" in coltype:
coltype = sqltypes.TEXT
elif "BLOB" in coltype or not coltype:
coltype = sqltypes.NullType
elif "REAL" in coltype or "FLOA" in coltype or "DOUB" in coltype:
coltype = sqltypes.REAL
else:
coltype = sqltypes.NUMERIC
if args is not None:
args = re.findall(r"(\d+)", args)
try:
coltype = coltype(*[int(a) for a in args])
except TypeError:
util.warn(
"Could not instantiate type %s with "
"reflected arguments %s; using no arguments."
% (coltype, args)
)
coltype = coltype()
else:
coltype = coltype()
return coltype
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
constraint_name = None
table_data = self._get_table_sql(connection, table_name, schema=schema)
if table_data:
PK_PATTERN = r"CONSTRAINT (\w+) PRIMARY KEY"
result = re.search(PK_PATTERN, table_data, re.I)
constraint_name = result.group(1) if result else None
cols = self.get_columns(connection, table_name, schema, **kw)
pkeys = []
for col in cols:
if col["primary_key"]:
pkeys.append(col["name"])
return {"constrained_columns": pkeys, "name": constraint_name}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
# sqlite makes this *extremely difficult*.
# First, use the pragma to get the actual FKs.
pragma_fks = self._get_table_pragma(
connection, "foreign_key_list", table_name, schema=schema
)
fks = {}
for row in pragma_fks:
(numerical_id, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4])
if not rcol:
# no referred column, which means it was not named in the
# original DDL. The referred columns of the foreign key
# constraint are therefore the primary key of the referred
# table.
referred_pk = self.get_pk_constraint(
connection, rtbl, schema=schema, **kw
)
                # note that if table doesn't exist, we still get back a record,
# just it has no columns in it
referred_columns = referred_pk["constrained_columns"]
else:
# note we use this list only if this is the first column
# in the constraint. for subsequent columns we ignore the
# list and append "rcol" if present.
referred_columns = []
if self._broken_fk_pragma_quotes:
rtbl = re.sub(r"^[\"\[`\']|[\"\]`\']$", "", rtbl)
if numerical_id in fks:
fk = fks[numerical_id]
else:
fk = fks[numerical_id] = {
"name": None,
"constrained_columns": [],
"referred_schema": schema,
"referred_table": rtbl,
"referred_columns": referred_columns,
"options": {},
}
fks[numerical_id] = fk
fk["constrained_columns"].append(lcol)
if rcol:
fk["referred_columns"].append(rcol)
def fk_sig(constrained_columns, referred_table, referred_columns):
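            # a constraint signature: constrained columns + referred table +
            # referred columns, used to match PRAGMA results against the
            # constraints parsed from the original DDL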
return (
tuple(constrained_columns)
+ (referred_table,)
+ tuple(referred_columns)
)
# then, parse the actual SQL and attempt to find DDL that matches
# the names as well. SQLite saves the DDL in whatever format
        # it was typed in, so we need to be liberal here.
keys_by_signature = dict(
(
fk_sig(
fk["constrained_columns"],
fk["referred_table"],
fk["referred_columns"],
),
fk,
)
for fk in fks.values()
)
table_data = self._get_table_sql(connection, table_name, schema=schema)
if table_data is None:
# system tables, etc.
return []
def parse_fks():
FK_PATTERN = (
r"(?:CONSTRAINT (\w+) +)?"
r"FOREIGN KEY *\( *(.+?) *\) +"
r'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\) *'
r"((?:ON (?:DELETE|UPDATE) "
r"(?:SET NULL|SET DEFAULT|CASCADE|RESTRICT|NO ACTION) *)*)"
)
for match in re.finditer(FK_PATTERN, table_data, re.I):
(
constraint_name,
constrained_columns,
referred_quoted_name,
referred_name,
referred_columns,
onupdatedelete,
) = match.group(1, 2, 3, 4, 5, 6)
constrained_columns = list(
self._find_cols_in_sig(constrained_columns)
)
if not referred_columns:
referred_columns = constrained_columns
else:
referred_columns = list(
self._find_cols_in_sig(referred_columns)
)
referred_name = referred_quoted_name or referred_name
options = {}
for token in re.split(r" *\bON\b *", onupdatedelete.upper()):
if token.startswith("DELETE"):
options["ondelete"] = token[6:].strip()
elif token.startswith("UPDATE"):
options["onupdate"] = token[6:].strip()
yield (
constraint_name,
constrained_columns,
referred_name,
referred_columns,
options,
)
fkeys = []
for (
constraint_name,
constrained_columns,
referred_name,
referred_columns,
options,
) in parse_fks():
sig = fk_sig(constrained_columns, referred_name, referred_columns)
if sig not in keys_by_signature:
util.warn(
"WARNING: SQL-parsed foreign key constraint "
"'%s' could not be located in PRAGMA "
"foreign_keys for table %s" % (sig, table_name)
)
continue
key = keys_by_signature.pop(sig)
key["name"] = constraint_name
key["options"] = options
fkeys.append(key)
# assume the remainders are the unnamed, inline constraints, just
# use them as is as it's extremely difficult to parse inline
# constraints
fkeys.extend(keys_by_signature.values())
return fkeys
def _find_cols_in_sig(self, sig):
for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I):
yield match.group(1) or match.group(2)
@reflection.cache
def get_unique_constraints(
self, connection, table_name, schema=None, **kw
):
auto_index_by_sig = {}
for idx in self.get_indexes(
connection,
table_name,
schema=schema,
include_auto_indexes=True,
**kw
):
if not idx["name"].startswith("sqlite_autoindex"):
continue
sig = tuple(idx["column_names"])
auto_index_by_sig[sig] = idx
table_data = self._get_table_sql(
connection, table_name, schema=schema, **kw
)
if not table_data:
return []
unique_constraints = []
def parse_uqs():
UNIQUE_PATTERN = r'(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)'
INLINE_UNIQUE_PATTERN = (
r'(?:(".+?")|([a-z0-9]+)) ' r"+[a-z0-9_ ]+? +UNIQUE"
)
for match in re.finditer(UNIQUE_PATTERN, table_data, re.I):
name, cols = match.group(1, 2)
yield name, list(self._find_cols_in_sig(cols))
# we need to match inlines as well, as we seek to differentiate
# a UNIQUE constraint from a UNIQUE INDEX, even though these
# are kind of the same thing :)
for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I):
cols = list(
self._find_cols_in_sig(match.group(1) or match.group(2))
)
yield None, cols
for name, cols in parse_uqs():
sig = tuple(cols)
if sig in auto_index_by_sig:
auto_index_by_sig.pop(sig)
parsed_constraint = {"name": name, "column_names": cols}
unique_constraints.append(parsed_constraint)
# NOTE: auto_index_by_sig might not be empty here,
# the PRIMARY KEY may have an entry.
return unique_constraints
@reflection.cache
def get_check_constraints(self, connection, table_name, schema=None, **kw):
table_data = self._get_table_sql(
connection, table_name, schema=schema, **kw
)
if not table_data:
return []
CHECK_PATTERN = r"(?:CONSTRAINT (\w+) +)?" r"CHECK *\( *(.+) *\),? *"
check_constraints = []
# NOTE: we aren't using re.S here because we actually are
# taking advantage of each CHECK constraint being all on one
# line in the table definition in order to delineate. This
# necessarily makes assumptions as to how the CREATE TABLE
# was emitted.
for match in re.finditer(CHECK_PATTERN, table_data, re.I):
check_constraints.append(
{"sqltext": match.group(2), "name": match.group(1)}
)
return check_constraints
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
pragma_indexes = self._get_table_pragma(
connection, "index_list", table_name, schema=schema
)
indexes = []
include_auto_indexes = kw.pop("include_auto_indexes", False)
for row in pragma_indexes:
# ignore implicit primary key index.
# http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html
if not include_auto_indexes and row[1].startswith(
"sqlite_autoindex"
):
continue
indexes.append(dict(name=row[1], column_names=[], unique=row[2]))
# loop thru unique indexes to get the column names.
for idx in list(indexes):
pragma_index = self._get_table_pragma(
connection, "index_info", idx["name"]
)
for row in pragma_index:
if row[2] is None:
util.warn(
"Skipped unsupported reflection of "
"expression-based index %s" % idx["name"]
)
indexes.remove(idx)
break
else:
idx["column_names"].append(row[2])
return indexes
@reflection.cache
def _get_table_sql(self, connection, table_name, schema=None, **kw):
if schema:
schema_expr = "%s." % (
self.identifier_preparer.quote_identifier(schema)
)
else:
schema_expr = ""
try:
s = (
"SELECT sql FROM "
" (SELECT * FROM %(schema)ssqlite_master UNION ALL "
" SELECT * FROM %(schema)ssqlite_temp_master) "
"WHERE name = '%(table)s' "
"AND type = 'table'"
% {"schema": schema_expr, "table": table_name}
)
rs = connection.execute(s)
except exc.DBAPIError:
s = (
"SELECT sql FROM %(schema)ssqlite_master "
"WHERE name = '%(table)s' "
"AND type = 'table'"
% {"schema": schema_expr, "table": table_name}
)
rs = connection.execute(s)
return rs.scalar()
def _get_table_pragma(self, connection, pragma, table_name, schema=None):
quote = self.identifier_preparer.quote_identifier
if schema is not None:
statements = ["PRAGMA %s." % quote(schema)]
else:
# because PRAGMA looks in all attached databases if no schema
# given, need to specify "main" schema, however since we want
# 'temp' tables in the same namespace as 'main', need to run
# the PRAGMA twice
statements = ["PRAGMA main.", "PRAGMA temp."]
qtable = quote(table_name)
for statement in statements:
statement = "%s%s(%s)" % (statement, pragma, qtable)
cursor = connection.execute(statement)
if not cursor._soft_closed:
# work around SQLite issue whereby cursor.description
# is blank when PRAGMA returns no rows:
# http://www.sqlite.org/cvstrac/tktview?tn=1884
result = cursor.fetchall()
else:
result = []
if result:
return result
else:
return []
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/sqlite/pysqlite.py | # sqlite/pysqlite.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: sqlite+pysqlite
:name: pysqlite
:dbapi: sqlite3
:connectstring: sqlite+pysqlite:///file_path
:url: http://docs.python.org/library/sqlite3.html
Note that ``pysqlite`` is the same driver as the ``sqlite3``
module included with the Python distribution.
Driver
------
The ``sqlite3`` Python DBAPI is standard on all modern Python versions;
for CPython and PyPy, no additional installation is necessary.
Connect Strings
---------------
The file specification for the SQLite database is taken as the "database"
portion of the URL. Note that the format of a SQLAlchemy url is::
driver://user:pass@host/database
This means that the actual filename to be used starts with the characters to
the **right** of the third slash. So connecting to a relative filepath
looks like::
# relative path
e = create_engine('sqlite:///path/to/database.db')
An absolute path, which is denoted by starting with a slash, means you
need **four** slashes::
# absolute path
e = create_engine('sqlite:////path/to/database.db')
To use a Windows path, regular drive specifications and backslashes can be
used. Double backslashes are probably needed::
# absolute path on Windows
e = create_engine('sqlite:///C:\\path\\to\\database.db')
The sqlite ``:memory:`` identifier is the default if no filepath is
present. Specify ``sqlite://`` and nothing else::
# in-memory database
e = create_engine('sqlite://')
.. _pysqlite_uri_connections:
URI Connections
^^^^^^^^^^^^^^^
Modern versions of SQLite support an alternative system of connecting using a
`driver level URI <https://www.sqlite.org/uri.html>`_, which has the advantage
that additional driver-level arguments can be passed including options such as
"read only". The Python sqlite3 driver supports this mode under modern Python
3 versions. The SQLAlchemy pysqlite driver supports this mode of use by
specifying "uri=true" in the URL query string. The SQLite-level "URI" is kept
as the "database" portion of the SQLAlchemy url (that is, following a slash)::
e = create_engine("sqlite:///file:path/to/database?mode=ro&uri=true")
.. note:: The "uri=true" parameter must appear in the **query string**
of the URL. It will not currently work as expected if it is only
present in the :paramref:`_sa.create_engine.connect_args`
parameter dictionary.
The logic reconciles the simultaneous presence of SQLAlchemy's query string and
SQLite's query string by separating out the parameters that belong to the
Python sqlite3 driver vs. those that belong to the SQLite URI. This is
achieved through the use of a fixed list of parameters known to be accepted by
the Python side of the driver. For example, to include a URL that indicates
the Python sqlite3 "timeout" and "check_same_thread" parameters, along with the
SQLite "mode" and "nolock" parameters, they can all be passed together on the
query string::
e = create_engine(
"sqlite:///file:path/to/database?"
"check_same_thread=true&timeout=10&mode=ro&nolock=1&uri=true"
)
Above, the pysqlite / sqlite3 DBAPI would be passed arguments as::
sqlite3.connect(
"file:path/to/database?mode=ro&nolock=1",
check_same_thread=True, timeout=10, uri=True
)
Regarding future parameters added to either the Python or native drivers: new
parameter names added to the SQLite URI scheme should be automatically
accommodated by this scheme. New parameter names added to the Python driver
side can be accommodated by specifying them in the
:paramref:`_sa.create_engine.connect_args` dictionary,
until dialect support is
added by SQLAlchemy. For the less likely case that the native SQLite driver
adds a new parameter name that overlaps with one of the existing, known Python
driver parameters (such as "timeout" perhaps), SQLAlchemy's dialect would
require adjustment for the URL scheme to continue to support this.
As is always the case for all SQLAlchemy dialects, the entire "URL" process
can be bypassed in :func:`_sa.create_engine` through the use of the
:paramref:`_sa.create_engine.creator`
parameter which allows for a custom callable
that creates a Python sqlite3 driver level connection directly.
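For example, a minimal sketch of this approach (the file path below is
illustrative)::
    import sqlite3
    def creator():
        return sqlite3.connect("file:path/to/database?mode=ro", uri=True)
    e = create_engine("sqlite://", creator=creator)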
.. versionadded:: 1.3.9
.. seealso::
`Uniform Resource Identifiers <https://www.sqlite.org/uri.html>`_ - in
the SQLite documentation
Compatibility with sqlite3 "native" date and datetime types
-----------------------------------------------------------
The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
sqlite3.PARSE_COLNAMES options, which have the effect that any column
or expression explicitly cast as "date" or "timestamp" will be converted
to a Python date or datetime object. The date and datetime types provided
with the pysqlite dialect are not currently compatible with these options,
since they render the ISO date/datetime including microseconds, which
pysqlite's driver does not. Additionally, SQLAlchemy does not at
this time automatically render the "cast" syntax required for the
freestanding functions "current_timestamp" and "current_date" to return
datetime/date types natively. Unfortunately, pysqlite
does not provide the standard DBAPI types in ``cursor.description``,
leaving SQLAlchemy with no way to detect these types on the fly
without expensive per-row type checks.
Keeping in mind that pysqlite's parsing option is not recommended,
nor should it be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
engine = create_engine('sqlite://',
connect_args={'detect_types':
sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
native_datetime=True
)
With this flag enabled, the DATE and TIMESTAMP types (but note - not the
DATETIME or TIME types... confused yet?) will not perform any bind parameter
or result processing. Execution of "func.current_date()" will return a string.
"func.current_timestamp()" is registered as returning a DATETIME type in
SQLAlchemy, so this function still receives SQLAlchemy-level result
processing.
.. _pysqlite_threading_pooling:
Threading/Pooling Behavior
---------------------------
Pysqlite's default behavior is to prohibit the usage of a single connection
in more than one thread. This is originally intended to work with older
versions of SQLite that did not support multithreaded operation under
various circumstances. In particular, older SQLite versions
did not allow a ``:memory:`` database to be used in multiple threads
under any circumstances.
Pysqlite does include a now-undocumented flag known as
``check_same_thread`` which will disable this check, however note that
pysqlite connections are still not safe to use concurrently in multiple
threads. In particular, any statement execution calls would need to be
externally mutexed, as Pysqlite does not provide for thread-safe propagation
of error messages among other things. So while even ``:memory:`` databases
can be shared among threads in modern SQLite, Pysqlite doesn't provide enough
thread-safety to make this usage worth it.
SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
* When a ``:memory:`` SQLite database is specified, the dialect by default
will use :class:`.SingletonThreadPool`. This pool maintains a single
connection per thread, so that all access to the engine within the current
thread use the same ``:memory:`` database - other threads would access a
different ``:memory:`` database.
* When a file-based database is specified, the dialect will use
:class:`.NullPool` as the source of connections. This pool closes and
discards connections which are returned to the pool immediately. SQLite
file-based connections have extremely low overhead, so pooling is not
necessary. The scheme also prevents a connection from being used again in
a different thread and works best with SQLite's coarse-grained file locking.
Using a Memory Database in Multiple Threads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use a ``:memory:`` database in a multithreaded scenario, the same
connection object must be shared among threads, since the database exists
only within the scope of that connection. The
:class:`.StaticPool` implementation will maintain a single connection
globally, and the ``check_same_thread`` flag can be passed to Pysqlite
as ``False``::
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite://',
connect_args={'check_same_thread':False},
poolclass=StaticPool)
Note that using a ``:memory:`` database in multiple threads requires a recent
version of SQLite.
Using Temporary Tables with SQLite
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Due to the way SQLite deals with temporary tables, if you wish to use a
temporary table in a file-based SQLite database across multiple checkouts
from the connection pool, such as when using an ORM :class:`.Session` where
the temporary table should continue to remain after :meth:`.Session.commit` or
:meth:`.Session.rollback` is called, a pool which maintains a single
connection must be used. Use :class:`.SingletonThreadPool` if the scope is
only needed within the current thread, or :class:`.StaticPool` if scope is
needed within multiple threads for this case::
# maintain the same connection per thread
from sqlalchemy.pool import SingletonThreadPool
engine = create_engine('sqlite:///mydb.db',
poolclass=SingletonThreadPool)
# maintain the same connection across all threads
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite:///mydb.db',
poolclass=StaticPool)
Note that :class:`.SingletonThreadPool` should be configured for the number
of threads that are to be used; beyond that number, connections will be
closed out in a non-deterministic way.
Unicode
-------
The pysqlite driver only returns Python ``unicode`` objects in result sets,
never plain strings, and accommodates ``unicode`` objects within bound
parameter values in all cases. Regardless of the SQLAlchemy string type in
use, string-based result values will be Python ``unicode`` in Python 2.
The :class:`.Unicode` type should still be used to indicate those columns that
require unicode, however, so that non-``unicode`` values passed inadvertently
will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
is passed containing non-ASCII characters.
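For example, a column intended for unicode data (table and column names here
are illustrative)::
    from sqlalchemy import Column, Integer, MetaData, Table, Unicode
    t = Table(
        "t", MetaData(),
        Column("id", Integer, primary_key=True),
        Column("value", Unicode(100)),
    )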
Dealing with Mixed String / Binary Columns in Python 3
------------------------------------------------------
The SQLite database is weakly typed, and as such it is possible when using
binary values, which in Python 3 are represented as ``b'some string'``, that a
particular SQLite database can have data values within different rows where
some of them will be returned as a ``b''`` value by the Pysqlite driver, and
others will be returned as Python strings, e.g. ``''`` values. This situation
is not known to occur if the SQLAlchemy :class:`.LargeBinary` datatype is used
consistently, however if a particular SQLite database has data that was
inserted using the Pysqlite driver directly, or when using the SQLAlchemy
:class:`.String` type which was later changed to :class:`.LargeBinary`, the
table will not be consistently readable because SQLAlchemy's
:class:`.LargeBinary` datatype does not handle strings so it has no way of
"encoding" a value that is in string format.
To deal with a SQLite table that has mixed string / binary data in the
same column, use a custom type that will check each row individually::
# note this is Python 3 only
from sqlalchemy import String
from sqlalchemy import TypeDecorator
class MixedBinary(TypeDecorator):
impl = String
def process_result_value(self, value, dialect):
if isinstance(value, str):
value = bytes(value, 'utf-8')
elif value is not None:
value = bytes(value)
return value
Then use the above ``MixedBinary`` datatype in the place where
:class:`.LargeBinary` would normally be used.
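A hypothetical usage, with table and column names chosen purely for
illustration::
    from sqlalchemy import Column, Integer, MetaData, Table
    t = Table(
        "data", MetaData(),
        Column("id", Integer, primary_key=True),
        Column("payload", MixedBinary),
    )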
.. _pysqlite_serializable:
Serializable isolation / Savepoints / Transactional DDL
-------------------------------------------------------
In the section :ref:`sqlite_concurrency`, we refer to the pysqlite
driver's assortment of issues that prevent several features of SQLite
from working correctly. The pysqlite DBAPI driver has several
long-standing bugs which impact the correctness of its transactional
behavior. In its default mode of operation, SQLite features such as
SERIALIZABLE isolation, transactional DDL, and SAVEPOINT support are
non-functional, and in order to use these features, workarounds must
be taken.
The issue is essentially that the driver attempts to second-guess the user's
intent, failing to start transactions and sometimes ending them prematurely, in
an effort to minimize the SQLite database's file locking behavior, even
though SQLite itself uses "shared" locks for read-only activities.
SQLAlchemy chooses not to alter this behavior by default, as it is the
long-expected behavior of the pysqlite driver; if and when the pysqlite
driver repairs these issues, that will help drive changes to SQLAlchemy's
defaults as well.
The good news is that with a few events, we can implement transactional
support fully, by disabling pysqlite's feature entirely and emitting BEGIN
ourselves. This is achieved using two event listeners::
from sqlalchemy import create_engine, event
engine = create_engine("sqlite:///myfile.db")
@event.listens_for(engine, "connect")
def do_connect(dbapi_connection, connection_record):
# disable pysqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
dbapi_connection.isolation_level = None
@event.listens_for(engine, "begin")
def do_begin(conn):
# emit our own BEGIN
conn.execute("BEGIN")
.. warning:: When using the above recipe, it is advised to not use the
:paramref:`.Connection.execution_options.isolation_level` setting on
:class:`_engine.Connection` and :func:`_sa.create_engine`
with the SQLite driver,
as this function necessarily will also alter the ".isolation_level" setting.
Above, we intercept a new pysqlite connection and disable any transactional
integration. Then, at the point at which SQLAlchemy knows that transaction
scope is to begin, we emit ``"BEGIN"`` ourselves.
When we take control of ``"BEGIN"``, we can also control directly SQLite's
locking modes, introduced at
`BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_,
by adding the desired locking mode to our ``"BEGIN"``::
@event.listens_for(engine, "begin")
def do_begin(conn):
conn.execute("BEGIN EXCLUSIVE")
.. seealso::
`BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_ -
on the SQLite site
`sqlite3 SELECT does not BEGIN a transaction <http://bugs.python.org/issue9924>`_ -
on the Python bug tracker
`sqlite3 module breaks transactions and potentially corrupts data <http://bugs.python.org/issue10740>`_ -
on the Python bug tracker
""" # noqa
import os
from .base import DATE
from .base import DATETIME
from .base import SQLiteDialect
from ... import exc
from ... import pool
from ... import types as sqltypes
from ... import util
class _SQLite_pysqliteTimeStamp(DATETIME):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATETIME.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATETIME.result_processor(self, dialect, coltype)
class _SQLite_pysqliteDate(DATE):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATE.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATE.result_processor(self, dialect, coltype)
class SQLiteDialect_pysqlite(SQLiteDialect):
default_paramstyle = "qmark"
colspecs = util.update_copy(
SQLiteDialect.colspecs,
{
sqltypes.Date: _SQLite_pysqliteDate,
sqltypes.TIMESTAMP: _SQLite_pysqliteTimeStamp,
},
)
if not util.py2k:
description_encoding = None
driver = "pysqlite"
@classmethod
def dbapi(cls):
if util.py2k:
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError:
try:
from sqlite3 import dbapi2 as sqlite
except ImportError as e:
raise e
else:
from sqlite3 import dbapi2 as sqlite
return sqlite
@classmethod
def _is_url_file_db(cls, url):
if url.database and url.database != ":memory:":
return True
else:
return False
@classmethod
def get_pool_class(cls, url):
if cls._is_url_file_db(url):
return pool.NullPool
else:
return pool.SingletonThreadPool
def _get_server_version_info(self, connection):
return self.dbapi.sqlite_version_info
def set_isolation_level(self, connection, level):
if hasattr(connection, "connection"):
dbapi_connection = connection.connection
else:
dbapi_connection = connection
if level == "AUTOCOMMIT":
dbapi_connection.isolation_level = None
else:
dbapi_connection.isolation_level = ""
return super(SQLiteDialect_pysqlite, self).set_isolation_level(
connection, level
)
def create_connect_args(self, url):
if url.username or url.password or url.host or url.port:
raise exc.ArgumentError(
"Invalid SQLite URL: %s\n"
"Valid SQLite URL forms are:\n"
" sqlite:///:memory: (or, sqlite://)\n"
" sqlite:///relative/path/to/file.db\n"
" sqlite:////absolute/path/to/file.db" % (url,)
)
# theoretically, this list can be augmented, at least as far as
# parameter names accepted by sqlite3/pysqlite, using
# inspect.getfullargspec(). for the moment this seems like overkill
# as these parameters don't change very often, and as always,
# parameters passed to connect_args will always go to the
# sqlite3/pysqlite driver.
pysqlite_args = [
("uri", bool),
("timeout", float),
("isolation_level", str),
("detect_types", int),
("check_same_thread", bool),
("cached_statements", int),
]
opts = url.query
pysqlite_opts = {}
for key, type_ in pysqlite_args:
util.coerce_kw_type(opts, key, type_, dest=pysqlite_opts)
if pysqlite_opts.get("uri", False):
uri_opts = opts.copy()
# here, we are actually separating the parameters that go to
            # sqlite3/pysqlite vs. those that go to the SQLite URI. What if
# two names conflict? again, this seems to be not the case right
# now, and in the case that new names are added to
# either side which overlap, again the sqlite3/pysqlite parameters
# can be passed through connect_args instead of in the URL.
# If SQLite native URIs add a parameter like "timeout" that
# we already have listed here for the python driver, then we need
# to adjust for that here.
for key, type_ in pysqlite_args:
uri_opts.pop(key, None)
filename = url.database
if uri_opts:
# sorting of keys is for unit test support
filename += "?" + (
"&".join(
"%s=%s" % (key, uri_opts[key])
for key in sorted(uri_opts)
)
)
else:
filename = url.database or ":memory:"
if filename != ":memory:":
filename = os.path.abspath(filename)
return ([filename], pysqlite_opts)
def is_disconnect(self, e, connection, cursor):
return isinstance(
e, self.dbapi.ProgrammingError
) and "Cannot operate on a closed database." in str(e)
dialect = SQLiteDialect_pysqlite
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mysql/enumerated.py | # mysql/enumerated.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
from .types import _StringType
from ... import exc
from ... import sql
from ... import util
from ...sql import sqltypes
class _EnumeratedValues(_StringType):
def _init_values(self, values, kw):
self.quoting = kw.pop("quoting", "auto")
if self.quoting == "auto" and len(values):
# What quoting character are we using?
q = None
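            # if every value begins and ends with one consistent quote
            # character, assume the values were supplied pre-quoted;
            # otherwise fall back to "unquoted" and quote at DDL time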
for e in values:
if len(e) == 0:
self.quoting = "unquoted"
break
elif q is None:
q = e[0]
if len(e) == 1 or e[0] != q or e[-1] != q:
self.quoting = "unquoted"
break
else:
self.quoting = "quoted"
if self.quoting == "quoted":
util.warn_deprecated(
"Manually quoting %s value literals is deprecated. Supply "
"unquoted values and use the quoting= option in cases of "
"ambiguity." % self.__class__.__name__
)
values = self._strip_values(values)
self._enumerated_values = values
length = max([len(v) for v in values] + [0])
return values, length
@classmethod
def _strip_values(cls, values):
strip_values = []
for a in values:
if a[0:1] == '"' or a[0:1] == "'":
# strip enclosing quotes and unquote interior
a = a[1:-1].replace(a[0] * 2, a[0])
strip_values.append(a)
return strip_values
class ENUM(sqltypes.NativeForEmulated, sqltypes.Enum, _EnumeratedValues):
"""MySQL ENUM type."""
__visit_name__ = "ENUM"
native_enum = True
def __init__(self, *enums, **kw):
"""Construct an ENUM.
E.g.::
Column('myenum', ENUM("foo", "bar", "baz"))
:param enums: The range of valid values for this ENUM. Values will be
quoted when generating the schema according to the quoting flag (see
below). This object may also be a PEP-435-compliant enumerated
type.
        .. versionadded:: 1.1 added support for PEP-435-compliant enumerated
types.
:param strict: This flag has no effect.
.. versionchanged:: The MySQL ENUM type as well as the base Enum
type now validates all Python data values.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
:param quoting: Defaults to 'auto': automatically determine enum value
quoting. If all enum values are surrounded by the same quoting
character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
'quoted': values in enums are already quoted, they will be used
directly when generating the schema - this usage is deprecated.
'unquoted': values in enums are not quoted, they will be escaped and
surrounded by single quotes when generating the schema.
Previous versions of this type always required manually quoted
values to be supplied; future versions will always quote the string
literals for you. This is a transitional option.
"""
kw.pop("strict", None)
self._enum_init(enums, kw)
_StringType.__init__(self, length=self.length, **kw)
@classmethod
def adapt_emulated_to_native(cls, impl, **kw):
"""Produce a MySQL native :class:`.mysql.ENUM` from plain
:class:`.Enum`.
"""
kw.setdefault("validate_strings", impl.validate_strings)
kw.setdefault("values_callable", impl.values_callable)
return cls(**kw)
def _setup_for_values(self, values, objects, kw):
values, length = self._init_values(values, kw)
return super(ENUM, self)._setup_for_values(values, objects, kw)
def _object_value_for_elem(self, elem):
# mysql sends back a blank string for any value that
# was persisted that was not in the enums; that is, it does no
# validation on the incoming data, it "truncates" it to be
# the blank string. Return it straight.
if elem == "":
return elem
else:
return super(ENUM, self)._object_value_for_elem(elem)
def __repr__(self):
return util.generic_repr(
self, to_inspect=[ENUM, _StringType, sqltypes.Enum]
)
class SET(_EnumeratedValues):
"""MySQL SET type."""
__visit_name__ = "SET"
def __init__(self, *values, **kw):
"""Construct a SET.
E.g.::
Column('myset', SET("foo", "bar", "baz"))
The list of potential values is required in the case that this
set will be used to generate DDL for a table, or if the
:paramref:`.SET.retrieve_as_bitwise` flag is set to True.
:param values: The range of valid values for this SET.
:param convert_unicode: Same flag as that of
:paramref:`.String.convert_unicode`.
:param collation: same as that of :paramref:`.String.collation`
:param charset: same as that of :paramref:`.VARCHAR.charset`.
:param ascii: same as that of :paramref:`.VARCHAR.ascii`.
:param unicode: same as that of :paramref:`.VARCHAR.unicode`.
:param binary: same as that of :paramref:`.VARCHAR.binary`.
:param quoting: Defaults to 'auto': automatically determine set value
quoting. If all values are surrounded by the same quoting
character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
'quoted': values in enums are already quoted, they will be used
directly when generating the schema - this usage is deprecated.
'unquoted': values in enums are not quoted, they will be escaped and
surrounded by single quotes when generating the schema.
Previous versions of this type always required manually quoted
values to be supplied; future versions will always quote the string
literals for you. This is a transitional option.
.. versionadded:: 0.9.0
:param retrieve_as_bitwise: if True, the data for the set type will be
persisted and selected using an integer value, where a set is coerced
into a bitwise mask for persistence. MySQL allows this mode which
has the advantage of being able to store values unambiguously,
such as the blank string ``''``. The datatype will appear
as the expression ``col + 0`` in a SELECT statement, so that the
value is coerced into an integer value in result sets.
This flag is required if one wishes
to persist a set that can store the blank string ``''`` as a value.
.. warning::
When using :paramref:`.mysql.SET.retrieve_as_bitwise`, it is
essential that the list of set values is expressed in the
**exact same order** as exists on the MySQL database.
.. versionadded:: 1.0.0
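        E.g., a set that can round-trip the blank string::
            Column('myset', SET("", "foo", "bar", retrieve_as_bitwise=True))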
"""
self.retrieve_as_bitwise = kw.pop("retrieve_as_bitwise", False)
values, length = self._init_values(values, kw)
self.values = tuple(values)
if not self.retrieve_as_bitwise and "" in values:
raise exc.ArgumentError(
"Can't use the blank value '' in a SET without "
"setting retrieve_as_bitwise=True"
)
if self.retrieve_as_bitwise:
self._bitmap = dict(
(value, 2 ** idx) for idx, value in enumerate(self.values)
)
self._bitmap.update(
(2 ** idx, value) for idx, value in enumerate(self.values)
)
kw.setdefault("length", length)
super(SET, self).__init__(**kw)
def column_expression(self, colexpr):
if self.retrieve_as_bitwise:
return sql.type_coerce(
sql.type_coerce(colexpr, sqltypes.Integer) + 0, self
)
else:
return colexpr
def result_processor(self, dialect, coltype):
if self.retrieve_as_bitwise:
def process(value):
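                # translate the integer bitmask produced by the "col + 0"
                # column expression back into the set of string values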
if value is not None:
value = int(value)
return set(util.map_bits(self._bitmap.__getitem__, value))
else:
return None
else:
super_convert = super(SET, self).result_processor(dialect, coltype)
def process(value):
if isinstance(value, util.string_types):
# MySQLdb returns a string, let's parse
if super_convert:
value = super_convert(value)
return set(re.findall(r"[^,]+", value))
else:
# mysql-connector-python does a naive
# split(",") which throws in an empty string
if value is not None:
value.discard("")
return value
return process
def bind_processor(self, dialect):
super_convert = super(SET, self).bind_processor(dialect)
if self.retrieve_as_bitwise:
def process(value):
if value is None:
return None
elif isinstance(value, util.int_types + util.string_types):
if super_convert:
return super_convert(value)
else:
return value
else:
int_value = 0
for v in value:
int_value |= self._bitmap[v]
return int_value
else:
def process(value):
# accept strings and int (actually bitflag) values directly
if value is not None and not isinstance(
value, util.int_types + util.string_types
):
value = ",".join(value)
if super_convert:
return super_convert(value)
else:
return value
return process
def adapt(self, impltype, **kw):
kw["retrieve_as_bitwise"] = self.retrieve_as_bitwise
return util.constructor_copy(self, impltype, *self.values, **kw)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mysql/oursql.py | # mysql/oursql.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+oursql
:name: OurSQL
:dbapi: oursql
:connectstring: mysql+oursql://<user>:<password>@<host>[:<port>]/<dbname>
:url: http://packages.python.org/oursql/
.. note::
The OurSQL MySQL dialect is legacy and is no longer supported upstream,
and is **not tested as part of SQLAlchemy's continuous integration**.
The recommended MySQL dialects are mysqlclient and PyMySQL.
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
"""
from .base import BIT
from .base import MySQLDialect
from .base import MySQLExecutionContext
from ... import types as sqltypes
from ... import util
class _oursqlBIT(BIT):
def result_processor(self, dialect, coltype):
"""oursql already converts mysql bits, so."""
return None
class MySQLExecutionContext_oursql(MySQLExecutionContext):
@property
def plain_query(self):
return self.execution_options.get("_oursql_plain_query", False)
class MySQLDialect_oursql(MySQLDialect):
driver = "oursql"
if util.py2k:
supports_unicode_binds = True
supports_unicode_statements = True
supports_native_decimal = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
execution_ctx_cls = MySQLExecutionContext_oursql
colspecs = util.update_copy(
MySQLDialect.colspecs, {sqltypes.Time: sqltypes.Time, BIT: _oursqlBIT}
)
@classmethod
def dbapi(cls):
return __import__("oursql")
def do_execute(self, cursor, statement, parameters, context=None):
"""Provide an implementation of
*cursor.execute(statement, parameters)*."""
if context and context.plain_query:
cursor.execute(statement, plain_query=True)
else:
cursor.execute(statement, parameters)
def do_begin(self, connection):
connection.cursor().execute("BEGIN", plain_query=True)
def _xa_query(self, connection, query, xid):
if util.py2k:
arg = connection.connection._escape_string(xid)
else:
charset = self._connection_charset
arg = connection.connection._escape_string(
xid.encode(charset)
).decode(charset)
arg = "'%s'" % arg
connection.execution_options(_oursql_plain_query=True).execute(
query % arg
)
# Because mysql is bad, these methods have to be
# reimplemented to use _PlainQuery. Basically, some queries
# refuse to return any data if they're run through
# the parameterized query API, or refuse to be parameterized
# in the first place.
def do_begin_twophase(self, connection, xid):
self._xa_query(connection, "XA BEGIN %s", xid)
def do_prepare_twophase(self, connection, xid):
self._xa_query(connection, "XA END %s", xid)
self._xa_query(connection, "XA PREPARE %s", xid)
def do_rollback_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if not is_prepared:
self._xa_query(connection, "XA END %s", xid)
self._xa_query(connection, "XA ROLLBACK %s", xid)
def do_commit_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if not is_prepared:
self.do_prepare_twophase(connection, xid)
self._xa_query(connection, "XA COMMIT %s", xid)
# Q: why didn't we need all these "plain_query" overrides earlier ?
# am i on a newer/older version of OurSQL ?
def has_table(self, connection, table_name, schema=None):
return MySQLDialect.has_table(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema,
)
def get_table_options(self, connection, table_name, schema=None, **kw):
return MySQLDialect.get_table_options(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema=schema,
**kw
)
def get_columns(self, connection, table_name, schema=None, **kw):
return MySQLDialect.get_columns(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema=schema,
**kw
)
def get_view_names(self, connection, schema=None, **kw):
return MySQLDialect.get_view_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
schema=schema,
**kw
)
def get_table_names(self, connection, schema=None, **kw):
return MySQLDialect.get_table_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
schema,
)
def get_schema_names(self, connection, **kw):
return MySQLDialect.get_schema_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
**kw
)
def initialize(self, connection):
return MySQLDialect.initialize(
self, connection.execution_options(_oursql_plain_query=True)
)
def _show_create_table(
self, connection, table, charset=None, full_name=None
):
return MySQLDialect._show_create_table(
self,
connection._contextual_connect(
close_with_result=True
).execution_options(_oursql_plain_query=True),
table,
charset,
full_name,
)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.ProgrammingError):
return (
e.errno is None
and "cursor" not in e.args[1]
and e.args[1].endswith("closed")
)
else:
return e.errno in (2006, 2013, 2014, 2045, 2055)
def create_connect_args(self, url):
opts = url.translate_connect_args(
database="db", username="user", password="passwd"
)
opts.update(url.query)
util.coerce_kw_type(opts, "port", int)
util.coerce_kw_type(opts, "compress", bool)
util.coerce_kw_type(opts, "autoping", bool)
util.coerce_kw_type(opts, "raise_on_warnings", bool)
util.coerce_kw_type(opts, "default_charset", bool)
if opts.pop("default_charset", False):
opts["charset"] = None
else:
util.coerce_kw_type(opts, "charset", str)
opts["use_unicode"] = opts.get("use_unicode", True)
util.coerce_kw_type(opts, "use_unicode", bool)
# FOUND_ROWS must be set in CLIENT_FLAGS to enable
# supports_sane_rowcount.
opts.setdefault("found_rows", True)
ssl = {}
for key in [
"ssl_ca",
"ssl_key",
"ssl_cert",
"ssl_capath",
"ssl_cipher",
]:
if key in opts:
ssl[key[4:]] = opts[key]
util.coerce_kw_type(ssl, key[4:], str)
del opts[key]
if ssl:
opts["ssl"] = ssl
return [[], opts]
def _extract_error_code(self, exception):
return exception.errno
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
return connection.connection.charset
def _compat_fetchall(self, rp, charset=None):
"""oursql isn't super-broken like MySQLdb, yaaay."""
return rp.fetchall()
def _compat_fetchone(self, rp, charset=None):
"""oursql isn't super-broken like MySQLdb, yaaay."""
return rp.fetchone()
def _compat_first(self, rp, charset=None):
return rp.first()
dialect = MySQLDialect_oursql
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mysql/mysqlconnector.py | # mysql/mysqlconnector.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mysql+mysqlconnector
:name: MySQL Connector/Python
:dbapi: myconnpy
:connectstring: mysql+mysqlconnector://<user>:<password>@<host>[:<port>]/<dbname>
:url: https://pypi.org/project/mysql-connector-python/
.. note::
The MySQL Connector/Python DBAPI has had many issues since its release,
some of which may remain unresolved, and the mysqlconnector dialect is
**not tested as part of SQLAlchemy's continuous integration**.
The recommended MySQL dialects are mysqlclient and PyMySQL.
""" # noqa
import re
from .base import BIT
from .base import MySQLCompiler
from .base import MySQLDialect
from .base import MySQLExecutionContext
from .base import MySQLIdentifierPreparer
from ... import processors
from ... import util
class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext):
def get_lastrowid(self):
return self.cursor.lastrowid
class MySQLCompiler_mysqlconnector(MySQLCompiler):
def visit_mod_binary(self, binary, operator, **kw):
if self.dialect._mysqlconnector_double_percents:
return (
self.process(binary.left, **kw)
+ " %% "
+ self.process(binary.right, **kw)
)
else:
return (
self.process(binary.left, **kw)
+ " % "
+ self.process(binary.right, **kw)
)
def post_process_text(self, text):
if self.dialect._mysqlconnector_double_percents:
return text.replace("%", "%%")
else:
return text
def escape_literal_column(self, text):
if self.dialect._mysqlconnector_double_percents:
return text.replace("%", "%%")
else:
return text
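# For example, when _mysqlconnector_double_percents is in effect, a modulo
# expression such as "x % 3" is rendered as "x %% 3" so that the DBAPI's
# pyformat paramstyle does not mistake "%" for a bind-parameter escape.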
class MySQLIdentifierPreparer_mysqlconnector(MySQLIdentifierPreparer):
@property
def _double_percents(self):
return self.dialect._mysqlconnector_double_percents
@_double_percents.setter
def _double_percents(self, value):
pass
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
if self.dialect._mysqlconnector_double_percents:
return value.replace("%", "%%")
else:
return value
class _myconnpyBIT(BIT):
def result_processor(self, dialect, coltype):
"""MySQL-connector already converts mysql bits, so."""
return None
class MySQLDialect_mysqlconnector(MySQLDialect):
driver = "mysqlconnector"
supports_unicode_binds = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_native_decimal = True
default_paramstyle = "format"
execution_ctx_cls = MySQLExecutionContext_mysqlconnector
statement_compiler = MySQLCompiler_mysqlconnector
preparer = MySQLIdentifierPreparer_mysqlconnector
colspecs = util.update_copy(MySQLDialect.colspecs, {BIT: _myconnpyBIT})
def __init__(self, *arg, **kw):
super(MySQLDialect_mysqlconnector, self).__init__(*arg, **kw)
# hack description encoding since mysqlconnector randomly
# returns bytes or not
self._description_decoder = (
processors.to_conditional_unicode_processor_factory
)(self.description_encoding)
def _check_unicode_description(self, connection):
# hack description encoding since mysqlconnector randomly
# returns bytes or not
return False
@property
def description_encoding(self):
# a guess; mysqlconnector does not expose the description encoding
return "latin-1"
@util.memoized_property
def supports_unicode_statements(self):
return util.py3k or self._mysqlconnector_version_info > (2, 0)
@classmethod
def dbapi(cls):
from mysql import connector
return connector
def do_ping(self, dbapi_connection):
try:
dbapi_connection.ping(False)
except self.dbapi.Error as err:
if self.is_disconnect(err, dbapi_connection, None):
return False
else:
raise
else:
return True
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
opts.update(url.query)
util.coerce_kw_type(opts, "allow_local_infile", bool)
util.coerce_kw_type(opts, "autocommit", bool)
util.coerce_kw_type(opts, "buffered", bool)
util.coerce_kw_type(opts, "compress", bool)
util.coerce_kw_type(opts, "connection_timeout", int)
util.coerce_kw_type(opts, "connect_timeout", int)
util.coerce_kw_type(opts, "consume_results", bool)
util.coerce_kw_type(opts, "force_ipv6", bool)
util.coerce_kw_type(opts, "get_warnings", bool)
util.coerce_kw_type(opts, "pool_reset_session", bool)
util.coerce_kw_type(opts, "pool_size", int)
util.coerce_kw_type(opts, "raise_on_warnings", bool)
util.coerce_kw_type(opts, "raw", bool)
util.coerce_kw_type(opts, "ssl_verify_cert", bool)
util.coerce_kw_type(opts, "use_pure", bool)
util.coerce_kw_type(opts, "use_unicode", bool)
# unfortunately, MySQL/connector python refuses to release a
# cursor without reading fully, so non-buffered isn't an option
opts.setdefault("buffered", True)
# FOUND_ROWS must be set in ClientFlag to enable
# supports_sane_rowcount.
if self.dbapi is not None:
try:
from mysql.connector.constants import ClientFlag
client_flags = opts.get(
"client_flags", ClientFlag.get_default()
)
client_flags |= ClientFlag.FOUND_ROWS
opts["client_flags"] = client_flags
except Exception:
pass
return [[], opts]
@util.memoized_property
def _mysqlconnector_version_info(self):
if self.dbapi and hasattr(self.dbapi, "__version__"):
m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", self.dbapi.__version__)
if m:
return tuple(int(x) for x in m.group(1, 2, 3) if x is not None)
@util.memoized_property
def _mysqlconnector_double_percents(self):
return not util.py3k and self._mysqlconnector_version_info < (2, 0)
def _detect_charset(self, connection):
return connection.connection.charset
def _extract_error_code(self, exception):
return exception.errno
def is_disconnect(self, e, connection, cursor):
errnos = (2006, 2013, 2014, 2045, 2055, 2048)
exceptions = (self.dbapi.OperationalError, self.dbapi.InterfaceError)
if isinstance(e, exceptions):
return (
e.errno in errnos
or "MySQL Connection not available." in str(e)
or "Connection to MySQL is not available" in str(e)
)
else:
return False
def _compat_fetchall(self, rp, charset=None):
return rp.fetchall()
def _compat_fetchone(self, rp, charset=None):
return rp.fetchone()
_isolation_lookup = set(
[
"SERIALIZABLE",
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
"AUTOCOMMIT",
]
)
def _set_isolation_level(self, connection, level):
if level == "AUTOCOMMIT":
connection.autocommit = True
else:
connection.autocommit = False
super(MySQLDialect_mysqlconnector, self)._set_isolation_level(
connection, level
)
dialect = MySQLDialect_mysqlconnector
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mysql/reflection.py | # mysql/reflection.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
from .enumerated import _EnumeratedValues
from .enumerated import SET
from .types import DATETIME
from .types import TIME
from .types import TIMESTAMP
from ... import log
from ... import types as sqltypes
from ... import util
class ReflectedState(object):
"""Stores raw information about a SHOW CREATE TABLE statement."""
def __init__(self):
self.columns = []
self.table_options = {}
self.table_name = None
self.keys = []
self.fk_constraints = []
self.ck_constraints = []
@log.class_logger
class MySQLTableDefinitionParser(object):
"""Parses the results of a SHOW CREATE TABLE statement."""
def __init__(self, dialect, preparer):
self.dialect = dialect
self.preparer = preparer
self._prep_regexes()
def parse(self, show_create, charset):
state = ReflectedState()
state.charset = charset
for line in re.split(r"\r?\n", show_create):
if line.startswith(" " + self.preparer.initial_quote):
self._parse_column(line, state)
# a regular table options line
elif line.startswith(") "):
self._parse_table_options(line, state)
# an ANSI-mode table options line
elif line == ")":
pass
elif line.startswith("CREATE "):
self._parse_table_name(line, state)
# Not present in real reflection, but may be present if
# loading from a file.
elif not line:
pass
else:
type_, spec = self._parse_constraints(line)
if type_ is None:
util.warn("Unknown schema content: %r" % line)
elif type_ == "key":
state.keys.append(spec)
elif type_ == "fk_constraint":
state.fk_constraints.append(spec)
elif type_ == "ck_constraint":
state.ck_constraints.append(spec)
else:
pass
return state
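# Example input handled by parse() (illustrative SHOW CREATE TABLE output;
# table and column names are hypothetical):
#
#   CREATE TABLE `t` (
#     `id` int(11) NOT NULL AUTO_INCREMENT,
#     PRIMARY KEY (`id`)
#   ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4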
def _parse_constraints(self, line):
"""Parse a KEY or CONSTRAINT line.
:param line: A line of SHOW CREATE TABLE output
"""
# KEY
m = self._re_key.match(line)
if m:
spec = m.groupdict()
# convert columns into name, length pairs
# NOTE: we may want to consider SHOW INDEX as the
# format of indexes in MySQL becomes more complex
spec["columns"] = self._parse_keyexprs(spec["columns"])
if spec["version_sql"]:
m2 = self._re_key_version_sql.match(spec["version_sql"])
if m2 and m2.groupdict()["parser"]:
spec["parser"] = m2.groupdict()["parser"]
if spec["parser"]:
spec["parser"] = self.preparer.unformat_identifiers(
spec["parser"]
)[0]
return "key", spec
# FOREIGN KEY CONSTRAINT
m = self._re_fk_constraint.match(line)
if m:
spec = m.groupdict()
spec["table"] = self.preparer.unformat_identifiers(spec["table"])
spec["local"] = [c[0] for c in self._parse_keyexprs(spec["local"])]
spec["foreign"] = [
c[0] for c in self._parse_keyexprs(spec["foreign"])
]
return "fk_constraint", spec
# CHECK constraint
m = self._re_ck_constraint.match(line)
if m:
spec = m.groupdict()
return "ck_constraint", spec
# PARTITION and SUBPARTITION
m = self._re_partition.match(line)
if m:
# Punt!
return "partition", line
# No match.
return (None, line)
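# Example lines this method classifies (illustrative; names are
# hypothetical):
#
#   KEY `ix_name` (`name`)                                  -> ("key", spec)
#   CONSTRAINT `fk` FOREIGN KEY (`a`) REFERENCES `t2` (`b`) -> ("fk_constraint", spec)
#   CONSTRAINT `ck` CHECK (`x` > 5)                         -> ("ck_constraint", spec)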
def _parse_table_name(self, line, state):
"""Extract the table name.
:param line: The first line of SHOW CREATE TABLE
"""
regex, cleanup = self._pr_name
m = regex.match(line)
if m:
state.table_name = cleanup(m.group("name"))
def _parse_table_options(self, line, state):
"""Build a dictionary of all reflected table-level options.
:param line: The final line of SHOW CREATE TABLE output.
"""
options = {}
if not line or line == ")":
pass
else:
rest_of_line = line[:]
for regex, cleanup in self._pr_options:
m = regex.search(rest_of_line)
if not m:
continue
directive, value = m.group("directive"), m.group("val")
if cleanup:
value = cleanup(value)
options[directive.lower()] = value
rest_of_line = regex.sub("", rest_of_line)
for nope in ("auto_increment", "data directory", "index directory"):
options.pop(nope, None)
for opt, val in options.items():
state.table_options["%s_%s" % (self.dialect.name, opt)] = val
def _parse_column(self, line, state):
"""Extract column details.
Falls back to a 'minimal support' variant if full parse fails.
:param line: Any column-bearing line from SHOW CREATE TABLE
"""
spec = None
m = self._re_column.match(line)
if m:
spec = m.groupdict()
spec["full"] = True
else:
m = self._re_column_loose.match(line)
if m:
spec = m.groupdict()
spec["full"] = False
if not spec:
util.warn("Unknown column definition %r" % line)
return
if not spec["full"]:
util.warn("Incomplete reflection of column definition %r" % line)
name, type_, args = spec["name"], spec["coltype"], spec["arg"]
try:
col_type = self.dialect.ischema_names[type_]
except KeyError:
util.warn(
"Did not recognize type '%s' of column '%s'" % (type_, name)
)
col_type = sqltypes.NullType
# Column type positional arguments eg. varchar(32)
if args is None or args == "":
type_args = []
elif args[0] == "'" and args[-1] == "'":
type_args = self._re_csv_str.findall(args)
else:
type_args = [int(v) for v in self._re_csv_int.findall(args)]
# Column type keyword options
type_kw = {}
if issubclass(col_type, (DATETIME, TIME, TIMESTAMP)):
if type_args:
type_kw["fsp"] = type_args.pop(0)
for kw in ("unsigned", "zerofill"):
if spec.get(kw, False):
type_kw[kw] = True
for kw in ("charset", "collate"):
if spec.get(kw, False):
type_kw[kw] = spec[kw]
if issubclass(col_type, _EnumeratedValues):
type_args = _EnumeratedValues._strip_values(type_args)
if issubclass(col_type, SET) and "" in type_args:
type_kw["retrieve_as_bitwise"] = True
type_instance = col_type(*type_args, **type_kw)
col_kw = {}
# NOT NULL
col_kw["nullable"] = True
# this can be "NULL" in the case of TIMESTAMP
if spec.get("notnull", False) == "NOT NULL":
col_kw["nullable"] = False
# AUTO_INCREMENT
if spec.get("autoincr", False):
col_kw["autoincrement"] = True
elif issubclass(col_type, sqltypes.Integer):
col_kw["autoincrement"] = False
# DEFAULT
default = spec.get("default", None)
if default == "NULL":
# eliminates the need to deal with this later.
default = None
comment = spec.get("comment", None)
if comment is not None:
comment = comment.replace("\\\\", "\\").replace("''", "'")
sqltext = spec.get("generated")
if sqltext is not None:
computed = dict(sqltext=sqltext)
persisted = spec.get("persistence")
if persisted is not None:
computed["persisted"] = persisted == "STORED"
col_kw["computed"] = computed
col_d = dict(
name=name, type=type_instance, default=default, comment=comment
)
col_d.update(col_kw)
state.columns.append(col_d)
def _describe_to_create(self, table_name, columns):
"""Re-format DESCRIBE output as a SHOW CREATE TABLE string.
DESCRIBE is a much simpler reflection and is sufficient for
reflecting views for runtime use. This method formats DDL
for columns only; keys are omitted.
:param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
SHOW FULL COLUMNS FROM rows must be rearranged for use with
this function.
"""
buffer = []
for row in columns:
(name, col_type, nullable, default, extra) = [
row[i] for i in (0, 1, 2, 4, 5)
]
line = [" "]
line.append(self.preparer.quote_identifier(name))
line.append(col_type)
if not nullable:
line.append("NOT NULL")
if default:
if "auto_increment" in default:
pass
elif col_type.startswith("timestamp") and default.startswith(
"C"
):
line.append("DEFAULT")
line.append(default)
elif default == "NULL":
line.append("DEFAULT")
line.append(default)
else:
line.append("DEFAULT")
line.append("'%s'" % default.replace("'", "''"))
if extra:
line.append(extra)
buffer.append(" ".join(line))
return "".join(
[
(
"CREATE TABLE %s (\n"
% self.preparer.quote_identifier(table_name)
),
",\n".join(buffer),
"\n) ",
]
)
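# A sketch of the transformation, assuming a DESCRIBE row of
# ("id", "int(11)", "NO", "PRI", None, "auto_increment"); the output is
# roughly:
#
#   CREATE TABLE `t` (
#     `id` int(11) NOT NULL auto_increment
#   )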
def _parse_keyexprs(self, identifiers):
"""Unpack '"col"(2),"col" ASC'-ish strings into components."""
return self._re_keyexprs.findall(identifiers)
def _prep_regexes(self):
"""Pre-compile regular expressions."""
self._re_columns = []
self._pr_options = []
_final = self.preparer.final_quote
quotes = dict(
zip(
("iq", "fq", "esc_fq"),
[
re.escape(s)
for s in (
self.preparer.initial_quote,
_final,
self.preparer._escape_identifier(_final),
)
],
)
)
self._pr_name = _pr_compile(
r"^CREATE (?:\w+ +)?TABLE +"
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($" % quotes,
self.preparer._unescape_identifier,
)
# `col`,`col2`(32),`col3`(15) DESC
#
self._re_keyexprs = _re_compile(
r"(?:"
r"(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)"
r"(?:\((\d+)\))?(?: +(ASC|DESC))?(?=\,|$))+" % quotes
)
# 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
self._re_csv_str = _re_compile(r"\x27(?:\x27\x27|[^\x27])*\x27")
# 123 or 123,456
self._re_csv_int = _re_compile(r"\d+")
# `colname` <type> [type opts]
# (NOT NULL | NULL)
# DEFAULT ('value' | CURRENT_TIMESTAMP...)
# COMMENT 'comment'
# COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
# STORAGE (DISK|MEMORY)
self._re_column = _re_compile(
r" "
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +"
r"(?P<coltype>\w+)"
r"(?:\((?P<arg>(?:\d+|\d+,\d+|"
r"(?:'(?:''|[^'])*',?)+))\))?"
r"(?: +(?P<unsigned>UNSIGNED))?"
r"(?: +(?P<zerofill>ZEROFILL))?"
r"(?: +CHARACTER SET +(?P<charset>[\w_]+))?"
r"(?: +COLLATE +(?P<collate>[\w_]+))?"
r"(?: +(?P<notnull>(?:NOT )?NULL))?"
r"(?: +DEFAULT +(?P<default>"
r"(?:NULL|'(?:''|[^'])*'|[\w\(\)]+"
r"(?: +ON UPDATE [\w\(\)]+)?)"
r"))?"
r"(?: +(?:GENERATED ALWAYS)? ?AS +(?P<generated>\("
r".*\))? ?(?P<persistence>VIRTUAL|STORED)?)?"
r"(?: +(?P<autoincr>AUTO_INCREMENT))?"
r"(?: +COMMENT +'(?P<comment>(?:''|[^'])*)')?"
r"(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?"
r"(?: +STORAGE +(?P<storage>\w+))?"
r"(?: +(?P<extra>.*))?"
r",?$" % quotes
)
# Fallback, try to parse as little as possible
self._re_column_loose = _re_compile(
r" "
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +"
r"(?P<coltype>\w+)"
r"(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?"
r".*?(?P<notnull>(?:NOT )NULL)?" % quotes
)
# (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
# (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
# KEY_BLOCK_SIZE size | WITH PARSER name /*!50100 WITH PARSER name */
self._re_key = _re_compile(
r" "
r"(?:(?P<type>\S+) )?KEY"
r"(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?"
r"(?: +USING +(?P<using_pre>\S+))?"
r" +\((?P<columns>.+?)\)"
r"(?: +USING +(?P<using_post>\S+))?"
r"(?: +KEY_BLOCK_SIZE *[ =]? *(?P<keyblock>\S+))?"
r"(?: +WITH PARSER +(?P<parser>\S+))?"
r"(?: +COMMENT +(?P<comment>(\x27\x27|\x27([^\x27])*?\x27)+))?"
r"(?: +/\*(?P<version_sql>.+)\*/ +)?"
r",?$" % quotes
)
# https://forums.mysql.com/read.php?20,567102,567111#msg-567111
# It means if the MySQL version >= \d+, execute what's in the comment
self._re_key_version_sql = _re_compile(
r"\!\d+ " r"(?: *WITH PARSER +(?P<parser>\S+) *)?"
)
# CONSTRAINT `name` FOREIGN KEY (`local_col`)
# REFERENCES `remote` (`remote_col`)
# MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
# ON DELETE CASCADE ON UPDATE RESTRICT
#
# unique constraints come back as KEYs
kw = quotes.copy()
kw["on"] = "RESTRICT|CASCADE|SET NULL|NOACTION"
self._re_fk_constraint = _re_compile(
r" "
r"CONSTRAINT +"
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +"
r"FOREIGN KEY +"
r"\((?P<local>[^\)]+?)\) REFERENCES +"
r"(?P<table>%(iq)s[^%(fq)s]+%(fq)s"
r"(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +"
r"\((?P<foreign>[^\)]+?)\)"
r"(?: +(?P<match>MATCH \w+))?"
r"(?: +ON DELETE (?P<ondelete>%(on)s))?"
r"(?: +ON UPDATE (?P<onupdate>%(on)s))?" % kw
)
# CONSTRAINT `CONSTRAINT_1` CHECK (`x` > 5)'
# testing on MariaDB 10.2 shows that the CHECK constraint
# is returned on a line by itself, so to match without worrying
# about parentheses in the expression we go to the end of the line
self._re_ck_constraint = _re_compile(
r" "
r"CONSTRAINT +"
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +"
r"CHECK +"
r"\((?P<sqltext>.+)\),?" % kw
)
# PARTITION
#
# punt!
self._re_partition = _re_compile(r"(?:.*)(?:SUB)?PARTITION(?:.*)")
# Table-level options (COLLATE, ENGINE, etc.)
# Do the string options first, since they have quoted
# strings we need to get rid of.
for option in _options_of_type_string:
self._add_option_string(option)
for option in (
"ENGINE",
"TYPE",
"AUTO_INCREMENT",
"AVG_ROW_LENGTH",
"CHARACTER SET",
"DEFAULT CHARSET",
"CHECKSUM",
"COLLATE",
"DELAY_KEY_WRITE",
"INSERT_METHOD",
"MAX_ROWS",
"MIN_ROWS",
"PACK_KEYS",
"ROW_FORMAT",
"KEY_BLOCK_SIZE",
):
self._add_option_word(option)
self._add_option_regex("UNION", r"\([^\)]+\)")
self._add_option_regex("TABLESPACE", r".*? STORAGE DISK")
self._add_option_regex(
"RAID_TYPE",
r"\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+",
)
_optional_equals = r"(?:\s*(?:=\s*)|\s+)"
def _add_option_string(self, directive):
regex = r"(?P<directive>%s)%s" r"'(?P<val>(?:[^']|'')*?)'(?!')" % (
re.escape(directive),
self._optional_equals,
)
self._pr_options.append(
_pr_compile(
regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")
)
)
def _add_option_word(self, directive):
regex = r"(?P<directive>%s)%s" r"(?P<val>\w+)" % (
re.escape(directive),
self._optional_equals,
)
self._pr_options.append(_pr_compile(regex))
def _add_option_regex(self, directive, regex):
regex = r"(?P<directive>%s)%s" r"(?P<val>%s)" % (
re.escape(directive),
self._optional_equals,
regex,
)
self._pr_options.append(_pr_compile(regex))
_options_of_type_string = (
"COMMENT",
"DATA DIRECTORY",
"INDEX DIRECTORY",
"PASSWORD",
"CONNECTION",
)
def _pr_compile(regex, cleanup=None):
"""Prepare a 2-tuple of compiled regex and callable."""
return (_re_compile(regex), cleanup)
def _re_compile(regex):
"""Compile a string to regex, I and UNICODE."""
return re.compile(regex, re.I | re.UNICODE)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mysql/zxjdbc.py | # mysql/zxjdbc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mysql+zxjdbc
:name: zxjdbc for Jython
:dbapi: zxjdbc
:connectstring: mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/<database>
:driverurl: http://dev.mysql.com/downloads/connector/j/
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
Character Sets
--------------
SQLAlchemy zxjdbc dialects pass unicode straight through to the
zxjdbc/JDBC layer. To allow multiple character sets to be sent from the
MySQL Connector/J JDBC driver, by default SQLAlchemy sets its
``characterEncoding`` connection property to ``UTF-8``. It may be
overridden via a ``create_engine`` URL parameter.
""" # noqa
import re
from .base import BIT
from .base import MySQLDialect
from .base import MySQLExecutionContext
from ... import types as sqltypes
from ... import util
from ...connectors.zxJDBC import ZxJDBCConnector
class _ZxJDBCBit(BIT):
def result_processor(self, dialect, coltype):
"""Converts boolean or byte arrays from MySQL Connector/J to longs."""
def process(value):
if value is None:
return value
if isinstance(value, bool):
return int(value)
v = 0
for i in value:
v = v << 8 | (i & 0xFF)
value = v
return value
return process
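# For example (assuming the driver hands back a byte array):
#
#   process(bytearray([0x01, 0x02]))  # -> 258, i.e. (1 << 8) | 2
#   process(True)                     # -> 1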
class MySQLExecutionContext_zxjdbc(MySQLExecutionContext):
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT LAST_INSERT_ID()")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect):
jdbc_db_name = "mysql"
jdbc_driver_name = "com.mysql.jdbc.Driver"
execution_ctx_cls = MySQLExecutionContext_zxjdbc
colspecs = util.update_copy(
MySQLDialect.colspecs, {sqltypes.Time: sqltypes.Time, BIT: _ZxJDBCBit}
)
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
# Prefer 'character_set_results' for the current connection over the
# value in the driver. SET NAMES or individual variable SETs will
# change the charset without updating the driver's view of the world.
#
# If issuing that sort of SQL proves unworkable, this can be changed to
# prefer the driver value.
rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
opts = {row[0]: row[1] for row in self._compat_fetchall(rs)}
for key in ("character_set_connection", "character_set"):
if opts.get(key, None):
return opts[key]
util.warn(
"Could not detect the connection character set. "
"Assuming latin1."
)
return "latin1"
def _driver_kwargs(self):
"""return kw arg dict to be sent to connect()."""
return dict(characterEncoding="UTF-8", yearIsDateType="false")
def _extract_error_code(self, exception):
# e.g.: DBAPIError: (Error) Table 'test.u2' doesn't exist
# [SQLCode: 1146], [SQLState: 42S02] 'DESCRIBE `u2`' ()
m = re.compile(r"\[SQLCode\: (\d+)\]").search(str(exception.args))
c = m.group(1)
if c:
return int(c)
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
version = []
r = re.compile(r"[.\-]")
for n in r.split(dbapi_con.dbversion):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
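# For example, a dbversion string of "5.7.30-log" yields (5, 7, 30, "log").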
dialect = MySQLDialect_zxjdbc
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mysql/__init__.py | # mysql/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base # noqa
from . import cymysql # noqa
from . import gaerdbms # noqa
from . import mysqlconnector # noqa
from . import mysqldb # noqa
from . import oursql # noqa
from . import pymysql # noqa
from . import pyodbc # noqa
from . import zxjdbc # noqa
from .base import BIGINT
from .base import BINARY
from .base import BIT
from .base import BLOB
from .base import BOOLEAN
from .base import CHAR
from .base import DATE
from .base import DATETIME
from .base import DECIMAL
from .base import DOUBLE
from .base import ENUM
from .base import FLOAT
from .base import INTEGER
from .base import JSON
from .base import LONGBLOB
from .base import LONGTEXT
from .base import MEDIUMBLOB
from .base import MEDIUMINT
from .base import MEDIUMTEXT
from .base import NCHAR
from .base import NUMERIC
from .base import NVARCHAR
from .base import REAL
from .base import SET
from .base import SMALLINT
from .base import TEXT
from .base import TIME
from .base import TIMESTAMP
from .base import TINYBLOB
from .base import TINYINT
from .base import TINYTEXT
from .base import VARBINARY
from .base import VARCHAR
from .base import YEAR
from .dml import Insert
from .dml import insert
# default dialect
base.dialect = dialect = mysqldb.dialect
__all__ = (
"BIGINT",
"BINARY",
"BIT",
"BLOB",
"BOOLEAN",
"CHAR",
"DATE",
"DATETIME",
"DECIMAL",
"DOUBLE",
"ENUM",
"DECIMAL",
"FLOAT",
"INTEGER",
"INTEGER",
"JSON",
"LONGBLOB",
"LONGTEXT",
"MEDIUMBLOB",
"MEDIUMINT",
"MEDIUMTEXT",
"NCHAR",
"NVARCHAR",
"NUMERIC",
"SET",
"SMALLINT",
"REAL",
"TEXT",
"TIME",
"TIMESTAMP",
"TINYBLOB",
"TINYINT",
"TINYTEXT",
"VARBINARY",
"VARCHAR",
"YEAR",
"dialect",
"insert",
"Insert",
)
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mysql/types.py | # mysql/types.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import datetime
from ... import exc
from ... import types as sqltypes
from ... import util
class _NumericType(object):
"""Base for MySQL numeric types.
This is the base for both NUMERIC and INTEGER; hence
it's a mixin.
"""
def __init__(self, unsigned=False, zerofill=False, **kw):
self.unsigned = unsigned
self.zerofill = zerofill
super(_NumericType, self).__init__(**kw)
def __repr__(self):
return util.generic_repr(
self, to_inspect=[_NumericType, sqltypes.Numeric]
)
class _FloatType(_NumericType, sqltypes.Float):
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
if isinstance(self, (REAL, DOUBLE)) and (
(precision is None and scale is not None)
or (precision is not None and scale is None)
):
raise exc.ArgumentError(
"You must specify both precision and scale or omit "
"both altogether."
)
super(_FloatType, self).__init__(
precision=precision, asdecimal=asdecimal, **kw
)
self.scale = scale
def __repr__(self):
return util.generic_repr(
self, to_inspect=[_FloatType, _NumericType, sqltypes.Float]
)
class _IntegerType(_NumericType, sqltypes.Integer):
def __init__(self, display_width=None, **kw):
self.display_width = display_width
super(_IntegerType, self).__init__(**kw)
def __repr__(self):
return util.generic_repr(
self, to_inspect=[_IntegerType, _NumericType, sqltypes.Integer]
)
class _StringType(sqltypes.String):
"""Base for MySQL string types."""
def __init__(
self,
charset=None,
collation=None,
ascii=False, # noqa
binary=False,
unicode=False,
national=False,
**kw
):
self.charset = charset
# allow collate= or collation=
kw.setdefault("collation", kw.pop("collate", collation))
self.ascii = ascii
self.unicode = unicode
self.binary = binary
self.national = national
super(_StringType, self).__init__(**kw)
def __repr__(self):
return util.generic_repr(
self, to_inspect=[_StringType, sqltypes.String]
)
class _MatchType(sqltypes.Float, sqltypes.MatchType):
def __init__(self, **kw):
# TODO: float arguments?
sqltypes.Float.__init__(self)
sqltypes.MatchType.__init__(self)
class NUMERIC(_NumericType, sqltypes.NUMERIC):
"""MySQL NUMERIC type."""
__visit_name__ = "NUMERIC"
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a NUMERIC.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(NUMERIC, self).__init__(
precision=precision, scale=scale, asdecimal=asdecimal, **kw
)
class DECIMAL(_NumericType, sqltypes.DECIMAL):
"""MySQL DECIMAL type."""
__visit_name__ = "DECIMAL"
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a DECIMAL.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(DECIMAL, self).__init__(
precision=precision, scale=scale, asdecimal=asdecimal, **kw
)
class DOUBLE(_FloatType):
"""MySQL DOUBLE type."""
__visit_name__ = "DOUBLE"
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a DOUBLE.
.. note::
The :class:`.DOUBLE` type by default converts from float
to Decimal, using a truncation that defaults to 10 digits.
Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
to change this scale, or ``asdecimal=False`` to return values
directly as Python floating points.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(DOUBLE, self).__init__(
precision=precision, scale=scale, asdecimal=asdecimal, **kw
)
class REAL(_FloatType, sqltypes.REAL):
"""MySQL REAL type."""
__visit_name__ = "REAL"
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a REAL.
.. note::
The :class:`.REAL` type by default converts from float
to Decimal, using a truncation that defaults to 10 digits.
Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
to change this scale, or ``asdecimal=False`` to return values
directly as Python floating points.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(REAL, self).__init__(
precision=precision, scale=scale, asdecimal=asdecimal, **kw
)
class FLOAT(_FloatType, sqltypes.FLOAT):
"""MySQL FLOAT type."""
__visit_name__ = "FLOAT"
def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
"""Construct a FLOAT.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(FLOAT, self).__init__(
precision=precision, scale=scale, asdecimal=asdecimal, **kw
)
def bind_processor(self, dialect):
return None
class INTEGER(_IntegerType, sqltypes.INTEGER):
"""MySQL INTEGER type."""
__visit_name__ = "INTEGER"
def __init__(self, display_width=None, **kw):
"""Construct an INTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(INTEGER, self).__init__(display_width=display_width, **kw)
class BIGINT(_IntegerType, sqltypes.BIGINT):
"""MySQL BIGINTEGER type."""
__visit_name__ = "BIGINT"
def __init__(self, display_width=None, **kw):
"""Construct a BIGINTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(BIGINT, self).__init__(display_width=display_width, **kw)
class MEDIUMINT(_IntegerType):
"""MySQL MEDIUMINTEGER type."""
__visit_name__ = "MEDIUMINT"
def __init__(self, display_width=None, **kw):
"""Construct a MEDIUMINTEGER
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(MEDIUMINT, self).__init__(display_width=display_width, **kw)
class TINYINT(_IntegerType):
"""MySQL TINYINT type."""
__visit_name__ = "TINYINT"
def __init__(self, display_width=None, **kw):
"""Construct a TINYINT.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(TINYINT, self).__init__(display_width=display_width, **kw)
class SMALLINT(_IntegerType, sqltypes.SMALLINT):
"""MySQL SMALLINTEGER type."""
__visit_name__ = "SMALLINT"
def __init__(self, display_width=None, **kw):
"""Construct a SMALLINTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(SMALLINT, self).__init__(display_width=display_width, **kw)
class BIT(sqltypes.TypeEngine):
"""MySQL BIT type.
This type is supported for MyISAM as of MySQL 5.0.3, and for MyISAM,
MEMORY, InnoDB and BDB as of MySQL 5.0.5. For older versions, use a
MSTinyInteger() type.
"""
__visit_name__ = "BIT"
def __init__(self, length=None):
"""Construct a BIT.
:param length: Optional, number of bits.
"""
self.length = length
def result_processor(self, dialect, coltype):
"""Convert a MySQL's 64 bit, variable length binary string to a long.
TODO: this is MySQL-db, pyodbc specific. OurSQL and mysqlconnector
already do this, so this logic should be moved to those dialects.
"""
def process(value):
if value is not None:
v = 0
for i in value:
if not isinstance(i, int):
i = ord(i) # convert byte to int on Python 2
v = v << 8 | i
return v
return value
return process
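# For example, a BIT(10) value returned by the driver as b"\x02\x01" is
# converted to (2 << 8) | 1 == 513.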
class TIME(sqltypes.TIME):
"""MySQL TIME type. """
__visit_name__ = "TIME"
def __init__(self, timezone=False, fsp=None):
"""Construct a MySQL TIME type.
:param timezone: not used by the MySQL dialect.
:param fsp: fractional seconds precision value.
MySQL 5.6 supports storage of fractional seconds;
this parameter will be used when emitting DDL
for the TIME type.
.. note::
DBAPI driver support for fractional seconds may
be limited; current support includes
MySQL Connector/Python.
"""
super(TIME, self).__init__(timezone=timezone)
self.fsp = fsp
def result_processor(self, dialect, coltype):
time = datetime.time
def process(value):
# convert from a timedelta value
if value is not None:
microseconds = value.microseconds
seconds = value.seconds
minutes = seconds // 60
return time(
minutes // 60,
minutes % 60,
seconds - minutes * 60,
microsecond=microseconds,
)
else:
return None
return process
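# For example, a driver-returned timedelta(hours=5, minutes=30,
# seconds=12) becomes datetime.time(5, 30, 12).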
class TIMESTAMP(sqltypes.TIMESTAMP):
"""MySQL TIMESTAMP type.
"""
__visit_name__ = "TIMESTAMP"
def __init__(self, timezone=False, fsp=None):
"""Construct a MySQL TIMESTAMP type.
:param timezone: not used by the MySQL dialect.
:param fsp: fractional seconds precision value.
MySQL 5.6.4 supports storage of fractional seconds;
this parameter will be used when emitting DDL
for the TIMESTAMP type.
.. note::
DBAPI driver support for fractional seconds may
be limited; current support includes
MySQL Connector/Python.
"""
super(TIMESTAMP, self).__init__(timezone=timezone)
self.fsp = fsp
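# For example, TIMESTAMP(fsp=6) should render as "TIMESTAMP(6)" in DDL.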
class DATETIME(sqltypes.DATETIME):
"""MySQL DATETIME type.
"""
__visit_name__ = "DATETIME"
def __init__(self, timezone=False, fsp=None):
"""Construct a MySQL DATETIME type.
:param timezone: not used by the MySQL dialect.
:param fsp: fractional seconds precision value.
MySQL 5.6.4 supports storage of fractional seconds;
this parameter will be used when emitting DDL
for the DATETIME type.
.. note::
DBAPI driver support for fractional seconds may
be limited; current support includes
MySQL Connector/Python.
"""
super(DATETIME, self).__init__(timezone=timezone)
self.fsp = fsp
class YEAR(sqltypes.TypeEngine):
"""MySQL YEAR type, for single byte storage of years 1901-2155."""
__visit_name__ = "YEAR"
def __init__(self, display_width=None):
self.display_width = display_width
class TEXT(_StringType, sqltypes.TEXT):
"""MySQL TEXT type, for text up to 2^16 characters."""
__visit_name__ = "TEXT"
def __init__(self, length=None, **kw):
"""Construct a TEXT.
:param length: Optional, if provided the server may optimize storage
by substituting the smallest TEXT type sufficient to store
``length`` characters.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(TEXT, self).__init__(length=length, **kw)
class TINYTEXT(_StringType):
"""MySQL TINYTEXT type, for text up to 2^8 characters."""
__visit_name__ = "TINYTEXT"
def __init__(self, **kwargs):
"""Construct a TINYTEXT.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(TINYTEXT, self).__init__(**kwargs)
class MEDIUMTEXT(_StringType):
"""MySQL MEDIUMTEXT type, for text up to 2^24 characters."""
__visit_name__ = "MEDIUMTEXT"
def __init__(self, **kwargs):
"""Construct a MEDIUMTEXT.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(MEDIUMTEXT, self).__init__(**kwargs)
class LONGTEXT(_StringType):
"""MySQL LONGTEXT type, for text up to 2^32 characters."""
__visit_name__ = "LONGTEXT"
def __init__(self, **kwargs):
"""Construct a LONGTEXT.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(LONGTEXT, self).__init__(**kwargs)
class VARCHAR(_StringType, sqltypes.VARCHAR):
"""MySQL VARCHAR type, for variable-length character data."""
__visit_name__ = "VARCHAR"
def __init__(self, length=None, **kwargs):
"""Construct a VARCHAR.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence to 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
"""MySQL CHAR type, for fixed-length character data."""
__visit_name__ = "CHAR"
def __init__(self, length=None, **kwargs):
"""Construct a CHAR.
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
super(CHAR, self).__init__(length=length, **kwargs)
@classmethod
def _adapt_string_for_cast(self, type_):
# copy the given string type into a CHAR
# for the purposes of rendering a CAST expression
type_ = sqltypes.to_instance(type_)
if isinstance(type_, sqltypes.CHAR):
return type_
elif isinstance(type_, _StringType):
return CHAR(
length=type_.length,
charset=type_.charset,
collation=type_.collation,
ascii=type_.ascii,
binary=type_.binary,
unicode=type_.unicode,
national=False, # not supported in CAST
)
else:
return CHAR(length=type_.length)
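# For example, a CAST against a VARCHAR(30) column is rendered using a
# CHAR(30) copy of the type, since MySQL's CAST historically accepts CHAR
# but not VARCHAR as a target type.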
class NVARCHAR(_StringType, sqltypes.NVARCHAR):
"""MySQL NVARCHAR type.
For variable-length character data in the server's configured national
character set.
"""
__visit_name__ = "NVARCHAR"
def __init__(self, length=None, **kwargs):
"""Construct an NVARCHAR.
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
kwargs["national"] = True
super(NVARCHAR, self).__init__(length=length, **kwargs)
class NCHAR(_StringType, sqltypes.NCHAR):
"""MySQL NCHAR type.
For fixed-length character data in the server's configured national
character set.
"""
__visit_name__ = "NCHAR"
def __init__(self, length=None, **kwargs):
"""Construct an NCHAR.
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
kwargs["national"] = True
super(NCHAR, self).__init__(length=length, **kwargs)
class TINYBLOB(sqltypes._Binary):
"""MySQL TINYBLOB type, for binary data up to 2^8 bytes."""
__visit_name__ = "TINYBLOB"
class MEDIUMBLOB(sqltypes._Binary):
"""MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes."""
__visit_name__ = "MEDIUMBLOB"
class LONGBLOB(sqltypes._Binary):
"""MySQL LONGBLOB type, for binary data up to 2^32 bytes."""
__visit_name__ = "LONGBLOB"
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mysql/provision.py | from ...testing.provision import configure_follower
from ...testing.provision import create_db
from ...testing.provision import drop_db
from ...testing.provision import temp_table_keyword_args
@create_db.for_db("mysql")
def _mysql_create_db(cfg, eng, ident):
with eng.connect() as conn:
try:
_mysql_drop_db(cfg, conn, ident)
except Exception:
pass
conn.execute("CREATE DATABASE %s CHARACTER SET utf8mb4" % ident)
conn.execute(
"CREATE DATABASE %s_test_schema CHARACTER SET utf8mb4" % ident
)
conn.execute(
"CREATE DATABASE %s_test_schema_2 CHARACTER SET utf8mb4" % ident
)
@configure_follower.for_db("mysql")
def _mysql_configure_follower(config, ident):
config.test_schema = "%s_test_schema" % ident
config.test_schema_2 = "%s_test_schema_2" % ident
@drop_db.for_db("mysql")
def _mysql_drop_db(cfg, eng, ident):
with eng.connect() as conn:
conn.execute("DROP DATABASE %s_test_schema" % ident)
conn.execute("DROP DATABASE %s_test_schema_2" % ident)
conn.execute("DROP DATABASE %s" % ident)
@temp_table_keyword_args.for_db("mysql")
def _mysql_temp_table_keyword_args(cfg, eng):
return {"prefixes": ["TEMPORARY"]}
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mysql/mysqldb.py | # mysql/mysqldb.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+mysqldb
:name: mysqlclient (maintained fork of MySQL-Python)
:dbapi: mysqldb
:connectstring: mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
:url: https://pypi.org/project/mysqlclient/
Driver Status
-------------
The mysqlclient DBAPI is a maintained fork of the
`MySQL-Python <http://sourceforge.net/projects/mysql-python>`_ DBAPI,
which is no longer maintained. `mysqlclient`_ supports Python 2 and
Python 3 and is very stable.
.. _mysqlclient: https://github.com/PyMySQL/mysqlclient-python
.. _mysqldb_unicode:
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
Using MySQLdb with Google Cloud SQL
-----------------------------------
Google Cloud SQL now recommends use of the MySQLdb dialect. Connect
using a URL like the following::
mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>
Server Side Cursors
-------------------
The mysqldb dialect supports server-side cursors. See :ref:`mysql_ss_cursors`.
"""
import re
from .base import MySQLCompiler
from .base import MySQLDialect
from .base import MySQLExecutionContext
from .base import MySQLIdentifierPreparer
from .base import TEXT
from ... import sql
from ... import util
class MySQLExecutionContext_mysqldb(MySQLExecutionContext):
@property
def rowcount(self):
if hasattr(self, "_rowcount"):
return self._rowcount
else:
return self.cursor.rowcount
class MySQLCompiler_mysqldb(MySQLCompiler):
pass
class MySQLIdentifierPreparer_mysqldb(MySQLIdentifierPreparer):
pass
class MySQLDialect_mysqldb(MySQLDialect):
driver = "mysqldb"
supports_unicode_statements = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_native_decimal = True
default_paramstyle = "format"
execution_ctx_cls = MySQLExecutionContext_mysqldb
statement_compiler = MySQLCompiler_mysqldb
preparer = MySQLIdentifierPreparer_mysqldb
def __init__(self, server_side_cursors=False, **kwargs):
super(MySQLDialect_mysqldb, self).__init__(**kwargs)
self.server_side_cursors = server_side_cursors
self._mysql_dbapi_version = (
self._parse_dbapi_version(self.dbapi.__version__)
if self.dbapi is not None and hasattr(self.dbapi, "__version__")
else (0, 0, 0)
)
def _parse_dbapi_version(self, version):
m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", version)
if m:
return tuple(int(x) for x in m.group(1, 2, 3) if x is not None)
else:
return (0, 0, 0)
@util.langhelpers.memoized_property
def supports_server_side_cursors(self):
try:
cursors = __import__("MySQLdb.cursors").cursors
self._sscursor = cursors.SSCursor
return True
except (ImportError, AttributeError):
return False
@classmethod
def dbapi(cls):
return __import__("MySQLdb")
def on_connect(self):
super_ = super(MySQLDialect_mysqldb, self).on_connect()
def on_connect(conn):
if super_ is not None:
super_(conn)
charset_name = conn.character_set_name()
if charset_name is not None:
cursor = conn.cursor()
cursor.execute("SET NAMES %s" % charset_name)
cursor.close()
return on_connect
def do_ping(self, dbapi_connection):
try:
dbapi_connection.ping(False)
except self.dbapi.Error as err:
if self.is_disconnect(err, dbapi_connection, None):
return False
else:
raise
else:
return True
def do_executemany(self, cursor, statement, parameters, context=None):
rowcount = cursor.executemany(statement, parameters)
if context is not None:
context._rowcount = rowcount
def _check_unicode_returns(self, connection):
# work around issue fixed in
# https://github.com/farcepest/MySQLdb1/commit/cd44524fef63bd3fcb71947392326e9742d520e8
# specific issue w/ the utf8mb4_bin collation and unicode returns
has_utf8mb4_bin = self.server_version_info > (
5,
) and connection.scalar(
"show collation where %s = 'utf8mb4' and %s = 'utf8mb4_bin'"
% (
self.identifier_preparer.quote("Charset"),
self.identifier_preparer.quote("Collation"),
)
)
if has_utf8mb4_bin:
additional_tests = [
sql.collate(
sql.cast(
sql.literal_column("'test collated returns'"),
TEXT(charset="utf8mb4"),
),
"utf8mb4_bin",
)
]
else:
additional_tests = []
return super(MySQLDialect_mysqldb, self)._check_unicode_returns(
connection, additional_tests
)
def create_connect_args(self, url):
opts = url.translate_connect_args(
database="db", username="user", password="passwd"
)
opts.update(url.query)
util.coerce_kw_type(opts, "compress", bool)
util.coerce_kw_type(opts, "connect_timeout", int)
util.coerce_kw_type(opts, "read_timeout", int)
util.coerce_kw_type(opts, "write_timeout", int)
util.coerce_kw_type(opts, "client_flag", int)
util.coerce_kw_type(opts, "local_infile", int)
# Note: using either of the below will cause all strings to be
# returned as Unicode, both in raw SQL operations and with column
# types like String and MSString.
util.coerce_kw_type(opts, "use_unicode", bool)
util.coerce_kw_type(opts, "charset", str)
# Rich values 'cursorclass' and 'conv' are not supported via
# query string.
ssl = {}
keys = ["ssl_ca", "ssl_key", "ssl_cert", "ssl_capath", "ssl_cipher"]
for key in keys:
if key in opts:
ssl[key[4:]] = opts[key]
util.coerce_kw_type(ssl, key[4:], str)
del opts[key]
if ssl:
opts["ssl"] = ssl
# FOUND_ROWS must be set in CLIENT_FLAGS to enable
# supports_sane_rowcount.
client_flag = opts.get("client_flag", 0)
if self.dbapi is not None:
try:
CLIENT_FLAGS = __import__(
self.dbapi.__name__ + ".constants.CLIENT"
).constants.CLIENT
client_flag |= CLIENT_FLAGS.FOUND_ROWS
except (AttributeError, ImportError):
self.supports_sane_rowcount = False
opts["client_flag"] = client_flag
return [[], opts]
def _extract_error_code(self, exception):
return exception.args[0]
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
try:
# note: the SQL here would be
# "SHOW VARIABLES LIKE 'character_set%%'"
cset_name = connection.connection.character_set_name
except AttributeError:
util.warn(
"No 'character_set_name' can be detected with "
"this MySQL-Python version; "
"please upgrade to a recent version of MySQL-Python. "
"Assuming latin1."
)
return "latin1"
else:
return cset_name()
_isolation_lookup = set(
[
"SERIALIZABLE",
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
"AUTOCOMMIT",
]
)
def _set_isolation_level(self, connection, level):
if level == "AUTOCOMMIT":
connection.autocommit(True)
else:
connection.autocommit(False)
super(MySQLDialect_mysqldb, self)._set_isolation_level(
connection, level
)
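# A minimal sketch of selecting an isolation level at engine configuration
# time (URL is hypothetical):
#
#   engine = create_engine(
#       "mysql+mysqldb://scott:tiger@localhost/test",
#       isolation_level="READ COMMITTED",
#   )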
dialect = MySQLDialect_mysqldb
| 0 |
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects | qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/sqlalchemy/dialects/mysql/pymysql.py | # mysql/pymysql.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mysql+pymysql
:name: PyMySQL
:dbapi: pymysql
:connectstring: mysql+pymysql://<username>:<password>@<host>/<dbname>[?<options>]
:url: https://pymysql.readthedocs.io/
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
MySQL-Python Compatibility
--------------------------
The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver,
and targets 100% compatibility. Most behavioral notes for MySQL-python apply
to the pymysql driver as well.
""" # noqa
from .mysqldb import MySQLDialect_mysqldb
from ...util import langhelpers
from ...util import py3k
class MySQLDialect_pymysql(MySQLDialect_mysqldb):
driver = "pymysql"
description_encoding = None
# generally, these two values should be both True
# or both False. PyMySQL unicode tests pass all the way back
# to 0.4 either way. See [ticket:3337]
supports_unicode_statements = True
supports_unicode_binds = True
def __init__(self, server_side_cursors=False, **kwargs):
super(MySQLDialect_pymysql, self).__init__(**kwargs)
self.server_side_cursors = server_side_cursors
@langhelpers.memoized_property
def supports_server_side_cursors(self):
try:
cursors = __import__("pymysql.cursors").cursors
self._sscursor = cursors.SSCursor
return True
except (ImportError, AttributeError):
return False
@classmethod
def dbapi(cls):
return __import__("pymysql")
def is_disconnect(self, e, connection, cursor):
if super(MySQLDialect_pymysql, self).is_disconnect(
e, connection, cursor
):
return True
elif isinstance(e, self.dbapi.Error):
str_e = str(e).lower()
return (
"already closed" in str_e or "connection was killed" in str_e
)
else:
return False
if py3k:
def _extract_error_code(self, exception):
if isinstance(exception.args[0], Exception):
exception = exception.args[0]
return exception.args[0]
dialect = MySQLDialect_pymysql
| 0 |