repo_id: stringclasses (208 values)
file_path: stringlengths (31–190)
content: stringlengths (1–2.65M)
__index_level_0__: int64 (0–0)
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Etc/GMT+0
[binary TZif2 timezone data; POSIX TZ string: GMT0]
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Etc/GMT-2
[binary TZif2 timezone data; POSIX TZ string: <+02>-2]
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Etc/GMT-5
[binary TZif2 timezone data; POSIX TZ string: <+05>-5]
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Etc/UTC
[binary TZif2 timezone data; POSIX TZ string: UTC0]
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Etc/GMT-4
[binary TZif2 timezone data; POSIX TZ string: <+04>-4]
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/pytz/zoneinfo/Etc/GMT-3
[binary TZif2 timezone data; POSIX TZ string: <+03>-3]
0
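A minimal sketch (not part of the dataset rows) of how these compiled zoneinfo files are consumed through pytz. Note the POSIX sign inversion visible in the TZ strings above: Etc/GMT-5 means UTC+5.

    from datetime import datetime

    import pytz

    # Etc/GMT-5 is backed by the compiled TZif file zoneinfo/Etc/GMT-5;
    # per the POSIX convention its offset is +05:00 (sign inverted).
    tz = pytz.timezone("Etc/GMT-5")
    utc_noon = datetime(2021, 1, 1, 12, 0, tzinfo=pytz.utc)
    print(utc_noon.astimezone(tz))  # 2021-01-01 17:00:00+05:00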
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/Flask_Cors-3.0.10.dist-info/RECORD
Flask_Cors-3.0.10.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
Flask_Cors-3.0.10.dist-info/LICENSE,sha256=bhob3FSDTB4HQMvOXV9vLK4chG_Sp_SCsRZJWU-vvV0,1069
Flask_Cors-3.0.10.dist-info/METADATA,sha256=GGjB2MELGVMzpRA98u66-Y4kjXwJvRuEzuv9JuQaBpc,5382
Flask_Cors-3.0.10.dist-info/RECORD,,
Flask_Cors-3.0.10.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
Flask_Cors-3.0.10.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110
Flask_Cors-3.0.10.dist-info/top_level.txt,sha256=aWye_0QNZPp_QtPF4ZluLHqnyVLT9CPJsfiGhwqkWuo,11
flask_cors/__init__.py,sha256=oJExwfR7yU3HAsmQ_EfL6KoLK3zq3J9HsET9r-56sfM,791
flask_cors/__pycache__/__init__.cpython-39.pyc,,
flask_cors/__pycache__/core.cpython-39.pyc,,
flask_cors/__pycache__/decorator.cpython-39.pyc,,
flask_cors/__pycache__/extension.cpython-39.pyc,,
flask_cors/__pycache__/version.cpython-39.pyc,,
flask_cors/core.py,sha256=N6dEVe5dffaQTw79Mc8IvEeTzvY_YsKCiOZ1lJ_PyNk,13894
flask_cors/decorator.py,sha256=iiwjUi0lVeCm4OJJHY5Cvuzj2nENbUns3Iq6zqKXuss,4937
flask_cors/extension.py,sha256=HTSAUEDH8mvTLLMVrcpfbtrdh5OXK72VUPk_FAoQhpU,7586
flask_cors/version.py,sha256=8OdYCyhDLC6EsmyL3_m3G4XCKOJMI20UlrLKmiyEoCE,23
0
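The RECORD rows above follow the wheel format of path,hash,size, where the hash is an unpadded URL-safe base64 encoding of the file's SHA-256 digest. A small sketch, assuming the dist-info directory is on disk:

    import base64
    import hashlib

    def record_hash(path):
        # RECORD hashes: urlsafe base64 of the SHA-256 digest, padding stripped
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

    # e.g. record_hash(".../Flask_Cors-3.0.10.dist-info/top_level.txt")
    # should match the RECORD entry:
    # sha256=aWye_0QNZPp_QtPF4ZluLHqnyVLT9CPJsfiGhwqkWuo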
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/Flask_Cors-3.0.10.dist-info/LICENSE
Copyright (C) 2016 Cory Dolphin, Olin College

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/Flask_Cors-3.0.10.dist-info/WHEEL
Wheel-Version: 1.0
Generator: bdist_wheel (0.36.2)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
0
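The two Tag lines mark this wheel as pure Python, installable on either Python 2 or 3. A quick compatibility check, assuming the third-party packaging library is available:

    from packaging.tags import Tag, sys_tags

    # the tags declared in the WHEEL file above
    wheel_tags = {Tag("py2", "none", "any"), Tag("py3", "none", "any")}

    # True when the running interpreter supports one of the wheel's tags
    print(any(tag in wheel_tags for tag in sys_tags()))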
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/Flask_Cors-3.0.10.dist-info/top_level.txt
flask_cors
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/Flask_Cors-3.0.10.dist-info/INSTALLER
pip
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/Flask_Cors-3.0.10.dist-info/METADATA
Metadata-Version: 2.1
Name: Flask-Cors
Version: 3.0.10
Summary: A Flask extension adding a decorator for CORS support
Home-page: https://github.com/corydolphin/flask-cors
Author: Cory Dolphin
Author-email: corydolphin@gmail.com
License: MIT
Platform: any
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Requires-Dist: Flask (>=0.9)
Requires-Dist: Six

Flask-CORS
==========

|Build Status| |Latest Version| |Supported Python versions| |License|

A Flask extension for handling Cross Origin Resource Sharing (CORS), making cross-origin AJAX possible.

This package has a simple philosophy: when you want to enable CORS, you wish to enable it for all use cases on a domain. This means no mucking around with different allowed headers, methods, etc. By default, submission of cookies across domains is disabled due to the security implications. Please see the documentation for how to enable credentialed requests, and please make sure you add some sort of `CSRF <http://en.wikipedia.org/wiki/Cross-site_request_forgery>`__ protection before doing so!

Installation
------------

Install the extension using pip or easy\_install.

.. code:: bash

    $ pip install -U flask-cors

Usage
-----

This package exposes a Flask extension which by default enables CORS support on all routes, for all origins and methods. It allows parameterization of all CORS headers on a per-resource level. The package also contains a decorator, for those who prefer this approach.

Simple Usage
~~~~~~~~~~~~

In the simplest case, initialize the Flask-Cors extension with default arguments in order to allow CORS for all domains on all routes. See the full list of options in the `documentation <https://flask-cors.corydolphin.com/en/latest/api.html#extension>`__.

.. code:: python

    from flask import Flask
    from flask_cors import CORS

    app = Flask(__name__)
    CORS(app)

    @app.route("/")
    def helloWorld():
        return "Hello, cross-origin-world!"

Resource specific CORS
^^^^^^^^^^^^^^^^^^^^^^

Alternatively, you can specify CORS options on a resource and origin level of granularity by passing a dictionary as the `resources` option, mapping paths to a set of options. See the full list of options in the `documentation <https://flask-cors.corydolphin.com/en/latest/api.html#extension>`__.

.. code:: python

    app = Flask(__name__)
    cors = CORS(app, resources={r"/api/*": {"origins": "*"}})

    @app.route("/api/v1/users")
    def list_users():
        return "user example"

Route specific CORS via decorator
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

This extension also exposes a simple decorator to decorate flask routes with. Simply add ``@cross_origin()`` below a call to Flask's ``@app.route(..)`` to allow CORS on a given route. See the full list of options in the `decorator documentation <https://flask-cors.corydolphin.com/en/latest/api.html#decorator>`__.

.. code:: python

    @app.route("/")
    @cross_origin()
    def helloWorld():
        return "Hello, cross-origin-world!"

Documentation
-------------

For a full list of options, please see the full `documentation <https://flask-cors.corydolphin.com/en/latest/api.html>`__.

Troubleshooting
---------------

If things aren't working as you expect, enable logging to help understand what is going on under the hood, and why.

.. code:: python

    logging.getLogger('flask_cors').level = logging.DEBUG

Tests
-----

A simple set of tests is included in ``test/``. To run, install nose, and invoke ``nosetests`` or ``python setup.py test`` to exercise the tests.

Contributing
------------

Questions, comments or improvements? Please create an issue on `Github <https://github.com/corydolphin/flask-cors>`__, tweet at `@corydolphin <https://twitter.com/corydolphin>`__ or send me an email. I do my best to include every contribution proposed in any way that I can.

Credits
-------

This Flask extension is based upon the `Decorator for the HTTP Access Control <http://flask.pocoo.org/snippets/56/>`__ written by Armin Ronacher.

.. |Build Status| image:: https://api.travis-ci.org/corydolphin/flask-cors.svg?branch=master
   :target: https://travis-ci.org/corydolphin/flask-cors
.. |Latest Version| image:: https://img.shields.io/pypi/v/Flask-Cors.svg
   :target: https://pypi.python.org/pypi/Flask-Cors/
.. |Supported Python versions| image:: https://img.shields.io/pypi/pyversions/Flask-Cors.svg
   :target: https://img.shields.io/pypi/pyversions/Flask-Cors.svg
.. |License| image:: http://img.shields.io/:license-mit-blue.svg
   :target: https://pypi.python.org/pypi/Flask-Cors/
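As a sketch of the credentialed-request note above (parameter names as given in the Flask-Cors API documentation; verify against your installed version):

.. code:: python

    from flask import Flask
    from flask_cors import CORS

    app = Flask(__name__)
    # Cookies are only sent cross-origin once explicitly enabled; pair
    # this with CSRF protection, as the warning above advises.
    CORS(app, supports_credentials=True, origins=["https://example.com"])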
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/interfaces.py
# sqlalchemy/interfaces.py
# Copyright (C) 2007-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
# Copyright (C) 2007 Jason Kirtland jek@discorporate.us
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Deprecated core event interfaces.

.. deprecated:: 0.7
    As of SQLAlchemy 0.7, the new event system described in
    :ref:`event_toplevel` replaces the extension/proxy/listener system,
    providing a consistent interface to all events without the need for
    subclassing.

"""

from . import event
from . import util


class PoolListener(object):
    """Hooks into the lifecycle of connections in a :class:`_pool.Pool`.

    .. deprecated:: 0.7
        :class:`.PoolListener` is deprecated and will be removed in a future
        release.  Please refer to :func:`.event.listen` in conjunction with
        the :class:`_events.PoolEvents` listener interface.

    Usage::

        class MyListener(PoolListener):
            def connect(self, dbapi_con, con_record):
                '''perform connect operations'''
            # etc.

        # create a new pool with a listener
        p = QueuePool(..., listeners=[MyListener()])

        # add a listener after the fact
        p.add_listener(MyListener())

        # usage with create_engine()
        e = create_engine("url://", listeners=[MyListener()])

    All of the standard connection :class:`~sqlalchemy.pool.Pool` types can
    accept event listeners for key connection lifecycle events:
    creation, pool check-out and check-in.  There are no events fired
    when a connection closes.

    For any given DB-API connection, there will be one ``connect``
    event, `n` number of ``checkout`` events, and either `n` or `n - 1`
    ``checkin`` events.  (If a ``Connection`` is detached from its pool
    via the ``detach()`` method, it won't be checked back in.)

    These are low-level events for low-level objects: raw Python
    DB-API connections, without the conveniences of the SQLAlchemy
    ``Connection`` wrapper, ``Dialect`` services or ``ClauseElement``
    execution.  If you execute SQL through the connection, explicitly
    closing all cursors and other resources is recommended.

    Events also receive a ``_ConnectionRecord``, a long-lived internal
    ``Pool`` object that basically represents a "slot" in the
    connection pool.  ``_ConnectionRecord`` objects have one public
    attribute of note: ``info``, a dictionary whose contents are scoped
    to the lifetime of the DB-API connection managed by the record.
    You can use this shared storage area however you like.

    There is no need to subclass ``PoolListener`` to handle events.
    Any class that implements one or more of these methods can be used
    as a pool listener.  The ``Pool`` will inspect the methods
    provided by a listener object and add the listener to one or more
    internal event queues based on its capabilities.  In terms of
    efficiency and function call overhead, you're much better off only
    providing implementations for the hooks you'll be using.

    """

    @classmethod
    def _adapt_listener(cls, self, listener):
        """Adapt a :class:`.PoolListener` to individual
        :class:`event.Dispatch` events.

        """

        methods = ["connect", "first_connect", "checkout", "checkin"]
        listener = util.as_interface(listener, methods=methods)

        for meth in methods:
            me_meth = getattr(PoolListener, meth)
            ls_meth = getattr(listener, meth, None)

            if ls_meth is not None and not util.methods_equivalent(
                me_meth, ls_meth
            ):
                util.warn_deprecated(
                    "PoolListener.%s is deprecated.  The "
                    "PoolListener class will be removed in a future "
                    "release.  Please transition to the @event interface, "
                    "using @event.listens_for(Engine, '%s')." % (meth, meth)
                )

        if hasattr(listener, "connect"):
            event.listen(self, "connect", listener.connect)
        if hasattr(listener, "first_connect"):
            event.listen(self, "first_connect", listener.first_connect)
        if hasattr(listener, "checkout"):
            event.listen(self, "checkout", listener.checkout)
        if hasattr(listener, "checkin"):
            event.listen(self, "checkin", listener.checkin)

    def connect(self, dbapi_con, con_record):
        """Called once for each new DB-API connection or Pool's ``creator()``.

        dbapi_con
          A newly connected raw DB-API connection (not a SQLAlchemy
          ``Connection`` wrapper).

        con_record
          The ``_ConnectionRecord`` that persistently manages the connection

        """

    def first_connect(self, dbapi_con, con_record):
        """Called exactly once for the first DB-API connection.

        dbapi_con
          A newly connected raw DB-API connection (not a SQLAlchemy
          ``Connection`` wrapper).

        con_record
          The ``_ConnectionRecord`` that persistently manages the connection

        """

    def checkout(self, dbapi_con, con_record, con_proxy):
        """Called when a connection is retrieved from the Pool.

        dbapi_con
          A raw DB-API connection

        con_record
          The ``_ConnectionRecord`` that persistently manages the connection

        con_proxy
          The ``_ConnectionFairy`` which manages the connection for the span
          of the current checkout.

        If you raise an ``exc.DisconnectionError``, the current
        connection will be disposed and a fresh connection retrieved.
        Processing of all checkout listeners will abort and restart
        using the new connection.

        """

    def checkin(self, dbapi_con, con_record):
        """Called when a connection returns to the pool.

        Note that the connection may be closed, and may be None if the
        connection has been invalidated.  ``checkin`` will not be called
        for detached connections.  (They do not return to the pool.)

        dbapi_con
          A raw DB-API connection

        con_record
          The ``_ConnectionRecord`` that persistently manages the connection

        """


class ConnectionProxy(object):
    """Allows interception of statement execution by Connections.

    .. deprecated:: 0.7
        :class:`.ConnectionProxy` is deprecated and will be removed in a
        future release.  Please refer to :func:`.event.listen` in conjunction
        with the :class:`_events.ConnectionEvents` listener interface.

    Either or both of the ``execute()`` and ``cursor_execute()``
    may be implemented to intercept compiled statement and
    cursor level executions, e.g.::

        class MyProxy(ConnectionProxy):
            def execute(self, conn, execute, clauseelement,
                        *multiparams, **params):
                print "compiled statement:", clauseelement
                return execute(clauseelement, *multiparams, **params)

            def cursor_execute(self, execute, cursor, statement,
                               parameters, context, executemany):
                print "raw statement:", statement
                return execute(cursor, statement, parameters, context)

    The ``execute`` argument is a function that will fulfill the default
    execution behavior for the operation.  The signature illustrated
    in the example should be used.

    The proxy is installed into an :class:`~sqlalchemy.engine.Engine` via
    the ``proxy`` argument::

        e = create_engine('someurl://', proxy=MyProxy())

    """

    @classmethod
    def _adapt_listener(cls, self, listener):

        methods = [
            "execute",
            "cursor_execute",
            "begin",
            "rollback",
            "commit",
            "savepoint",
            "rollback_savepoint",
            "release_savepoint",
            "begin_twophase",
            "prepare_twophase",
            "rollback_twophase",
            "commit_twophase",
        ]
        for meth in methods:
            me_meth = getattr(ConnectionProxy, meth)
            ls_meth = getattr(listener, meth)

            if not util.methods_equivalent(me_meth, ls_meth):
                util.warn_deprecated(
                    "ConnectionProxy.%s is deprecated.  The "
                    "ConnectionProxy class will be removed in a future "
                    "release.  Please transition to the @event interface, "
                    "using @event.listens_for(Engine, '%s')." % (meth, meth)
                )

        def adapt_execute(conn, clauseelement, multiparams, params):
            def execute_wrapper(clauseelement, *multiparams, **params):
                return clauseelement, multiparams, params

            return listener.execute(
                conn, execute_wrapper, clauseelement, *multiparams, **params
            )

        event.listen(self, "before_execute", adapt_execute)

        def adapt_cursor_execute(
            conn, cursor, statement, parameters, context, executemany
        ):
            def execute_wrapper(cursor, statement, parameters, context):
                return statement, parameters

            return listener.cursor_execute(
                execute_wrapper,
                cursor,
                statement,
                parameters,
                context,
                executemany,
            )

        event.listen(self, "before_cursor_execute", adapt_cursor_execute)

        def do_nothing_callback(*arg, **kw):
            pass

        def adapt_listener(fn):
            def go(conn, *arg, **kw):
                fn(conn, do_nothing_callback, *arg, **kw)

            return util.update_wrapper(go, fn)

        event.listen(self, "begin", adapt_listener(listener.begin))
        event.listen(self, "rollback", adapt_listener(listener.rollback))
        event.listen(self, "commit", adapt_listener(listener.commit))
        event.listen(self, "savepoint", adapt_listener(listener.savepoint))
        event.listen(
            self,
            "rollback_savepoint",
            adapt_listener(listener.rollback_savepoint),
        )
        event.listen(
            self,
            "release_savepoint",
            adapt_listener(listener.release_savepoint),
        )
        event.listen(
            self, "begin_twophase", adapt_listener(listener.begin_twophase)
        )
        event.listen(
            self, "prepare_twophase", adapt_listener(listener.prepare_twophase)
        )
        event.listen(
            self,
            "rollback_twophase",
            adapt_listener(listener.rollback_twophase),
        )
        event.listen(
            self, "commit_twophase", adapt_listener(listener.commit_twophase)
        )

    def execute(self, conn, execute, clauseelement, *multiparams, **params):
        """Intercept high level execute() events."""

        return execute(clauseelement, *multiparams, **params)

    def cursor_execute(
        self, execute, cursor, statement, parameters, context, executemany
    ):
        """Intercept low-level cursor execute() events."""

        return execute(cursor, statement, parameters, context)

    def begin(self, conn, begin):
        """Intercept begin() events."""

        return begin()

    def rollback(self, conn, rollback):
        """Intercept rollback() events."""

        return rollback()

    def commit(self, conn, commit):
        """Intercept commit() events."""

        return commit()

    def savepoint(self, conn, savepoint, name=None):
        """Intercept savepoint() events."""

        return savepoint(name=name)

    def rollback_savepoint(self, conn, rollback_savepoint, name, context):
        """Intercept rollback_savepoint() events."""

        return rollback_savepoint(name, context)

    def release_savepoint(self, conn, release_savepoint, name, context):
        """Intercept release_savepoint() events."""

        return release_savepoint(name, context)

    def begin_twophase(self, conn, begin_twophase, xid):
        """Intercept begin_twophase() events."""

        return begin_twophase(xid)

    def prepare_twophase(self, conn, prepare_twophase, xid):
        """Intercept prepare_twophase() events."""

        return prepare_twophase(xid)

    def rollback_twophase(self, conn, rollback_twophase, xid, is_prepared):
        """Intercept rollback_twophase() events."""

        return rollback_twophase(xid, is_prepared)

    def commit_twophase(self, conn, commit_twophase, xid, is_prepared):
        """Intercept commit_twophase() events."""

        return commit_twophase(xid, is_prepared)
0
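A brief sketch (not part of the file above) of the :func:`.event.listen` replacement that this deprecated module points to, registering the same hook a ``PoolListener.connect()`` implementation would have received:

    import time

    from sqlalchemy import create_engine, event

    engine = create_engine("sqlite://")

    @event.listens_for(engine, "connect")
    def on_connect(dbapi_con, con_record):
        # same arguments as PoolListener.connect(); con_record.info is the
        # per-connection scratch dictionary described in the docstring above
        con_record.info["connected_at"] = time.time()

    with engine.connect():
        pass  # the first checkout triggers the "connect" event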
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/log.py
# sqlalchemy/log.py
# Copyright (C) 2006-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
# Includes alterations by Vinay Sajip vinay_sajip@yahoo.co.uk
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Logging control and utilities.

Control of logging for SA can be performed from the regular python logging
module.  The regular dotted module namespace is used, starting at
'sqlalchemy'.  For class-level logging, the class name is appended.

The "echo" keyword parameter, available on SQLA :class:`_engine.Engine`
and :class:`_pool.Pool` objects, corresponds to a logger specific to that
instance only.

"""

import logging
import sys


# set initial level to WARN.  This so that
# log statements don't occur in the absence of explicit
# logging being enabled for 'sqlalchemy'.
rootlogger = logging.getLogger("sqlalchemy")
if rootlogger.level == logging.NOTSET:
    rootlogger.setLevel(logging.WARN)


def _add_default_handler(logger):
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)s %(name)s %(message)s")
    )
    logger.addHandler(handler)


_logged_classes = set()


def class_logger(cls):
    logger = logging.getLogger(cls.__module__ + "." + cls.__name__)
    cls._should_log_debug = lambda self: logger.isEnabledFor(logging.DEBUG)
    cls._should_log_info = lambda self: logger.isEnabledFor(logging.INFO)
    cls.logger = logger
    _logged_classes.add(cls)
    return cls


class Identified(object):
    logging_name = None

    def _should_log_debug(self):
        return self.logger.isEnabledFor(logging.DEBUG)

    def _should_log_info(self):
        return self.logger.isEnabledFor(logging.INFO)


class InstanceLogger(object):
    """A logger adapter (wrapper) for :class:`.Identified` subclasses.

    This allows multiple instances (e.g. Engine or Pool instances)
    to share a logger, but have its verbosity controlled on a
    per-instance basis.

    The basic functionality is to return a logging level
    which is based on an instance's echo setting.

    Default implementation is:

    'debug' -> logging.DEBUG
    True    -> logging.INFO
    False   -> Effective level of underlying logger
               (logging.WARNING by default)
    None    -> same as False
    """

    # Map echo settings to logger levels
    _echo_map = {
        None: logging.NOTSET,
        False: logging.NOTSET,
        True: logging.INFO,
        "debug": logging.DEBUG,
    }

    def __init__(self, echo, name):
        self.echo = echo
        self.logger = logging.getLogger(name)

        # if echo flag is enabled and no handlers,
        # add a handler to the list
        if self._echo_map[echo] <= logging.INFO and not self.logger.handlers:
            _add_default_handler(self.logger)

    #
    # Boilerplate convenience methods
    #
    def debug(self, msg, *args, **kwargs):
        """Delegate a debug call to the underlying logger."""

        self.log(logging.DEBUG, msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        """Delegate an info call to the underlying logger."""

        self.log(logging.INFO, msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        """Delegate a warning call to the underlying logger."""

        self.log(logging.WARNING, msg, *args, **kwargs)

    warn = warning

    def error(self, msg, *args, **kwargs):
        """Delegate an error call to the underlying logger."""

        self.log(logging.ERROR, msg, *args, **kwargs)

    def exception(self, msg, *args, **kwargs):
        """Delegate an exception call to the underlying logger."""

        kwargs["exc_info"] = 1
        self.log(logging.ERROR, msg, *args, **kwargs)

    def critical(self, msg, *args, **kwargs):
        """Delegate a critical call to the underlying logger."""

        self.log(logging.CRITICAL, msg, *args, **kwargs)

    def log(self, level, msg, *args, **kwargs):
        """Delegate a log call to the underlying logger.

        The level here is determined by the echo
        flag as well as that of the underlying logger, and
        logger._log() is called directly.

        """

        # inline the logic from isEnabledFor(),
        # getEffectiveLevel(), to avoid overhead.
        if self.logger.manager.disable >= level:
            return

        selected_level = self._echo_map[self.echo]
        if selected_level == logging.NOTSET:
            selected_level = self.logger.getEffectiveLevel()

        if level >= selected_level:
            self.logger._log(level, msg, args, **kwargs)

    def isEnabledFor(self, level):
        """Is this logger enabled for level 'level'?"""

        if self.logger.manager.disable >= level:
            return False
        return level >= self.getEffectiveLevel()

    def getEffectiveLevel(self):
        """What's the effective level for this logger?"""

        level = self._echo_map[self.echo]
        if level == logging.NOTSET:
            level = self.logger.getEffectiveLevel()
        return level


def instance_logger(instance, echoflag=None):
    """create a logger for an instance that implements :class:`.Identified`."""

    if instance.logging_name:
        name = "%s.%s.%s" % (
            instance.__class__.__module__,
            instance.__class__.__name__,
            instance.logging_name,
        )
    else:
        name = "%s.%s" % (
            instance.__class__.__module__,
            instance.__class__.__name__,
        )

    instance._echo = echoflag

    if echoflag in (False, None):
        # if no echo setting or False, return a Logger directly,
        # avoiding overhead of filtering
        logger = logging.getLogger(name)
    else:
        # if a specified echo flag, return an EchoLogger,
        # which checks the flag, overrides normal log
        # levels by calling logger._log()
        logger = InstanceLogger(echoflag, name)

    instance.logger = logger


class echo_property(object):
    __doc__ = """\
    When ``True``, enable log output for this element.

    This has the effect of setting the Python logging level for the namespace
    of this element's class and object reference.  A value of boolean ``True``
    indicates that the loglevel ``logging.INFO`` will be set for the logger,
    whereas the string value ``debug`` will set the loglevel to
    ``logging.DEBUG``.
    """

    def __get__(self, instance, owner):
        if instance is None:
            return self
        else:
            return instance._echo

    def __set__(self, instance, value):
        instance_logger(instance, echoflag=value)
0
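A short sketch of the two control paths this module wires together: the per-instance ``echo`` flag (mapped by ``_echo_map`` to INFO or DEBUG) and plain stdlib configuration of the ``sqlalchemy`` logger namespace:

    import logging

    from sqlalchemy import create_engine

    # per-instance flag, handled by InstanceLogger:
    # True -> logging.INFO, "debug" -> logging.DEBUG
    engine = create_engine("sqlite://", echo="debug")

    # equivalent namespace-wide control through the standard logging module
    logging.basicConfig()
    logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)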
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/events.py
# sqlalchemy/events.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Core event interfaces.""" from . import event from . import exc from . import util from .engine import Connectable from .engine import Dialect from .engine import Engine from .pool import Pool from .sql.base import SchemaEventTarget class DDLEvents(event.Events): """ Define event listeners for schema objects, that is, :class:`.SchemaItem` and other :class:`.SchemaEventTarget` subclasses, including :class:`_schema.MetaData`, :class:`_schema.Table`, :class:`_schema.Column`. :class:`_schema.MetaData` and :class:`_schema.Table` support events specifically regarding when CREATE and DROP DDL is emitted to the database. Attachment events are also provided to customize behavior whenever a child schema element is associated with a parent, such as, when a :class:`_schema.Column` is associated with its :class:`_schema.Table`, when a :class:`_schema.ForeignKeyConstraint` is associated with a :class:`_schema.Table`, etc. Example using the ``after_create`` event:: from sqlalchemy import event from sqlalchemy import Table, Column, MetaData, Integer m = MetaData() some_table = Table('some_table', m, Column('data', Integer)) def after_create(target, connection, **kw): connection.execute("ALTER TABLE %s SET name=foo_%s" % (target.name, target.name)) event.listen(some_table, "after_create", after_create) DDL events integrate closely with the :class:`.DDL` class and the :class:`.DDLElement` hierarchy of DDL clause constructs, which are themselves appropriate as listener callables:: from sqlalchemy import DDL event.listen( some_table, "after_create", DDL("ALTER TABLE %(table)s SET name=foo_%(table)s") ) The methods here define the name of an event as well as the names of members that are passed to listener functions. For all :class:`.DDLEvents` events, the ``propagate=True`` keyword argument will ensure that a given event handler is propagated to copies of the object, which are made when using the :meth:`_schema.Table.tometadata` method:: from sqlalchemy import DDL event.listen( some_table, "after_create", DDL("ALTER TABLE %(table)s SET name=foo_%(table)s"), propagate=True ) new_table = some_table.tometadata(new_metadata) The above :class:`.DDL` object will also be associated with the :class:`_schema.Table` object represented by ``new_table``. .. seealso:: :ref:`event_toplevel` :class:`.DDLElement` :class:`.DDL` :ref:`schema_ddl_sequences` """ _target_class_doc = "SomeSchemaClassOrObject" _dispatch_target = SchemaEventTarget def before_create(self, target, connection, **kw): r"""Called before CREATE statements are emitted. :param target: the :class:`_schema.MetaData` or :class:`_schema.Table` object which is the target of the event. :param connection: the :class:`_engine.Connection` where the CREATE statement or statements will be emitted. :param \**kw: additional keyword arguments relevant to the event. The contents of this dictionary may vary across releases, and include the list of tables being generated for a metadata-level event, the checkfirst flag, and other elements used by internal events. :func:`.event.listen` also accepts the ``propagate=True`` modifier for this event; when True, the listener function will be established for any copies made of the target object, i.e. those copies that are generated when :meth:`_schema.Table.tometadata` is used. 
""" def after_create(self, target, connection, **kw): r"""Called after CREATE statements are emitted. :param target: the :class:`_schema.MetaData` or :class:`_schema.Table` object which is the target of the event. :param connection: the :class:`_engine.Connection` where the CREATE statement or statements have been emitted. :param \**kw: additional keyword arguments relevant to the event. The contents of this dictionary may vary across releases, and include the list of tables being generated for a metadata-level event, the checkfirst flag, and other elements used by internal events. :func:`.event.listen` also accepts the ``propagate=True`` modifier for this event; when True, the listener function will be established for any copies made of the target object, i.e. those copies that are generated when :meth:`_schema.Table.tometadata` is used. """ def before_drop(self, target, connection, **kw): r"""Called before DROP statements are emitted. :param target: the :class:`_schema.MetaData` or :class:`_schema.Table` object which is the target of the event. :param connection: the :class:`_engine.Connection` where the DROP statement or statements will be emitted. :param \**kw: additional keyword arguments relevant to the event. The contents of this dictionary may vary across releases, and include the list of tables being generated for a metadata-level event, the checkfirst flag, and other elements used by internal events. :func:`.event.listen` also accepts the ``propagate=True`` modifier for this event; when True, the listener function will be established for any copies made of the target object, i.e. those copies that are generated when :meth:`_schema.Table.tometadata` is used. """ def after_drop(self, target, connection, **kw): r"""Called after DROP statements are emitted. :param target: the :class:`_schema.MetaData` or :class:`_schema.Table` object which is the target of the event. :param connection: the :class:`_engine.Connection` where the DROP statement or statements have been emitted. :param \**kw: additional keyword arguments relevant to the event. The contents of this dictionary may vary across releases, and include the list of tables being generated for a metadata-level event, the checkfirst flag, and other elements used by internal events. :func:`.event.listen` also accepts the ``propagate=True`` modifier for this event; when True, the listener function will be established for any copies made of the target object, i.e. those copies that are generated when :meth:`_schema.Table.tometadata` is used. """ def before_parent_attach(self, target, parent): """Called before a :class:`.SchemaItem` is associated with a parent :class:`.SchemaItem`. :param target: the target object :param parent: the parent to which the target is being attached. :func:`.event.listen` also accepts the ``propagate=True`` modifier for this event; when True, the listener function will be established for any copies made of the target object, i.e. those copies that are generated when :meth:`_schema.Table.tometadata` is used. """ def after_parent_attach(self, target, parent): """Called after a :class:`.SchemaItem` is associated with a parent :class:`.SchemaItem`. :param target: the target object :param parent: the parent to which the target is being attached. :func:`.event.listen` also accepts the ``propagate=True`` modifier for this event; when True, the listener function will be established for any copies made of the target object, i.e. those copies that are generated when :meth:`_schema.Table.tometadata` is used. 
""" def column_reflect(self, inspector, table, column_info): """Called for each unit of 'column info' retrieved when a :class:`_schema.Table` is being reflected. The dictionary of column information as returned by the dialect is passed, and can be modified. The dictionary is that returned in each element of the list returned by :meth:`.reflection.Inspector.get_columns`: * ``name`` - the column's name * ``type`` - the type of this column, which should be an instance of :class:`~sqlalchemy.types.TypeEngine` * ``nullable`` - boolean flag if the column is NULL or NOT NULL * ``default`` - the column's server default value. This is normally specified as a plain string SQL expression, however the event can pass a :class:`.FetchedValue`, :class:`.DefaultClause`, or :func:`_expression.text` object as well. .. versionchanged:: 1.1.6 The :meth:`.DDLEvents.column_reflect` event allows a non string :class:`.FetchedValue`, :func:`_expression.text`, or derived object to be specified as the value of ``default`` in the column dictionary. * ``attrs`` - dict containing optional column attributes The event is called before any action is taken against this dictionary, and the contents can be modified. The :class:`_schema.Column` specific arguments ``info``, ``key``, and ``quote`` can also be added to the dictionary and will be passed to the constructor of :class:`_schema.Column`. Note that this event is only meaningful if either associated with the :class:`_schema.Table` class across the board, e.g.:: from sqlalchemy.schema import Table from sqlalchemy import event def listen_for_reflect(inspector, table, column_info): "receive a column_reflect event" # ... event.listen( Table, 'column_reflect', listen_for_reflect) ...or with a specific :class:`_schema.Table` instance using the ``listeners`` argument:: def listen_for_reflect(inspector, table, column_info): "receive a column_reflect event" # ... t = Table( 'sometable', autoload=True, listeners=[ ('column_reflect', listen_for_reflect) ]) This because the reflection process initiated by ``autoload=True`` completes within the scope of the constructor for :class:`_schema.Table`. :func:`.event.listen` also accepts the ``propagate=True`` modifier for this event; when True, the listener function will be established for any copies made of the target object, i.e. those copies that are generated when :meth:`_schema.Table.tometadata` is used. """ class PoolEvents(event.Events): """Available events for :class:`_pool.Pool`. The methods here define the name of an event as well as the names of members that are passed to listener functions. 
e.g.:: from sqlalchemy import event def my_on_checkout(dbapi_conn, connection_rec, connection_proxy): "handle an on checkout event" event.listen(Pool, 'checkout', my_on_checkout) In addition to accepting the :class:`_pool.Pool` class and :class:`_pool.Pool` instances, :class:`_events.PoolEvents` also accepts :class:`_engine.Engine` objects and the :class:`_engine.Engine` class as targets, which will be resolved to the ``.pool`` attribute of the given engine or the :class:`_pool.Pool` class:: engine = create_engine("postgresql://scott:tiger@localhost/test") # will associate with engine.pool event.listen(engine, 'checkout', my_on_checkout) """ _target_class_doc = "SomeEngineOrPool" _dispatch_target = Pool @classmethod def _accept_with(cls, target): if isinstance(target, type): if issubclass(target, Engine): return Pool elif issubclass(target, Pool): return target elif isinstance(target, Engine): return target.pool else: return target def connect(self, dbapi_connection, connection_record): """Called at the moment a particular DBAPI connection is first created for a given :class:`_pool.Pool`. This event allows one to capture the point directly after which the DBAPI module-level ``.connect()`` method has been used in order to produce a new DBAPI connection. :param dbapi_connection: a DBAPI connection. :param connection_record: the :class:`._ConnectionRecord` managing the DBAPI connection. """ def first_connect(self, dbapi_connection, connection_record): """Called exactly once for the first time a DBAPI connection is checked out from a particular :class:`_pool.Pool`. The rationale for :meth:`_events.PoolEvents.first_connect` is to determine information about a particular series of database connections based on the settings used for all connections. Since a particular :class:`_pool.Pool` refers to a single "creator" function (which in terms of a :class:`_engine.Engine` refers to the URL and connection options used), it is typically valid to make observations about a single connection that can be safely assumed to be valid about all subsequent connections, such as the database version, the server and client encoding settings, collation settings, and many others. :param dbapi_connection: a DBAPI connection. :param connection_record: the :class:`._ConnectionRecord` managing the DBAPI connection. """ def checkout(self, dbapi_connection, connection_record, connection_proxy): """Called when a connection is retrieved from the Pool. :param dbapi_connection: a DBAPI connection. :param connection_record: the :class:`._ConnectionRecord` managing the DBAPI connection. :param connection_proxy: the :class:`._ConnectionFairy` object which will proxy the public interface of the DBAPI connection for the lifespan of the checkout. If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the current connection will be disposed and a fresh connection retrieved. Processing of all checkout listeners will abort and restart using the new connection. .. seealso:: :meth:`_events.ConnectionEvents.engine_connect` - a similar event which occurs upon creation of a new :class:`_engine.Connection`. """ def checkin(self, dbapi_connection, connection_record): """Called when a connection returns to the pool. Note that the connection may be closed, and may be None if the connection has been invalidated. ``checkin`` will not be called for detached connections. (They do not return to the pool.) :param dbapi_connection: a DBAPI connection. :param connection_record: the :class:`._ConnectionRecord` managing the DBAPI connection. 
""" def reset(self, dbapi_connection, connection_record): """Called before the "reset" action occurs for a pooled connection. This event represents when the ``rollback()`` method is called on the DBAPI connection before it is returned to the pool. The behavior of "reset" can be controlled, including disabled, using the ``reset_on_return`` pool argument. The :meth:`_events.PoolEvents.reset` event is usually followed by the :meth:`_events.PoolEvents.checkin` event is called, except in those cases where the connection is discarded immediately after reset. :param dbapi_connection: a DBAPI connection. :param connection_record: the :class:`._ConnectionRecord` managing the DBAPI connection. .. seealso:: :meth:`_events.ConnectionEvents.rollback` :meth:`_events.ConnectionEvents.commit` """ def invalidate(self, dbapi_connection, connection_record, exception): """Called when a DBAPI connection is to be "invalidated". This event is called any time the :meth:`._ConnectionRecord.invalidate` method is invoked, either from API usage or via "auto-invalidation", without the ``soft`` flag. The event occurs before a final attempt to call ``.close()`` on the connection occurs. :param dbapi_connection: a DBAPI connection. :param connection_record: the :class:`._ConnectionRecord` managing the DBAPI connection. :param exception: the exception object corresponding to the reason for this invalidation, if any. May be ``None``. .. versionadded:: 0.9.2 Added support for connection invalidation listening. .. seealso:: :ref:`pool_connection_invalidation` """ def soft_invalidate(self, dbapi_connection, connection_record, exception): """Called when a DBAPI connection is to be "soft invalidated". This event is called any time the :meth:`._ConnectionRecord.invalidate` method is invoked with the ``soft`` flag. Soft invalidation refers to when the connection record that tracks this connection will force a reconnect after the current connection is checked in. It does not actively close the dbapi_connection at the point at which it is called. .. versionadded:: 1.0.3 """ def close(self, dbapi_connection, connection_record): """Called when a DBAPI connection is closed. The event is emitted before the close occurs. The close of a connection can fail; typically this is because the connection is already closed. If the close operation fails, the connection is discarded. The :meth:`.close` event corresponds to a connection that's still associated with the pool. To intercept close events for detached connections use :meth:`.close_detached`. .. versionadded:: 1.1 """ def detach(self, dbapi_connection, connection_record): """Called when a DBAPI connection is "detached" from a pool. This event is emitted after the detach occurs. The connection is no longer associated with the given connection record. .. versionadded:: 1.1 """ def close_detached(self, dbapi_connection): """Called when a detached DBAPI connection is closed. The event is emitted before the close occurs. The close of a connection can fail; typically this is because the connection is already closed. If the close operation fails, the connection is discarded. .. versionadded:: 1.1 """ class ConnectionEvents(event.Events): """Available events for :class:`.Connectable`, which includes :class:`_engine.Connection` and :class:`_engine.Engine`. The methods here define the name of an event as well as the names of members that are passed to listener functions. 
An event listener can be associated with any :class:`.Connectable` class or instance, such as an :class:`_engine.Engine`, e.g.:: from sqlalchemy import event, create_engine def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): log.info("Received statement: %s", statement) engine = create_engine('postgresql://scott:tiger@localhost/test') event.listen(engine, "before_cursor_execute", before_cursor_execute) or with a specific :class:`_engine.Connection`:: with engine.begin() as conn: @event.listens_for(conn, 'before_cursor_execute') def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): log.info("Received statement: %s", statement) When the methods are called with a `statement` parameter, such as in :meth:`.after_cursor_execute`, :meth:`.before_cursor_execute` and :meth:`.dbapi_error`, the statement is the exact SQL string that was prepared for transmission to the DBAPI ``cursor`` in the connection's :class:`.Dialect`. The :meth:`.before_execute` and :meth:`.before_cursor_execute` events can also be established with the ``retval=True`` flag, which allows modification of the statement and parameters to be sent to the database. The :meth:`.before_cursor_execute` event is particularly useful here to add ad-hoc string transformations, such as comments, to all executions:: from sqlalchemy.engine import Engine from sqlalchemy import event @event.listens_for(Engine, "before_cursor_execute", retval=True) def comment_sql_calls(conn, cursor, statement, parameters, context, executemany): statement = statement + " -- some comment" return statement, parameters .. note:: :class:`_events.ConnectionEvents` can be established on any combination of :class:`_engine.Engine`, :class:`_engine.Connection`, as well as instances of each of those classes. Events across all four scopes will fire off for a given instance of :class:`_engine.Connection`. However, for performance reasons, the :class:`_engine.Connection` object determines at instantiation time whether or not its parent :class:`_engine.Engine` has event listeners established. Event listeners added to the :class:`_engine.Engine` class or to an instance of :class:`_engine.Engine` *after* the instantiation of a dependent :class:`_engine.Connection` instance will usually *not* be available on that :class:`_engine.Connection` instance. The newly added listeners will instead take effect for :class:`_engine.Connection` instances created subsequent to those event listeners being established on the parent :class:`_engine.Engine` class or instance. :param retval=False: Applies to the :meth:`.before_execute` and :meth:`.before_cursor_execute` events only. When True, the user-defined event function must have a return value, which is a tuple of parameters that replace the given statement and parameters. See those methods for a description of specific return arguments. 
""" _target_class_doc = "SomeEngine" _dispatch_target = Connectable @classmethod def _listen(cls, event_key, retval=False): target, identifier, fn = ( event_key.dispatch_target, event_key.identifier, event_key._listen_fn, ) target._has_events = True if not retval: if identifier == "before_execute": orig_fn = fn def wrap_before_execute( conn, clauseelement, multiparams, params ): orig_fn(conn, clauseelement, multiparams, params) return clauseelement, multiparams, params fn = wrap_before_execute elif identifier == "before_cursor_execute": orig_fn = fn def wrap_before_cursor_execute( conn, cursor, statement, parameters, context, executemany ): orig_fn( conn, cursor, statement, parameters, context, executemany, ) return statement, parameters fn = wrap_before_cursor_execute elif retval and identifier not in ( "before_execute", "before_cursor_execute", "handle_error", ): raise exc.ArgumentError( "Only the 'before_execute', " "'before_cursor_execute' and 'handle_error' engine " "event listeners accept the 'retval=True' " "argument." ) event_key.with_wrapper(fn).base_listen() def before_execute(self, conn, clauseelement, multiparams, params): """Intercept high level execute() events, receiving uncompiled SQL constructs and other objects prior to rendering into SQL. This event is good for debugging SQL compilation issues as well as early manipulation of the parameters being sent to the database, as the parameter lists will be in a consistent format here. This event can be optionally established with the ``retval=True`` flag. The ``clauseelement``, ``multiparams``, and ``params`` arguments should be returned as a three-tuple in this case:: @event.listens_for(Engine, "before_execute", retval=True) def before_execute(conn, clauseelement, multiparams, params): # do something with clauseelement, multiparams, params return clauseelement, multiparams, params :param conn: :class:`_engine.Connection` object :param clauseelement: SQL expression construct, :class:`.Compiled` instance, or string statement passed to :meth:`_engine.Connection.execute`. :param multiparams: Multiple parameter sets, a list of dictionaries. :param params: Single parameter set, a single dictionary. .. seealso:: :meth:`.before_cursor_execute` """ def after_execute(self, conn, clauseelement, multiparams, params, result): """Intercept high level execute() events after execute. :param conn: :class:`_engine.Connection` object :param clauseelement: SQL expression construct, :class:`.Compiled` instance, or string statement passed to :meth:`_engine.Connection.execute`. :param multiparams: Multiple parameter sets, a list of dictionaries. :param params: Single parameter set, a single dictionary. :param result: :class:`_engine.ResultProxy` generated by the execution . """ def before_cursor_execute( self, conn, cursor, statement, parameters, context, executemany ): """Intercept low-level cursor execute() events before execution, receiving the string SQL statement and DBAPI-specific parameter list to be invoked against a cursor. This event is a good choice for logging as well as late modifications to the SQL string. It's less ideal for parameter modifications except for those which are specific to a target backend. This event can be optionally established with the ``retval=True`` flag. 
The ``statement`` and ``parameters`` arguments should be returned as a two-tuple in this case:: @event.listens_for(Engine, "before_cursor_execute", retval=True) def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): # do something with statement, parameters return statement, parameters See the example at :class:`_events.ConnectionEvents`. :param conn: :class:`_engine.Connection` object :param cursor: DBAPI cursor object :param statement: string SQL statement, as to be passed to the DBAPI :param parameters: Dictionary, tuple, or list of parameters being passed to the ``execute()`` or ``executemany()`` method of the DBAPI ``cursor``. In some cases may be ``None``. :param context: :class:`.ExecutionContext` object in use. May be ``None``. :param executemany: boolean, if ``True``, this is an ``executemany()`` call, if ``False``, this is an ``execute()`` call. .. seealso:: :meth:`.before_execute` :meth:`.after_cursor_execute` """ def after_cursor_execute( self, conn, cursor, statement, parameters, context, executemany ): """Intercept low-level cursor execute() events after execution. :param conn: :class:`_engine.Connection` object :param cursor: DBAPI cursor object. Will have results pending if the statement was a SELECT, but these should not be consumed as they will be needed by the :class:`_engine.ResultProxy`. :param statement: string SQL statement, as passed to the DBAPI :param parameters: Dictionary, tuple, or list of parameters being passed to the ``execute()`` or ``executemany()`` method of the DBAPI ``cursor``. In some cases may be ``None``. :param context: :class:`.ExecutionContext` object in use. May be ``None``. :param executemany: boolean, if ``True``, this is an ``executemany()`` call, if ``False``, this is an ``execute()`` call. """ @util.deprecated( "0.9", "The :meth:`_events.ConnectionEvents.dbapi_error` " "event is deprecated and will be removed in a future release. " "Please refer to the :meth:`_events.ConnectionEvents.handle_error` " "event.", ) def dbapi_error( self, conn, cursor, statement, parameters, context, exception ): """Intercept a raw DBAPI error. This event is called with the DBAPI exception instance received from the DBAPI itself, *before* SQLAlchemy wraps the exception with its own exception wrappers, and before any other operations are performed on the DBAPI cursor; the existing transaction remains in effect as well as any state on the cursor. The use case here is to inject low-level exception handling into an :class:`_engine.Engine`, typically for logging and debugging purposes. .. warning:: Code should **not** modify any state or throw any exceptions here as this will interfere with SQLAlchemy's cleanup and error handling routines. For exception modification, please refer to the new :meth:`_events.ConnectionEvents.handle_error` event. Subsequent to this hook, SQLAlchemy may attempt any number of operations on the connection/cursor, including closing the cursor, rolling back of the transaction in the case of connectionless execution, and disposing of the entire connection pool if a "disconnect" was detected. The exception is then wrapped in a SQLAlchemy DBAPI exception wrapper and re-thrown. :param conn: :class:`_engine.Connection` object :param cursor: DBAPI cursor object :param statement: string SQL statement, as passed to the DBAPI :param parameters: Dictionary, tuple, or list of parameters being passed to the ``execute()`` or ``executemany()`` method of the DBAPI ``cursor``. In some cases may be ``None``. 
:param context: :class:`.ExecutionContext` object in use. May be ``None``. :param exception: The **unwrapped** exception emitted directly from the DBAPI. The class here is specific to the DBAPI module in use. """ def handle_error(self, exception_context): r"""Intercept all exceptions processed by the :class:`_engine.Connection`. This includes all exceptions emitted by the DBAPI as well as within SQLAlchemy's statement invocation process, including encoding errors and other statement validation errors. Other areas in which the event is invoked include transaction begin and end, result row fetching, cursor creation. Note that :meth:`.handle_error` may support new kinds of exceptions and new calling scenarios at *any time*. Code which uses this event must expect new calling patterns to be present in minor releases. To support the wide variety of members that correspond to an exception, as well as to allow extensibility of the event without backwards incompatibility, the sole argument received is an instance of :class:`.ExceptionContext`. This object contains data members representing detail about the exception. Use cases supported by this hook include: * read-only, low-level exception handling for logging and debugging purposes * exception re-writing * Establishing or disabling whether a connection or the owning connection pool is invalidated or expired in response to a specific exception. The hook is called while the cursor from the failed operation (if any) is still open and accessible. Special cleanup operations can be called on this cursor; SQLAlchemy will attempt to close this cursor subsequent to this hook being invoked. If the connection is in "autocommit" mode, the transaction also remains open within the scope of this hook; the rollback of the per-statement transaction also occurs after the hook is called. For the common case of detecting a "disconnect" situation which is not currently handled by the SQLAlchemy dialect, the :attr:`.ExceptionContext.is_disconnect` flag can be set to True which will cause the exception to be considered as a disconnect situation, which typically results in the connection pool being invalidated:: @event.listens_for(Engine, "handle_error") def handle_exception(context): if isinstance(context.original_exception, pyodbc.Error): for code in ( '08S01', '01002', '08003', '08007', '08S02', '08001', 'HYT00', 'HY010'): if code in str(context.original_exception): context.is_disconnect = True A handler function has two options for replacing the SQLAlchemy-constructed exception with one that is user defined. It can either raise this new exception directly, in which case all further event listeners are bypassed and the exception will be raised, after appropriate cleanup has taken place:: @event.listens_for(Engine, "handle_error") def handle_exception(context): if isinstance(context.original_exception, psycopg2.OperationalError) and \ "failed" in str(context.original_exception): raise MySpecialException("failed operation") .. warning:: Because the :meth:`_events.ConnectionEvents.handle_error` event specifically provides for exceptions to be re-thrown as the ultimate exception raised by the failed statement, **stack traces will be misleading** if the user-defined event handler itself fails and throws an unexpected exception; the stack trace may not illustrate the actual code line that failed! It is advised to code carefully here and use logging and/or inline debugging if unexpected exceptions are occurring. 
Alternatively, a "chained" style of event handling can be used, by configuring the handler with the ``retval=True`` modifier and returning the new exception instance from the function. In this case, event handling will continue onto the next handler. The "chained" exception is available using :attr:`.ExceptionContext.chained_exception`:: @event.listens_for(Engine, "handle_error", retval=True) def handle_exception(context): if context.chained_exception is not None and \ "special" in context.chained_exception.message: return MySpecialException("failed", cause=context.chained_exception) Handlers that return ``None`` may be used within the chain; when a handler returns ``None``, the previous exception instance, if any, is maintained as the current exception that is passed onto the next handler. When a custom exception is raised or returned, SQLAlchemy raises this new exception as-is, it is not wrapped by any SQLAlchemy object. If the exception is not a subclass of :class:`sqlalchemy.exc.StatementError`, certain features may not be available; currently this includes the ORM's feature of adding a detail hint about "autoflush" to exceptions raised within the autoflush process. :param context: an :class:`.ExceptionContext` object. See this class for details on all available members. .. versionadded:: 0.9.7 Added the :meth:`_events.ConnectionEvents.handle_error` hook. .. versionchanged:: 1.1 The :meth:`.handle_error` event will now receive all exceptions that inherit from ``BaseException``, including ``SystemExit`` and ``KeyboardInterrupt``. The setting for :attr:`.ExceptionContext.is_disconnect` is ``True`` in this case and the default for :attr:`.ExceptionContext.invalidate_pool_on_disconnect` is ``False``. .. versionchanged:: 1.0.0 The :meth:`.handle_error` event is now invoked when an :class:`_engine.Engine` fails during the initial call to :meth:`_engine.Engine.connect`, as well as when a :class:`_engine.Connection` object encounters an error during a reconnect operation. .. versionchanged:: 1.0.0 The :meth:`.handle_error` event is not fired off when a dialect makes use of the ``skip_user_error_events`` execution option. This is used by dialects which intend to catch SQLAlchemy-specific exceptions within specific operations, such as when the MySQL dialect detects a table not present within the ``has_table()`` dialect method. Prior to 1.0.0, code which implements :meth:`.handle_error` needs to ensure that exceptions thrown in these scenarios are re-raised without modification. """ def engine_connect(self, conn, branch): """Intercept the creation of a new :class:`_engine.Connection`. This event is called typically as the direct result of calling the :meth:`_engine.Engine.connect` method. It differs from the :meth:`_events.PoolEvents.connect` method, which refers to the actual connection to a database at the DBAPI level; a DBAPI connection may be pooled and reused for many operations. In contrast, this event refers only to the production of a higher level :class:`_engine.Connection` wrapper around such a DBAPI connection. It also differs from the :meth:`_events.PoolEvents.checkout` event in that it is specific to the :class:`_engine.Connection` object, not the DBAPI connection that :meth:`_events.PoolEvents.checkout` deals with, although this DBAPI connection is available here via the :attr:`_engine.Connection.connection` attribute. 
But note there can in fact be multiple :meth:`_events.PoolEvents.checkout` events within the lifespan of a single :class:`_engine.Connection` object, if that :class:`_engine.Connection` is invalidated and re-established. There can also be multiple :class:`_engine.Connection` objects generated for the same already-checked-out DBAPI connection, in the case that a "branch" of a :class:`_engine.Connection` is produced. :param conn: :class:`_engine.Connection` object. :param branch: if True, this is a "branch" of an existing :class:`_engine.Connection`. A branch is generated within the course of a statement execution to invoke supplemental statements, most typically to pre-execute a SELECT of a default value for the purposes of an INSERT statement. .. versionadded:: 0.9.0 .. seealso:: :ref:`pool_disconnects_pessimistic` - illustrates how to use :meth:`_events.ConnectionEvents.engine_connect` to transparently ensure pooled connections are connected to the database. :meth:`_events.PoolEvents.checkout` the lower-level pool checkout event for an individual DBAPI connection :meth:`_events.ConnectionEvents.set_connection_execution_options` - a copy of a :class:`_engine.Connection` is also made when the :meth:`_engine.Connection.execution_options` method is called. """ def set_connection_execution_options(self, conn, opts): """Intercept when the :meth:`_engine.Connection.execution_options` method is called. This method is called after the new :class:`_engine.Connection` has been produced, with the newly updated execution options collection, but before the :class:`.Dialect` has acted upon any of those new options. Note that this method is not called when a new :class:`_engine.Connection` is produced which is inheriting execution options from its parent :class:`_engine.Engine`; to intercept this condition, use the :meth:`_events.ConnectionEvents.engine_connect` event. :param conn: The newly copied :class:`_engine.Connection` object :param opts: dictionary of options that were passed to the :meth:`_engine.Connection.execution_options` method. .. versionadded:: 0.9.0 .. seealso:: :meth:`_events.ConnectionEvents.set_engine_execution_options` - event which is called when :meth:`_engine.Engine.execution_options` is called. """ def set_engine_execution_options(self, engine, opts): """Intercept when the :meth:`_engine.Engine.execution_options` method is called. The :meth:`_engine.Engine.execution_options` method produces a shallow copy of the :class:`_engine.Engine` which stores the new options. That new :class:`_engine.Engine` is passed here. A particular application of this method is to add a :meth:`_events.ConnectionEvents.engine_connect` event handler to the given :class:`_engine.Engine` which will perform some per-:class:`_engine.Connection` task specific to these execution options. :param engine: The newly copied :class:`_engine.Engine` object :param opts: dictionary of options that were passed to the :meth:`_engine.Engine.execution_options` method. .. versionadded:: 0.9.0 .. seealso:: :meth:`_events.ConnectionEvents.set_connection_execution_options` - event which is called when :meth:`_engine.Connection.execution_options` is called. """ def engine_disposed(self, engine): """Intercept when the :meth:`_engine.Engine.dispose` method is called. The :meth:`_engine.Engine.dispose` method instructs the engine to "dispose" of its connection pool (e.g. :class:`_pool.Pool`), and replaces it with a new one. Disposing of the old pool has the effect that existing checked-in connections are closed.
The new pool does not establish any new connections until it is first used. This event can be used to indicate that resources related to the :class:`_engine.Engine` should also be cleaned up, keeping in mind that the :class:`_engine.Engine` can still be used for new requests in which case it re-acquires connection resources. .. versionadded:: 1.0.5 """ def begin(self, conn): """Intercept begin() events. :param conn: :class:`_engine.Connection` object """ def rollback(self, conn): """Intercept rollback() events, as initiated by a :class:`.Transaction`. Note that the :class:`_pool.Pool` also "auto-rolls back" a DBAPI connection upon checkin, if the ``reset_on_return`` flag is set to its default value of ``'rollback'``. To intercept this rollback, use the :meth:`_events.PoolEvents.reset` hook. :param conn: :class:`_engine.Connection` object .. seealso:: :meth:`_events.PoolEvents.reset` """ def commit(self, conn): """Intercept commit() events, as initiated by a :class:`.Transaction`. Note that the :class:`_pool.Pool` may also "auto-commit" a DBAPI connection upon checkin, if the ``reset_on_return`` flag is set to the value ``'commit'``. To intercept this commit, use the :meth:`_events.PoolEvents.reset` hook. :param conn: :class:`_engine.Connection` object """ def savepoint(self, conn, name): """Intercept savepoint() events. :param conn: :class:`_engine.Connection` object :param name: specified name used for the savepoint. """ def rollback_savepoint(self, conn, name, context): """Intercept rollback_savepoint() events. :param conn: :class:`_engine.Connection` object :param name: specified name used for the savepoint. :param context: :class:`.ExecutionContext` in use. May be ``None``. """ def release_savepoint(self, conn, name, context): """Intercept release_savepoint() events. :param conn: :class:`_engine.Connection` object :param name: specified name used for the savepoint. :param context: :class:`.ExecutionContext` in use. May be ``None``. """ def begin_twophase(self, conn, xid): """Intercept begin_twophase() events. :param conn: :class:`_engine.Connection` object :param xid: two-phase XID identifier """ def prepare_twophase(self, conn, xid): """Intercept prepare_twophase() events. :param conn: :class:`_engine.Connection` object :param xid: two-phase XID identifier """ def rollback_twophase(self, conn, xid, is_prepared): """Intercept rollback_twophase() events. :param conn: :class:`_engine.Connection` object :param xid: two-phase XID identifier :param is_prepared: boolean, indicates if :meth:`.TwoPhaseTransaction.prepare` was called. """ def commit_twophase(self, conn, xid, is_prepared): """Intercept commit_twophase() events. :param conn: :class:`_engine.Connection` object :param xid: two-phase XID identifier :param is_prepared: boolean, indicates if :meth:`.TwoPhaseTransaction.prepare` was called. """ class DialectEvents(event.Events): """event interface for execution-replacement functions. These events allow direct instrumentation and replacement of key dialect functions which interact with the DBAPI. .. note:: :class:`.DialectEvents` hooks should be considered **semi-public** and experimental. These hooks are not for general use and are only for those situations where intricate re-statement of DBAPI mechanics must be injected onto an existing dialect. For general-use statement-interception events, please use the :class:`_events.ConnectionEvents` interface. .. 
seealso:: :meth:`_events.ConnectionEvents.before_cursor_execute` :meth:`_events.ConnectionEvents.before_execute` :meth:`_events.ConnectionEvents.after_cursor_execute` :meth:`_events.ConnectionEvents.after_execute` .. versionadded:: 0.9.4 """ _target_class_doc = "SomeEngine" _dispatch_target = Dialect @classmethod def _listen(cls, event_key, retval=False): target = event_key.dispatch_target target._has_events = True event_key.base_listen() @classmethod def _accept_with(cls, target): if isinstance(target, type): if issubclass(target, Engine): return Dialect elif issubclass(target, Dialect): return target elif isinstance(target, Engine): return target.dialect else: return target def do_connect(self, dialect, conn_rec, cargs, cparams): """Receive connection arguments before a connection is made. Return a DBAPI connection to halt further events from invoking; the returned connection will be used. Alternatively, the event can manipulate the cargs and/or cparams collections; cargs will always be a Python list that can be mutated in-place and cparams a Python dictionary. Return None to allow control to pass to the next event handler and ultimately to allow the dialect to connect normally, given the updated arguments. .. versionadded:: 1.0.3 .. seealso:: :ref:`custom_dbapi_args` """ def do_executemany(self, cursor, statement, parameters, context): """Receive a cursor to have executemany() called. Return the value True to halt further events from invoking, and to indicate that the cursor execution has already taken place within the event handler. """ def do_execute_no_params(self, cursor, statement, context): """Receive a cursor to have execute() with no parameters called. Return the value True to halt further events from invoking, and to indicate that the cursor execution has already taken place within the event handler. """ def do_execute(self, cursor, statement, parameters, context): """Receive a cursor to have execute() called. Return the value True to halt further events from invoking, and to indicate that the cursor execution has already taken place within the event handler. """ def do_setinputsizes( self, inputsizes, cursor, statement, parameters, context ): """Receive the setinputsizes dictionary for possible modification. This event is emitted in the case where the dialect makes use of the DBAPI ``cursor.setinputsizes()`` method which passes information about parameter binding for a particular statement. The given ``inputsizes`` dictionary will contain :class:`.BindParameter` objects as keys, linked to DBAPI-specific type objects as values; for parameters that are not bound, they are added to the dictionary with ``None`` as the value, which means the parameter will not be included in the ultimate setinputsizes call. The event may be used to inspect and/or log the datatypes that are being bound, as well as to modify the dictionary in place. Parameters can be added, modified, or removed from this dictionary. Callers will typically want to inspect the :attr:`.BindParameter.type` attribute of the given bind objects in order to make decisions about the DBAPI object. After the event, the ``inputsizes`` dictionary is converted into an appropriate datastructure to be passed to ``cursor.setinputsizes``; either a list for a positional bound parameter execution style, or a dictionary of string parameter keys to DBAPI type objects for a named bound parameter execution style. Most dialects **do not use** this method at all; the only built-in dialect which uses this hook is the cx_Oracle dialect. 
The hook here is made available so as to allow customization of how datatypes are set up with the cx_Oracle DBAPI. .. versionadded:: 1.2.9 .. seealso:: :ref:`cx_oracle_setinputsizes` """ pass
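# Minimal sketch (illustrative only, not part of the event API above): a
# do_setinputsizes listener that logs the DBAPI type chosen for each bound
# parameter.  The "engine" and "logger" objects are supplied by the caller.
def _example_log_inputsizes(engine, logger):
    @event.listens_for(engine, "do_setinputsizes")
    def log_inputsizes(inputsizes, cursor, statement, parameters, context):
        for bindparam, dbapitype in inputsizes.items():
            # a value of None means the parameter will be omitted from
            # the ultimate cursor.setinputsizes() call
            logger.info(
                "bound parameter %r -> %s", bindparam.key, dbapitype
            )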
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/__init__.py
# sqlalchemy/__init__.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from . import util as _util # noqa from .inspection import inspect # noqa from .schema import BLANK_SCHEMA # noqa from .schema import CheckConstraint # noqa from .schema import Column # noqa from .schema import ColumnDefault # noqa from .schema import Computed # noqa from .schema import Constraint # noqa from .schema import DDL # noqa from .schema import DefaultClause # noqa from .schema import FetchedValue # noqa from .schema import ForeignKey # noqa from .schema import ForeignKeyConstraint # noqa from .schema import IdentityOptions # noqa from .schema import Index # noqa from .schema import MetaData # noqa from .schema import PassiveDefault # noqa from .schema import PrimaryKeyConstraint # noqa from .schema import Sequence # noqa from .schema import Table # noqa from .schema import ThreadLocalMetaData # noqa from .schema import UniqueConstraint # noqa from .sql import alias # noqa from .sql import all_ # noqa from .sql import and_ # noqa from .sql import any_ # noqa from .sql import asc # noqa from .sql import between # noqa from .sql import bindparam # noqa from .sql import case # noqa from .sql import cast # noqa from .sql import collate # noqa from .sql import column # noqa from .sql import delete # noqa from .sql import desc # noqa from .sql import distinct # noqa from .sql import except_ # noqa from .sql import except_all # noqa from .sql import exists # noqa from .sql import extract # noqa from .sql import false # noqa from .sql import func # noqa from .sql import funcfilter # noqa from .sql import insert # noqa from .sql import intersect # noqa from .sql import intersect_all # noqa from .sql import join # noqa from .sql import lateral # noqa from .sql import literal # noqa from .sql import literal_column # noqa from .sql import modifier # noqa from .sql import not_ # noqa from .sql import null # noqa from .sql import nullsfirst # noqa from .sql import nullslast # noqa from .sql import or_ # noqa from .sql import outerjoin # noqa from .sql import outparam # noqa from .sql import over # noqa from .sql import select # noqa from .sql import subquery # noqa from .sql import table # noqa from .sql import tablesample # noqa from .sql import text # noqa from .sql import true # noqa from .sql import tuple_ # noqa from .sql import type_coerce # noqa from .sql import union # noqa from .sql import union_all # noqa from .sql import update # noqa from .sql import within_group # noqa from .types import ARRAY # noqa from .types import BIGINT # noqa from .types import BigInteger # noqa from .types import BINARY # noqa from .types import Binary # noqa from .types import BLOB # noqa from .types import BOOLEAN # noqa from .types import Boolean # noqa from .types import CHAR # noqa from .types import CLOB # noqa from .types import DATE # noqa from .types import Date # noqa from .types import DATETIME # noqa from .types import DateTime # noqa from .types import DECIMAL # noqa from .types import Enum # noqa from .types import FLOAT # noqa from .types import Float # noqa from .types import INT # noqa from .types import INTEGER # noqa from .types import Integer # noqa from .types import Interval # noqa from .types import JSON # noqa from .types import LargeBinary # noqa from .types import NCHAR # noqa from .types import NUMERIC # noqa from .types import Numeric # noqa 
from .types import NVARCHAR # noqa from .types import PickleType # noqa from .types import REAL # noqa from .types import SMALLINT # noqa from .types import SmallInteger # noqa from .types import String # noqa from .types import TEXT # noqa from .types import Text # noqa from .types import TIME # noqa from .types import Time # noqa from .types import TIMESTAMP # noqa from .types import TypeDecorator # noqa from .types import Unicode # noqa from .types import UnicodeText # noqa from .types import VARBINARY # noqa from .types import VARCHAR # noqa from .engine import create_engine # noqa nosort from .engine import engine_from_config # noqa nosort __version__ = '1.3.18' def __go(lcls): global __all__ from . import events # noqa from . import util as _sa_util import inspect as _inspect __all__ = sorted( name for name, obj in lcls.items() if not (name.startswith("_") or _inspect.ismodule(obj)) ) _sa_util.dependencies.resolve_all("sqlalchemy") from . import exc exc._version_token = "".join(__version__.split(".")[0:2]) __go(locals())
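# Minimal usage sketch (illustrative only) of the names re-exported above,
# run against a throwaway in-memory SQLite database; the "users" table is
# hypothetical.
def _example_quickstart():
    engine = create_engine("sqlite://")
    metadata = MetaData()
    users = Table(
        "users",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("name", String(50)),
    )
    # emit CREATE TABLE statements for all tables in this MetaData
    metadata.create_all(engine)
    with engine.connect() as conn:
        conn.execute(users.insert().values(name="spongebob"))
        # 1.3-style select() takes a list of selectables
        return conn.execute(select([users])).fetchall()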
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/types.py
# types.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Compatibility namespace for sqlalchemy.sql.types. """ __all__ = [ "TypeEngine", "TypeDecorator", "UserDefinedType", "INT", "CHAR", "VARCHAR", "NCHAR", "NVARCHAR", "TEXT", "Text", "FLOAT", "NUMERIC", "REAL", "DECIMAL", "TIMESTAMP", "DATETIME", "CLOB", "BLOB", "BINARY", "VARBINARY", "BOOLEAN", "BIGINT", "SMALLINT", "INTEGER", "DATE", "TIME", "String", "Integer", "SmallInteger", "BigInteger", "Numeric", "Float", "DateTime", "Date", "Time", "LargeBinary", "Binary", "Boolean", "Unicode", "Concatenable", "UnicodeText", "PickleType", "Interval", "Enum", "Indexable", "ARRAY", "JSON", ] from .sql.sqltypes import _Binary # noqa from .sql.sqltypes import ARRAY # noqa from .sql.sqltypes import BIGINT # noqa from .sql.sqltypes import BigInteger # noqa from .sql.sqltypes import BINARY # noqa from .sql.sqltypes import Binary # noqa from .sql.sqltypes import BLOB # noqa from .sql.sqltypes import BOOLEAN # noqa from .sql.sqltypes import Boolean # noqa from .sql.sqltypes import CHAR # noqa from .sql.sqltypes import CLOB # noqa from .sql.sqltypes import Concatenable # noqa from .sql.sqltypes import DATE # noqa from .sql.sqltypes import Date # noqa from .sql.sqltypes import DATETIME # noqa from .sql.sqltypes import DateTime # noqa from .sql.sqltypes import DECIMAL # noqa from .sql.sqltypes import Enum # noqa from .sql.sqltypes import FLOAT # noqa from .sql.sqltypes import Float # noqa from .sql.sqltypes import Indexable # noqa from .sql.sqltypes import INT # noqa from .sql.sqltypes import INTEGER # noqa from .sql.sqltypes import Integer # noqa from .sql.sqltypes import Interval # noqa from .sql.sqltypes import JSON # noqa from .sql.sqltypes import LargeBinary # noqa from .sql.sqltypes import MatchType # noqa from .sql.sqltypes import NCHAR # noqa from .sql.sqltypes import NULLTYPE # noqa from .sql.sqltypes import NullType # noqa from .sql.sqltypes import NUMERIC # noqa from .sql.sqltypes import Numeric # noqa from .sql.sqltypes import NVARCHAR # noqa from .sql.sqltypes import PickleType # noqa from .sql.sqltypes import REAL # noqa from .sql.sqltypes import SchemaType # noqa from .sql.sqltypes import SMALLINT # noqa from .sql.sqltypes import SmallInteger # noqa from .sql.sqltypes import String # noqa from .sql.sqltypes import STRINGTYPE # noqa from .sql.sqltypes import TEXT # noqa from .sql.sqltypes import Text # noqa from .sql.sqltypes import TIME # noqa from .sql.sqltypes import Time # noqa from .sql.sqltypes import TIMESTAMP # noqa from .sql.sqltypes import Unicode # noqa from .sql.sqltypes import UnicodeText # noqa from .sql.sqltypes import VARBINARY # noqa from .sql.sqltypes import VARCHAR # noqa from .sql.type_api import adapt_type # noqa from .sql.type_api import to_instance # noqa from .sql.type_api import TypeDecorator # noqa from .sql.type_api import TypeEngine # noqa from .sql.type_api import UserDefinedType # noqa from .sql.type_api import Variant # noqa
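# Minimal sketch (illustrative only): a TypeDecorator built from the names
# re-exported above, coercing bound values to lowercase on the way into the
# database.
class _LowercaseString(TypeDecorator):
    """String type that lowercases values before binding."""

    impl = String

    def process_bind_param(self, value, dialect):
        # None passes through unchanged, matching SQLAlchemy convention
        return value.lower() if value is not None else None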
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/inspection.py
# sqlalchemy/inspection.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """The inspection module provides the :func:`_sa.inspect` function, which delivers runtime information about a wide variety of SQLAlchemy objects, both within the Core as well as the ORM. The :func:`_sa.inspect` function is the entry point to SQLAlchemy's public API for viewing the configuration and construction of in-memory objects. Depending on the type of object passed to :func:`_sa.inspect`, the return value will either be a related object which provides a known interface, or in many cases it will return the object itself. The rationale for :func:`_sa.inspect` is twofold. One is that it replaces the need to be aware of a large variety of "information getting" functions in SQLAlchemy, such as :meth:`_reflection.Inspector.from_engine`, :func:`.orm.attributes.instance_state`, :func:`_orm.class_mapper`, and others. The other is that the return value of :func:`_sa.inspect` is guaranteed to obey a documented API, thus allowing third party tools which build on top of SQLAlchemy configurations to be constructed in a forwards-compatible way. """ from . import exc from . import util _registrars = util.defaultdict(list) def inspect(subject, raiseerr=True): """Produce an inspection object for the given target. The returned value in some cases may be the same object as the one given, such as when a :class:`_orm.Mapper` object is passed. In other cases, it will be an instance of the registered inspection type for the given object; for example, if an :class:`_engine.Engine` is passed, an :class:`_reflection.Inspector` object is returned. :param subject: the subject to be inspected. :param raiseerr: When ``True``, if the given subject does not correspond to a known SQLAlchemy inspected type, :class:`sqlalchemy.exc.NoInspectionAvailable` is raised. If ``False``, ``None`` is returned. """ type_ = type(subject) for cls in type_.__mro__: if cls in _registrars: reg = _registrars[cls] if reg is True: return subject ret = reg(subject) if ret is not None: break else: reg = ret = None if raiseerr and (reg is None or ret is None): raise exc.NoInspectionAvailable( "No inspection system is " "available for object of type %s" % type_ ) return ret def _inspects(*types): def decorate(fn_or_cls): for type_ in types: if type_ in _registrars: raise AssertionError( "Type %s is already " "registered" % type_ ) _registrars[type_] = fn_or_cls return fn_or_cls return decorate def _self_inspects(cls): _inspects(cls)(True) return cls
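# Minimal sketch (illustrative only): registering an inspection type for a
# hypothetical Widget class via the module-private _inspects decorator above.
def _example_register_widget_inspection():
    class Widget(object):
        pass

    class WidgetInfo(object):
        def __init__(self, widget):
            self.widget = widget

    @_inspects(Widget)
    def _inspect_widget(subject):
        # a non-None return ends the MRO search inside inspect()
        return WidgetInfo(subject)

    # afterwards, inspect(Widget()) returns a WidgetInfo instance, while
    # inspect(object(), raiseerr=False) still returns None
    return Widget, WidgetInfo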
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/exc.py
# sqlalchemy/exc.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Exceptions used with SQLAlchemy. The base exception class is :exc:`.SQLAlchemyError`. Exceptions which are raised as a result of DBAPI exceptions are all subclasses of :exc:`.DBAPIError`. """ from .util import compat _version_token = None class SQLAlchemyError(Exception): """Generic error class.""" code = None def __init__(self, *arg, **kw): code = kw.pop("code", None) if code is not None: self.code = code super(SQLAlchemyError, self).__init__(*arg, **kw) def _code_str(self): if not self.code: return "" else: return ( "(Background on this error at: " "http://sqlalche.me/e/%s/%s)" % (_version_token, self.code,) ) def _message(self, as_unicode=compat.py3k): # rules: # # 1. under py2k, for __str__ return single string arg as it was # given without converting to unicode. for __unicode__ # do a conversion but check that it's not unicode already just in # case # # 2. under py3k, single arg string will usually be a unicode # object, but since __str__() must return unicode, check for # bytestring just in case # # 3. for multiple self.args, this is not a case within current # SQLAlchemy, though it does happen in at least one known external # library; call str(), which does a repr(). # if len(self.args) == 1: text = self.args[0] if as_unicode and isinstance(text, compat.binary_types): return compat.decode_backslashreplace(text, "utf-8") else: return self.args[0] else: # this is not a normal case within SQLAlchemy but is here for # compatibility with Exception.args - the str() comes out as # a repr() of the tuple return str(self.args) def _sql_message(self, as_unicode): message = self._message(as_unicode) if self.code: message = "%s %s" % (message, self._code_str()) return message def __str__(self): return self._sql_message(compat.py3k) def __unicode__(self): return self._sql_message(as_unicode=True) class ArgumentError(SQLAlchemyError): """Raised when an invalid or conflicting function argument is supplied. This error generally corresponds to construction time state errors. """ class ObjectNotExecutableError(ArgumentError): """Raised when an object is passed to .execute() that can't be executed as SQL. .. versionadded:: 1.1 """ def __init__(self, target): super(ObjectNotExecutableError, self).__init__( "Not an executable object: %r" % target ) class NoSuchModuleError(ArgumentError): """Raised when a dynamically-loaded module (usually a database dialect) of a particular name cannot be located.""" class NoForeignKeysError(ArgumentError): """Raised when no foreign keys can be located between two selectables during a join.""" class AmbiguousForeignKeysError(ArgumentError): """Raised when more than one matching foreign key can be located between two selectables during a join.""" class CircularDependencyError(SQLAlchemyError): """Raised by topological sorts when a circular dependency is detected. There are two scenarios where this error occurs: * In a Session flush operation, if two objects are mutually dependent on each other, they cannot be inserted or deleted via INSERT or DELETE statements alone; an UPDATE will be needed to post-associate or pre-deassociate one of the foreign key constrained values. The ``post_update`` flag described at :ref:`post_update` can resolve this cycle.
* In a :attr:`_schema.MetaData.sorted_tables` operation, two :class:`_schema.ForeignKey` or :class:`_schema.ForeignKeyConstraint` objects mutually refer to each other. Apply the ``use_alter=True`` flag to one or both; see :ref:`use_alter`. """ def __init__(self, message, cycles, edges, msg=None, code=None): if msg is None: message += " (%s)" % ", ".join(repr(s) for s in cycles) else: message = msg SQLAlchemyError.__init__(self, message, code=code) self.cycles = cycles self.edges = edges def __reduce__(self): return self.__class__, (None, self.cycles, self.edges, self.args[0]) class CompileError(SQLAlchemyError): """Raised when an error occurs during SQL compilation.""" class UnsupportedCompilationError(CompileError): """Raised when an operation is not supported by the given compiler. .. seealso:: :ref:`faq_sql_expression_string` :ref:`error_l7de` """ code = "l7de" def __init__(self, compiler, element_type): super(UnsupportedCompilationError, self).__init__( "Compiler %r can't render element of type %s" % (compiler, element_type) ) class IdentifierError(SQLAlchemyError): """Raised when a schema name is beyond the max character limit.""" class DisconnectionError(SQLAlchemyError): """A disconnect is detected on a raw DB-API connection. This error is raised and consumed internally by a connection pool. It can be raised by the :meth:`_events.PoolEvents.checkout` event so that the host pool forces a retry; the exception will be caught three times in a row before the pool gives up and raises :class:`~sqlalchemy.exc.InvalidRequestError` regarding the connection attempt. """ invalidate_pool = False class InvalidatePoolError(DisconnectionError): """Raised when the connection pool should invalidate all stale connections. A subclass of :class:`_exc.DisconnectionError` that indicates that the disconnect situation encountered on the connection probably means the entire pool should be invalidated, as the database has been restarted. This exception is otherwise handled the same way as :class:`_exc.DisconnectionError`, allowing three attempts to reconnect before giving up. .. versionadded:: 1.2 """ invalidate_pool = True class TimeoutError(SQLAlchemyError): # noqa """Raised when a connection pool times out on getting a connection.""" class InvalidRequestError(SQLAlchemyError): """SQLAlchemy was asked to do something it can't do. This error generally corresponds to runtime state errors. """ class NoInspectionAvailable(InvalidRequestError): """A subject passed to :func:`sqlalchemy.inspection.inspect` produced no context for inspection.""" class ResourceClosedError(InvalidRequestError): """An operation was requested from a connection, cursor, or other object that's in a closed state.""" class NoSuchColumnError(KeyError, InvalidRequestError): """A nonexistent column is requested from a ``RowProxy``.""" class NoReferenceError(InvalidRequestError): """Raised by ``ForeignKey`` to indicate a reference cannot be resolved.""" class NoReferencedTableError(NoReferenceError): """Raised by ``ForeignKey`` when the referred ``Table`` cannot be located. """ def __init__(self, message, tname): NoReferenceError.__init__(self, message) self.table_name = tname def __reduce__(self): return self.__class__, (self.args[0], self.table_name) class NoReferencedColumnError(NoReferenceError): """Raised by ``ForeignKey`` when the referred ``Column`` cannot be located.
""" def __init__(self, message, tname, cname): NoReferenceError.__init__(self, message) self.table_name = tname self.column_name = cname def __reduce__(self): return ( self.__class__, (self.args[0], self.table_name, self.column_name), ) class NoSuchTableError(InvalidRequestError): """Table does not exist or is not visible to a connection.""" class UnreflectableTableError(InvalidRequestError): """Table exists but can't be reflected for some reason. .. versionadded:: 1.2 """ class UnboundExecutionError(InvalidRequestError): """SQL was attempted without a database connection to execute it on.""" class DontWrapMixin(object): """A mixin class which, when applied to a user-defined Exception class, will not be wrapped inside of :exc:`.StatementError` if the error is emitted within the process of executing a statement. E.g.:: from sqlalchemy.exc import DontWrapMixin class MyCustomException(Exception, DontWrapMixin): pass class MySpecialType(TypeDecorator): impl = String def process_bind_param(self, value, dialect): if value == 'invalid': raise MyCustomException("invalid!") """ # Moved to orm.exc; compatibility definition installed by orm import until 0.6 UnmappedColumnError = None class StatementError(SQLAlchemyError): """An error occurred during execution of a SQL statement. :class:`StatementError` wraps the exception raised during execution, and features :attr:`.statement` and :attr:`.params` attributes which supply context regarding the specifics of the statement which had an issue. The wrapped exception object is available in the :attr:`.orig` attribute. """ statement = None """The string SQL statement being invoked when this exception occurred.""" params = None """The parameter list being used when this exception occurred.""" orig = None """The DBAPI exception object.""" ismulti = None def __init__( self, message, statement, params, orig, hide_parameters=False, code=None, ismulti=None, ): SQLAlchemyError.__init__(self, message, code=code) self.statement = statement self.params = params self.orig = orig self.ismulti = ismulti self.hide_parameters = hide_parameters self.detail = [] def add_detail(self, msg): self.detail.append(msg) def __reduce__(self): return ( self.__class__, ( self.args[0], self.statement, self.params, self.orig, self.hide_parameters, self.ismulti, ), ) def _sql_message(self, as_unicode): from sqlalchemy.sql import util details = [self._message(as_unicode=as_unicode)] if self.statement: if not as_unicode and not compat.py3k: stmt_detail = "[SQL: %s]" % compat.safe_bytestring( self.statement ) else: stmt_detail = "[SQL: %s]" % self.statement details.append(stmt_detail) if self.params: if self.hide_parameters: details.append( "[SQL parameters hidden due to hide_parameters=True]" ) else: params_repr = util._repr_params( self.params, 10, ismulti=self.ismulti ) details.append("[parameters: %r]" % params_repr) code_str = self._code_str() if code_str: details.append(code_str) return "\n".join(["(%s)" % det for det in self.detail] + details) class DBAPIError(StatementError): """Raised when the execution of a database operation fails. Wraps exceptions raised by the DB-API underlying the database operation. Driver-specific implementations of the standard DB-API exception types are wrapped by matching sub-types of SQLAlchemy's :class:`DBAPIError` when possible. DB-API's ``Error`` type maps to :class:`DBAPIError` in SQLAlchemy, otherwise the names are identical. 
Note that there is no guarantee that different DB-API implementations will raise the same exception type for any given error condition. :class:`DBAPIError` features :attr:`~.StatementError.statement` and :attr:`~.StatementError.params` attributes which supply context regarding the specifics of the statement which had an issue, for the typical case when the error was raised within the context of emitting a SQL statement. The wrapped exception object is available in the :attr:`~.StatementError.orig` attribute. Its type and properties are DB-API implementation specific. """ code = "dbapi" @classmethod def instance( cls, statement, params, orig, dbapi_base_err, hide_parameters=False, connection_invalidated=False, dialect=None, ismulti=None, ): # Don't ever wrap these, just return them directly as if # DBAPIError didn't exist. if ( isinstance(orig, BaseException) and not isinstance(orig, Exception) ) or isinstance(orig, DontWrapMixin): return orig if orig is not None: # not a DBAPI error, statement is present. # raise a StatementError if isinstance(orig, SQLAlchemyError) and statement: return StatementError( "(%s.%s) %s" % ( orig.__class__.__module__, orig.__class__.__name__, orig.args[0], ), statement, params, orig, hide_parameters=hide_parameters, code=orig.code, ismulti=ismulti, ) elif not isinstance(orig, dbapi_base_err) and statement: return StatementError( "(%s.%s) %s" % ( orig.__class__.__module__, orig.__class__.__name__, orig, ), statement, params, orig, hide_parameters=hide_parameters, ismulti=ismulti, ) glob = globals() for super_ in orig.__class__.__mro__: name = super_.__name__ if dialect: name = dialect.dbapi_exception_translation_map.get( name, name ) if name in glob and issubclass(glob[name], DBAPIError): cls = glob[name] break return cls( statement, params, orig, connection_invalidated=connection_invalidated, hide_parameters=hide_parameters, code=cls.code, ismulti=ismulti, ) def __reduce__(self): return ( self.__class__, ( self.statement, self.params, self.orig, self.hide_parameters, self.connection_invalidated, self.ismulti, ), ) def __init__( self, statement, params, orig, hide_parameters=False, connection_invalidated=False, code=None, ismulti=None, ): try: text = str(orig) except Exception as e: text = "Error in str() of DB-API-generated exception: " + str(e) StatementError.__init__( self, "(%s.%s) %s" % (orig.__class__.__module__, orig.__class__.__name__, text), statement, params, orig, hide_parameters, code=code, ismulti=ismulti, ) self.connection_invalidated = connection_invalidated class InterfaceError(DBAPIError): """Wraps a DB-API InterfaceError.""" code = "rvf5" class DatabaseError(DBAPIError): """Wraps a DB-API DatabaseError.""" code = "4xp6" class DataError(DatabaseError): """Wraps a DB-API DataError.""" code = "9h9h" class OperationalError(DatabaseError): """Wraps a DB-API OperationalError.""" code = "e3q8" class IntegrityError(DatabaseError): """Wraps a DB-API IntegrityError.""" code = "gkpj" class InternalError(DatabaseError): """Wraps a DB-API InternalError.""" code = "2j85" class ProgrammingError(DatabaseError): """Wraps a DB-API ProgrammingError.""" code = "f405" class NotSupportedError(DatabaseError): """Wraps a DB-API NotSupportedError.""" code = "tw8g" # Warnings class SADeprecationWarning(DeprecationWarning): """Issued once per usage of a deprecated API.""" class SAPendingDeprecationWarning(PendingDeprecationWarning): """Issued once per usage of a deprecated API.""" class SAWarning(RuntimeWarning): """Issued at runtime."""
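# Minimal usage sketch (illustrative only, not used by SQLAlchemy itself):
# running a statement while surfacing the wrapped DBAPI failure detail that
# the hierarchy above provides.
def _example_report_dbapi_error(engine, stmt):
    """Execute ``stmt`` on ``engine``, re-raising wrapped DBAPI failures
    with the statement and raw DBAPI exception included in the message."""
    try:
        return engine.execute(stmt)
    except DBAPIError as err:
        # err.orig is the raw DBAPI exception; err.statement and
        # err.params carry the failing SQL and its parameters
        raise RuntimeError(
            "statement failed: %r (original: %r)" % (err.statement, err.orig)
        )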
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/processors.py
# sqlalchemy/processors.py # Copyright (C) 2010-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """defines generic type conversion functions, as used in bind and result processors. They all share one common characteristic: None is passed through unchanged. """ import codecs import datetime import re from . import util def str_to_datetime_processor_factory(regexp, type_): rmatch = regexp.match # Even on python2.6 datetime.strptime is both slower than this code # and it does not support microseconds. has_named_groups = bool(regexp.groupindex) def process(value): if value is None: return None else: try: m = rmatch(value) except TypeError as err: util.raise_( ValueError( "Couldn't parse %s string '%r' " "- value is not a string." % (type_.__name__, value) ), from_=err, ) if m is None: raise ValueError( "Couldn't parse %s string: " "'%s'" % (type_.__name__, value) ) if has_named_groups: groups = m.groupdict(0) return type_( **dict( list( zip( iter(groups.keys()), list(map(int, iter(groups.values()))), ) ) ) ) else: return type_(*list(map(int, m.groups(0)))) return process def py_fallback(): def to_unicode_processor_factory(encoding, errors=None): decoder = codecs.getdecoder(encoding) def process(value): if value is None: return None else: # decoder returns a tuple: (value, len). Simply dropping the # len part is safe: it is done that way in the normal # 'xx'.decode(encoding) code path. return decoder(value, errors)[0] return process def to_conditional_unicode_processor_factory(encoding, errors=None): decoder = codecs.getdecoder(encoding) def process(value): if value is None: return None elif isinstance(value, util.text_type): return value else: # decoder returns a tuple: (value, len). Simply dropping the # len part is safe: it is done that way in the normal # 'xx'.decode(encoding) code path. return decoder(value, errors)[0] return process def to_decimal_processor_factory(target_class, scale): fstring = "%%.%df" % scale def process(value): if value is None: return None else: return target_class(fstring % value) return process def to_float(value): # noqa if value is None: return None else: return float(value) def to_str(value): # noqa if value is None: return None else: return str(value) def int_to_boolean(value): # noqa if value is None: return None else: return bool(value) DATETIME_RE = re.compile( r"(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)(?:\.(\d+))?" 
) TIME_RE = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d+))?") DATE_RE = re.compile(r"(\d+)-(\d+)-(\d+)") str_to_datetime = str_to_datetime_processor_factory( # noqa DATETIME_RE, datetime.datetime ) str_to_time = str_to_datetime_processor_factory( # noqa TIME_RE, datetime.time ) # noqa str_to_date = str_to_datetime_processor_factory( # noqa DATE_RE, datetime.date ) # noqa return locals() try: from sqlalchemy.cprocessors import DecimalResultProcessor # noqa from sqlalchemy.cprocessors import int_to_boolean # noqa from sqlalchemy.cprocessors import str_to_date # noqa from sqlalchemy.cprocessors import str_to_datetime # noqa from sqlalchemy.cprocessors import str_to_time # noqa from sqlalchemy.cprocessors import to_float # noqa from sqlalchemy.cprocessors import to_str # noqa from sqlalchemy.cprocessors import UnicodeResultProcessor # noqa def to_unicode_processor_factory(encoding, errors=None): if errors is not None: return UnicodeResultProcessor(encoding, errors).process else: return UnicodeResultProcessor(encoding).process def to_conditional_unicode_processor_factory(encoding, errors=None): if errors is not None: return UnicodeResultProcessor(encoding, errors).conditional_process else: return UnicodeResultProcessor(encoding).conditional_process def to_decimal_processor_factory(target_class, scale): # Note that the scale argument is not taken into account for integer # values in the C implementation while it is in the Python one. # For example, the Python implementation might return # Decimal('5.00000') whereas the C implementation will # return Decimal('5'). These are equivalent of course. return DecimalResultProcessor(target_class, "%%.%df" % scale).process except ImportError: globals().update(py_fallback())
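# Minimal sketch (illustrative only) of the None-pass-through contract shared
# by the processors above, exercised via the generated date processor.
def _example_date_processor():
    # None always passes through unchanged
    assert str_to_date(None) is None
    # a matching string is parsed into the target type
    assert str_to_date("2020-07-01") == datetime.date(2020, 7, 1)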
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/schema.py
# schema.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Compatibility namespace for sqlalchemy.sql.schema and related. """ from .sql.base import SchemaVisitor # noqa from .sql.ddl import _CreateDropBase # noqa from .sql.ddl import _DDLCompiles # noqa from .sql.ddl import _DropView # noqa from .sql.ddl import AddConstraint # noqa from .sql.ddl import CreateColumn # noqa from .sql.ddl import CreateIndex # noqa from .sql.ddl import CreateSchema # noqa from .sql.ddl import CreateSequence # noqa from .sql.ddl import CreateTable # noqa from .sql.ddl import DDL # noqa from .sql.ddl import DDLBase # noqa from .sql.ddl import DDLElement # noqa from .sql.ddl import DropColumnComment # noqa from .sql.ddl import DropConstraint # noqa from .sql.ddl import DropIndex # noqa from .sql.ddl import DropSchema # noqa from .sql.ddl import DropSequence # noqa from .sql.ddl import DropTable # noqa from .sql.ddl import DropTableComment # noqa from .sql.ddl import SetColumnComment # noqa from .sql.ddl import SetTableComment # noqa from .sql.ddl import sort_tables # noqa from .sql.ddl import sort_tables_and_constraints # noqa from .sql.naming import conv # noqa from .sql.schema import _get_table_key # noqa from .sql.schema import BLANK_SCHEMA # noqa from .sql.schema import CheckConstraint # noqa from .sql.schema import Column # noqa from .sql.schema import ColumnCollectionConstraint # noqa from .sql.schema import ColumnCollectionMixin # noqa from .sql.schema import ColumnDefault # noqa from .sql.schema import Computed # noqa from .sql.schema import Constraint # noqa from .sql.schema import DefaultClause # noqa from .sql.schema import DefaultGenerator # noqa from .sql.schema import FetchedValue # noqa from .sql.schema import ForeignKey # noqa from .sql.schema import ForeignKeyConstraint # noqa from .sql.schema import Index # noqa from .sql.schema import IdentityOptions # noqa from .sql.schema import MetaData # noqa from .sql.schema import PassiveDefault # noqa from .sql.schema import PrimaryKeyConstraint # noqa from .sql.schema import SchemaItem # noqa from .sql.schema import Sequence # noqa from .sql.schema import Table # noqa from .sql.schema import ThreadLocalMetaData # noqa from .sql.schema import UniqueConstraint # noqa
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/connectors/mxodbc.py
# connectors/mxodbc.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ Provide a SQLAlchemy connector for the eGenix mxODBC commercial Python adapter for ODBC. This is not a free product, but eGenix provides SQLAlchemy with a license for use in continuous integration testing. This has been tested for use with mxODBC 3.1.2 on SQL Server 2005 and 2008, using the SQL Server Native driver. However, it is possible to use it on other database platforms. For more info on mxODBC, see http://www.egenix.com/ """ import re import sys import warnings from . import Connector class MxODBCConnector(Connector): driver = "mxodbc" supports_sane_multi_rowcount = False supports_unicode_statements = True supports_unicode_binds = True supports_native_decimal = True @classmethod def dbapi(cls): # this classmethod will normally be replaced by an instance # attribute of the same name, so this is normally only called once. cls._load_mx_exceptions() platform = sys.platform if platform == "win32": from mx.ODBC import Windows as Module # this can be the string "linux2", and possibly others elif "linux" in platform: from mx.ODBC import unixODBC as Module elif platform == "darwin": from mx.ODBC import iODBC as Module else: raise ImportError("Unrecognized platform for mxODBC import") return Module @classmethod def _load_mx_exceptions(cls): """ Import mxODBC exception classes into the module namespace, as if they had been imported normally. This is done here to avoid requiring all SQLAlchemy users to install mxODBC. """ global InterfaceError, ProgrammingError from mx.ODBC import InterfaceError from mx.ODBC import ProgrammingError def on_connect(self): def connect(conn): conn.stringformat = self.dbapi.MIXED_STRINGFORMAT conn.datetimeformat = self.dbapi.PYDATETIME_DATETIMEFORMAT conn.decimalformat = self.dbapi.DECIMAL_DECIMALFORMAT conn.errorhandler = self._error_handler() return connect def _error_handler(self): """ Return a handler that adjusts mxODBC's raised Warnings to emit Python standard warnings. """ from mx.ODBC.Error import Warning as MxOdbcWarning def error_handler(connection, cursor, errorclass, errorvalue): if issubclass(errorclass, MxOdbcWarning): errorclass.__bases__ = (Warning,) warnings.warn( message=str(errorvalue), category=errorclass, stacklevel=2 ) else: raise errorclass(errorvalue) return error_handler def create_connect_args(self, url): r"""Return a tuple of \*args, \**kwargs for creating a connection. The mxODBC 3.x connection constructor looks like this: connect(dsn, user='', password='', clear_auto_commit=1, errorhandler=None) This method translates the values in the provided URL into args and kwargs needed to instantiate an mxODBC Connection. The arg 'errorhandler' is not used by SQLAlchemy and will not be populated. """ opts = url.translate_connect_args(username="user") opts.update(url.query) args = opts.pop("host") opts.pop("port", None) opts.pop("database", None) return (args,), opts def is_disconnect(self, e, connection, cursor): # TODO: eGenix recommends checking connection.closed here # Does that detect dropped connections?
if isinstance(e, self.dbapi.ProgrammingError): return "connection already closed" in str(e) elif isinstance(e, self.dbapi.Error): return "[08S01]" in str(e) else: return False def _get_server_version_info(self, connection): # eGenix suggests using conn.dbms_version instead # of what we're doing here dbapi_con = connection.connection version = [] r = re.compile(r"[.\-]") # 18 == pyodbc.SQL_DBMS_VER for n in r.split(dbapi_con.getinfo(18)[1]): try: version.append(int(n)) except ValueError: version.append(n) return tuple(version) def _get_direct(self, context): if context: native_odbc_execute = context.execution_options.get( "native_odbc_execute", "auto" ) # default to direct=True in all cases, is more generally # compatible especially with SQL Server return False if native_odbc_execute is True else True else: return True def do_executemany(self, cursor, statement, parameters, context=None): cursor.executemany( statement, parameters, direct=self._get_direct(context) ) def do_execute(self, cursor, statement, parameters, context=None): cursor.execute(statement, parameters, direct=self._get_direct(context))
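# Minimal usage sketch (illustrative only): this connector is consumed
# through a dialect URL rather than instantiated directly, e.g.:
#
#     from sqlalchemy import create_engine
#     engine = create_engine("mssql+mxodbc://scott:tiger@some_dsn")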
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/connectors/zxJDBC.py
# connectors/zxJDBC.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import sys from . import Connector class ZxJDBCConnector(Connector): driver = "zxjdbc" supports_sane_rowcount = False supports_sane_multi_rowcount = False supports_unicode_binds = True supports_unicode_statements = sys.version > "2.5.0+" description_encoding = None default_paramstyle = "qmark" jdbc_db_name = None jdbc_driver_name = None @classmethod def dbapi(cls): from com.ziclix.python.sql import zxJDBC return zxJDBC def _driver_kwargs(self): """Return kw arg dict to be sent to connect().""" return {} def _create_jdbc_url(self, url): """Create a JDBC url from a :class:`~sqlalchemy.engine.url.URL`""" return "jdbc:%s://%s%s/%s" % ( self.jdbc_db_name, url.host, url.port is not None and ":%s" % url.port or "", url.database, ) def create_connect_args(self, url): opts = self._driver_kwargs() opts.update(url.query) return [ [ self._create_jdbc_url(url), url.username, url.password, self.jdbc_driver_name, ], opts, ] def is_disconnect(self, e, connection, cursor): if not isinstance(e, self.dbapi.ProgrammingError): return False e = str(e) return "connection is closed" in e or "cursor is closed" in e def _get_server_version_info(self, connection): # use connection.connection.dbversion, and parse appropriately # to get a tuple raise NotImplementedError()
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/connectors/__init__.py
# connectors/__init__.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php class Connector(object): pass
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/connectors/pyodbc.py
# connectors/pyodbc.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import re from . import Connector from .. import util class PyODBCConnector(Connector): driver = "pyodbc" # this is no longer False for pyodbc in general supports_sane_rowcount_returning = True supports_sane_multi_rowcount = False supports_unicode_statements = True supports_unicode_binds = True supports_native_decimal = True default_paramstyle = "named" # for non-DSN connections, this *may* be used to # hold the desired driver name pyodbc_driver_name = None def __init__(self, supports_unicode_binds=None, **kw): super(PyODBCConnector, self).__init__(**kw) if supports_unicode_binds is not None: self.supports_unicode_binds = supports_unicode_binds @classmethod def dbapi(cls): return __import__("pyodbc") def create_connect_args(self, url): opts = url.translate_connect_args(username="user") opts.update(url.query) keys = opts query = url.query connect_args = {} for param in ("ansi", "unicode_results", "autocommit"): if param in keys: connect_args[param] = util.asbool(keys.pop(param)) if "odbc_connect" in keys: connectors = [util.unquote_plus(keys.pop("odbc_connect"))] else: def check_quote(token): if ";" in str(token): token = "{%s}" % token.replace("}", "}}") return token keys = dict((k, check_quote(v)) for k, v in keys.items()) dsn_connection = "dsn" in keys or ( "host" in keys and "database" not in keys ) if dsn_connection: connectors = [ "dsn=%s" % (keys.pop("host", "") or keys.pop("dsn", "")) ] else: port = "" if "port" in keys and "port" not in query: port = ",%d" % int(keys.pop("port")) connectors = [] driver = keys.pop("driver", self.pyodbc_driver_name) if driver is None and keys: # note if keys is empty, this is a totally blank URL util.warn( "No driver name specified; " "this is expected by PyODBC when using " "DSN-less connections" ) else: connectors.append("DRIVER={%s}" % driver) connectors.extend( [ "Server=%s%s" % (keys.pop("host", ""), port), "Database=%s" % keys.pop("database", ""), ] ) user = keys.pop("user", None) if user: connectors.append("UID=%s" % user) connectors.append("PWD=%s" % keys.pop("password", "")) else: connectors.append("Trusted_Connection=Yes") # if set to 'Yes', the ODBC layer will try to automagically # convert textual data from your database encoding to your # client encoding. This should obviously be set to 'No' if # you query a cp1253 encoded database from a latin1 client... if "odbc_autotranslate" in keys: connectors.append( "AutoTranslate=%s" % keys.pop("odbc_autotranslate") ) connectors.extend(["%s=%s" % (k, v) for k, v in keys.items()]) return [[";".join(connectors)], connect_args] def is_disconnect(self, e, connection, cursor): if isinstance(e, self.dbapi.ProgrammingError): return "The cursor's connection has been closed." in str( e ) or "Attempt to use a closed connection." 
in str(e) else: return False # def initialize(self, connection): # super(PyODBCConnector, self).initialize(connection) def _dbapi_version(self): if not self.dbapi: return () return self._parse_dbapi_version(self.dbapi.version) def _parse_dbapi_version(self, vers): m = re.match(r"(?:py.*-)?([\d\.]+)(?:-(\w+))?", vers) if not m: return () vers = tuple([int(x) for x in m.group(1).split(".")]) if m.group(2): vers += (m.group(2),) return vers def _get_server_version_info(self, connection, allow_chars=True): # NOTE: this function is not reliable, particularly when # freetds is in use. Implement database-specific server version # queries. dbapi_con = connection.connection version = [] r = re.compile(r"[.\-]") for n in r.split(dbapi_con.getinfo(self.dbapi.SQL_DBMS_VER)): try: version.append(int(n)) except ValueError: if allow_chars: version.append(n) return tuple(version) def set_isolation_level(self, connection, level): # adjust for ConnectionFairy being present # allows attribute set e.g. "connection.autocommit = True" # to work properly if hasattr(connection, "connection"): connection = connection.connection if level == "AUTOCOMMIT": connection.autocommit = True else: connection.autocommit = False super(PyODBCConnector, self).set_isolation_level(connection, level)
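# Minimal usage sketch (illustrative only) of the URL forms that
# create_connect_args() above understands; host, database, and driver
# names are hypothetical.
#
#     from sqlalchemy import create_engine
#
#     # DSN-based connection: the "host" portion is treated as the DSN name
#     create_engine("mssql+pyodbc://scott:tiger@some_dsn")
#
#     # hostname-based connection: DSN-less URLs are expected to name a driver
#     create_engine(
#         "mssql+pyodbc://scott:tiger@myhost:1433/mydb"
#         "?driver=ODBC+Driver+17+for+SQL+Server"
#     )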
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/databases/__init__.py
# databases/__init__.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Include imports from the sqlalchemy.dialects package for backwards compatibility with pre 0.6 versions. """ from ..dialects.firebird import base as firebird from ..dialects.mssql import base as mssql from ..dialects.mysql import base as mysql from ..dialects.oracle import base as oracle from ..dialects.postgresql import base as postgresql from ..dialects.sqlite import base as sqlite from ..dialects.sybase import base as sybase postgres = postgresql __all__ = ( "firebird", "mssql", "mysql", "postgresql", "sqlite", "oracle", "sybase", )
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/util/queue.py
# util/queue.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""An adaptation of Py2.3/2.4's Queue module which supports reentrant
behavior, using RLock instead of Lock for its mutex object.  The
Queue object is used exclusively by the sqlalchemy.pool.QueuePool
class.

This is to support the connection pool's usage of weakref callbacks to return
connections to the underlying Queue, which can in extremely
rare cases be invoked within the ``get()`` method of the Queue itself,
producing a ``put()`` inside the ``get()`` and therefore a reentrant
condition.

"""

from collections import deque
from time import time as _time

from .compat import threading


__all__ = ["Empty", "Full", "Queue"]


class Empty(Exception):
    "Exception raised by Queue.get(block=0)/get_nowait()."

    pass


class Full(Exception):
    "Exception raised by Queue.put(block=0)/put_nowait()."

    pass


class Queue:
    def __init__(self, maxsize=0, use_lifo=False):
        """Initialize a queue object with a given maximum size.

        If `maxsize` is <= 0, the queue size is infinite.

        If `use_lifo` is True, this Queue acts like a Stack (LIFO).
        """

        self._init(maxsize)
        # mutex must be held whenever the queue is mutating.  All methods
        # that acquire mutex must release it before returning.  mutex
        # is shared between the two conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        self.mutex = threading.RLock()
        # Notify not_empty whenever an item is added to the queue; a
        # thread waiting to get is notified then.
        self.not_empty = threading.Condition(self.mutex)
        # Notify not_full whenever an item is removed from the queue;
        # a thread waiting to put is notified then.
        self.not_full = threading.Condition(self.mutex)
        # If this queue uses LIFO or FIFO
        self.use_lifo = use_lifo

    def qsize(self):
        """Return the approximate size of the queue (not reliable!)."""

        self.mutex.acquire()
        n = self._qsize()
        self.mutex.release()
        return n

    def empty(self):
        """Return True if the queue is empty, False otherwise (not
        reliable!)."""

        self.mutex.acquire()
        n = self._empty()
        self.mutex.release()
        return n

    def full(self):
        """Return True if the queue is full, False otherwise (not
        reliable!)."""

        self.mutex.acquire()
        n = self._full()
        self.mutex.release()
        return n

    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.

        If optional args `block` is True and `timeout` is None (the
        default), block if necessary until a free slot is
        available.  If `timeout` is a positive number, it blocks at
        most `timeout` seconds and raises the ``Full`` exception if no
        free slot was available within that time.  Otherwise (`block`
        is false), put an item on the queue if a free slot is
        immediately available, else raise the ``Full`` exception
        (`timeout` is ignored in that case).
        """

        self.not_full.acquire()
        try:
            if not block:
                if self._full():
                    raise Full
            elif timeout is None:
                while self._full():
                    self.not_full.wait()
            else:
                if timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                endtime = _time() + timeout
                while self._full():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Full
                    self.not_full.wait(remaining)
            self._put(item)
            self.not_empty.notify()
        finally:
            self.not_full.release()

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the ``Full`` exception.
        """
        return self.put(item, False)

    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.

        If optional args `block` is True and `timeout` is None (the
        default), block if necessary until an item is available.  If
        `timeout` is a positive number, it blocks at most `timeout`
        seconds and raises the ``Empty`` exception if no item was
        available within that time.  Otherwise (`block` is false),
        return an item if one is immediately available, else raise the
        ``Empty`` exception (`timeout` is ignored in that case).
        """
        self.not_empty.acquire()
        try:
            if not block:
                if self._empty():
                    raise Empty
            elif timeout is None:
                while self._empty():
                    self.not_empty.wait()
            else:
                if timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                endtime = _time() + timeout
                while self._empty():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Empty
                    self.not_empty.wait(remaining)
            item = self._get()
            self.not_full.notify()
            return item
        finally:
            self.not_empty.release()

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        Only get an item if one is immediately available.  Otherwise
        raise the ``Empty`` exception.
        """

        return self.get(False)

    # Override these methods to implement other queue organizations
    # (e.g. stack or priority queue).
    # These will only be called with appropriate locks held

    # Initialize the queue representation
    def _init(self, maxsize):
        self.maxsize = maxsize
        self.queue = deque()

    def _qsize(self):
        return len(self.queue)

    # Check whether the queue is empty
    def _empty(self):
        return not self.queue

    # Check whether the queue is full
    def _full(self):
        return self.maxsize > 0 and len(self.queue) == self.maxsize

    # Put a new item in the queue
    def _put(self, item):
        self.queue.append(item)

    # Get an item from the queue
    def _get(self):
        if self.use_lifo:
            # LIFO
            return self.queue.pop()
        else:
            # FIFO
            return self.queue.popleft()
0
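A brief usage sketch of this vendored Queue, assuming it is importable as sqlalchemy.util.queue per the file path above; the reentrancy property only matters inside QueuePool's weakref callbacks, so this just illustrates the FIFO/LIFO and non-blocking behavior:

    from sqlalchemy.util.queue import Empty, Queue

    q = Queue(maxsize=2)          # FIFO by default
    q.put("a")
    q.put("b")
    assert q.get() == "a"         # first in, first out

    stack = Queue(maxsize=2, use_lifo=True)
    stack.put("a")
    stack.put("b")
    assert stack.get() == "b"     # LIFO when use_lifo=True

    try:
        Queue().get_nowait()      # non-blocking get on an empty queue
    except Empty:
        print("queue was empty")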
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/util/topological.py
# util/topological.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Topological sorting algorithms."""

from .. import util
from ..exc import CircularDependencyError


__all__ = ["sort", "sort_as_subsets", "find_cycles"]


def sort_as_subsets(tuples, allitems, deterministic_order=False):

    edges = util.defaultdict(set)
    for parent, child in tuples:
        edges[child].add(parent)

    Set = util.OrderedSet if deterministic_order else set

    todo = Set(allitems)

    while todo:
        output = Set()
        for node in todo:
            if todo.isdisjoint(edges[node]):
                output.add(node)

        if not output:
            raise CircularDependencyError(
                "Circular dependency detected.",
                find_cycles(tuples, allitems),
                _gen_edges(edges),
            )

        todo.difference_update(output)
        yield output


def sort(tuples, allitems, deterministic_order=False):
    """sort the given list of items by dependency.

    'tuples' is a list of tuples representing a partial ordering.

    'deterministic_order' keeps items within a dependency tier in list order.
    """

    for set_ in sort_as_subsets(tuples, allitems, deterministic_order):
        for s in set_:
            yield s


def find_cycles(tuples, allitems):
    # adapted from:
    # http://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html

    edges = util.defaultdict(set)
    for parent, child in tuples:
        edges[parent].add(child)
    nodes_to_test = set(edges)

    output = set()

    # we'd like to find all nodes that are
    # involved in cycles, so we do the full
    # pass through the whole thing for each
    # node in the original list.

    # we can go just through parent edge nodes.
    # if a node is only a child and never a parent,
    # by definition it can't be part of a cycle.  same
    # if it's not in the edges at all.
    for node in nodes_to_test:
        stack = [node]
        todo = nodes_to_test.difference(stack)
        while stack:
            top = stack[-1]
            for node in edges[top]:
                if node in stack:
                    cyc = stack[stack.index(node) :]
                    todo.difference_update(cyc)
                    output.update(cyc)

                if node in todo:
                    stack.append(node)
                    todo.remove(node)
                    break
            else:
                node = stack.pop()
    return output


def _gen_edges(edges):
    return set([(right, left) for left in edges for right in edges[left]])
0
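A small sketch of how these functions behave, assuming the module is importable as sqlalchemy.util.topological; the (parent, child) tuples below are invented for illustration:

    from sqlalchemy.util.topological import find_cycles, sort

    # each tuple is (parent, child): the parent must sort before the child
    tuples = [("base", "table_a"), ("base", "table_b"), ("table_a", "table_c")]
    allitems = ["base", "table_a", "table_b", "table_c"]

    print(list(sort(tuples, allitems, deterministic_order=True)))
    # parents precede children: ['base', 'table_a', 'table_b', 'table_c']

    # find_cycles returns the set of nodes participating in any cycle
    print(find_cycles([("x", "y"), ("y", "x")], ["x", "y"]))  # {'x', 'y'}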
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/util/compat.py
# util/compat.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Handle Python version/platform incompatibilities.""" import collections import contextlib import inspect import operator import sys py36 = sys.version_info >= (3, 6) py33 = sys.version_info >= (3, 3) py35 = sys.version_info >= (3, 5) py32 = sys.version_info >= (3, 2) py3k = sys.version_info >= (3, 0) py2k = sys.version_info < (3, 0) py265 = sys.version_info >= (2, 6, 5) jython = sys.platform.startswith("java") pypy = hasattr(sys, "pypy_version_info") win32 = sys.platform.startswith("win") cpython = not pypy and not jython # TODO: something better for this ? contextmanager = contextlib.contextmanager dottedgetter = operator.attrgetter namedtuple = collections.namedtuple next = next # noqa FullArgSpec = collections.namedtuple( "FullArgSpec", [ "args", "varargs", "varkw", "defaults", "kwonlyargs", "kwonlydefaults", "annotations", ], ) try: import threading except ImportError: import dummy_threading as threading # noqa # work around http://bugs.python.org/issue2646 if py265: safe_kwarg = lambda arg: arg # noqa else: safe_kwarg = str def inspect_getfullargspec(func): """Fully vendored version of getfullargspec from Python 3.3.""" if inspect.ismethod(func): func = func.__func__ if not inspect.isfunction(func): raise TypeError("{!r} is not a Python function".format(func)) co = func.__code__ if not inspect.iscode(co): raise TypeError("{!r} is not a code object".format(co)) nargs = co.co_argcount names = co.co_varnames nkwargs = co.co_kwonlyargcount if py3k else 0 args = list(names[:nargs]) kwonlyargs = list(names[nargs : nargs + nkwargs]) nargs += nkwargs varargs = None if co.co_flags & inspect.CO_VARARGS: varargs = co.co_varnames[nargs] nargs = nargs + 1 varkw = None if co.co_flags & inspect.CO_VARKEYWORDS: varkw = co.co_varnames[nargs] return FullArgSpec( args, varargs, varkw, func.__defaults__, kwonlyargs, func.__kwdefaults__ if py3k else None, func.__annotations__ if py3k else {}, ) if py3k: import base64 import builtins import configparser import itertools import pickle from functools import reduce from io import BytesIO as byte_buffer from io import StringIO from itertools import zip_longest from urllib.parse import ( quote_plus, unquote_plus, parse_qsl, quote, unquote, ) string_types = (str,) binary_types = (bytes,) binary_type = bytes text_type = str int_types = (int,) iterbytes = iter itertools_filterfalse = itertools.filterfalse itertools_filter = filter itertools_imap = map exec_ = getattr(builtins, "exec") import_ = getattr(builtins, "__import__") print_ = getattr(builtins, "print") def b(s): return s.encode("latin-1") def b64decode(x): return base64.b64decode(x.encode("ascii")) def b64encode(x): return base64.b64encode(x).decode("ascii") def decode_backslashreplace(text, encoding): return text.decode(encoding, errors="backslashreplace") def cmp(a, b): return (a > b) - (a < b) def raise_( exception, with_traceback=None, replace_context=None, from_=False ): r"""implement "raise" with cause support. :param exception: exception to raise :param with_traceback: will call exception.with_traceback() :param replace_context: an as-yet-unsupported feature. This is an exception object which we are "replacing", e.g., it's our "cause" but we don't want it printed. 
Basically just what ``__suppress_context__`` does but we don't want to suppress the enclosing context, if any. So for now we make it the cause. :param from\_: the cause. this actually sets the cause and doesn't hope to hide it someday. """ if with_traceback is not None: exception = exception.with_traceback(with_traceback) if from_ is not False: exception.__cause__ = from_ elif replace_context is not None: # no good solution here, we would like to have the exception # have only the context of replace_context.__context__ so that the # intermediary exception does not change, but we can't figure # that out. exception.__cause__ = replace_context try: raise exception finally: # credit to # https://cosmicpercolator.com/2016/01/13/exception-leaks-in-python-2-and-3/ # as the __traceback__ object creates a cycle del exception, replace_context, from_, with_traceback def u(s): return s def ue(s): return s if py32: callable = callable # noqa else: def callable(fn): # noqa return hasattr(fn, "__call__") else: import base64 import ConfigParser as configparser # noqa import itertools from StringIO import StringIO # noqa from cStringIO import StringIO as byte_buffer # noqa from itertools import izip_longest as zip_longest # noqa from urllib import quote # noqa from urllib import quote_plus # noqa from urllib import unquote # noqa from urllib import unquote_plus # noqa from urlparse import parse_qsl # noqa try: import cPickle as pickle except ImportError: import pickle # noqa string_types = (basestring,) # noqa binary_types = (bytes,) binary_type = str text_type = unicode # noqa int_types = int, long # noqa callable = callable # noqa cmp = cmp # noqa reduce = reduce # noqa b64encode = base64.b64encode b64decode = base64.b64decode itertools_filterfalse = itertools.ifilterfalse itertools_filter = itertools.ifilter itertools_imap = itertools.imap def b(s): return s def exec_(func_text, globals_, lcl=None): if lcl is None: exec("exec func_text in globals_") else: exec("exec func_text in globals_, lcl") def iterbytes(buf): return (ord(byte) for byte in buf) def import_(*args): if len(args) == 4: args = args[0:3] + ([str(arg) for arg in args[3]],) return __import__(*args) def print_(*args, **kwargs): fp = kwargs.pop("file", sys.stdout) if fp is None: return for arg in enumerate(args): if not isinstance(arg, basestring): # noqa arg = str(arg) fp.write(arg) def u(s): # this differs from what six does, which doesn't support non-ASCII # strings - we only use u() with # literal source strings, and all our source files with non-ascii # in them (all are tests) are utf-8 encoded. 
return unicode(s, "utf-8") # noqa def ue(s): return unicode(s, "unicode_escape") # noqa def decode_backslashreplace(text, encoding): try: return text.decode(encoding) except UnicodeDecodeError: # regular "backslashreplace" for an incompatible encoding raises: # "TypeError: don't know how to handle UnicodeDecodeError in # error callback" return repr(text)[1:-1].decode() def safe_bytestring(text): # py2k only if not isinstance(text, string_types): return unicode(text).encode("ascii", errors="backslashreplace") elif isinstance(text, unicode): return text.encode("ascii", errors="backslashreplace") else: return text exec( "def raise_(exception, with_traceback=None, replace_context=None, " "from_=False):\n" " if with_traceback:\n" " raise type(exception), exception, with_traceback\n" " else:\n" " raise exception\n" ) if py35: def _formatannotation(annotation, base_module=None): """vendored from python 3.7 """ if getattr(annotation, "__module__", None) == "typing": return repr(annotation).replace("typing.", "") if isinstance(annotation, type): if annotation.__module__ in ("builtins", base_module): return annotation.__qualname__ return annotation.__module__ + "." + annotation.__qualname__ return repr(annotation) def inspect_formatargspec( args, varargs=None, varkw=None, defaults=None, kwonlyargs=(), kwonlydefaults={}, annotations={}, formatarg=str, formatvarargs=lambda name: "*" + name, formatvarkw=lambda name: "**" + name, formatvalue=lambda value: "=" + repr(value), formatreturns=lambda text: " -> " + text, formatannotation=_formatannotation, ): """Copy formatargspec from python 3.7 standard library. Python 3 has deprecated formatargspec and requested that Signature be used instead, however this requires a full reimplementation of formatargspec() in terms of creating Parameter objects and such. Instead of introducing all the object-creation overhead and having to reinvent from scratch, just copy their compatibility routine. Utimately we would need to rewrite our "decorator" routine completely which is not really worth it right now, until all Python 2.x support is dropped. """ def formatargandannotation(arg): result = formatarg(arg) if arg in annotations: result += ": " + formatannotation(annotations[arg]) return result specs = [] if defaults: firstdefault = len(args) - len(defaults) for i, arg in enumerate(args): spec = formatargandannotation(arg) if defaults and i >= firstdefault: spec = spec + formatvalue(defaults[i - firstdefault]) specs.append(spec) if varargs is not None: specs.append(formatvarargs(formatargandannotation(varargs))) else: if kwonlyargs: specs.append("*") if kwonlyargs: for kwonlyarg in kwonlyargs: spec = formatargandannotation(kwonlyarg) if kwonlydefaults and kwonlyarg in kwonlydefaults: spec += formatvalue(kwonlydefaults[kwonlyarg]) specs.append(spec) if varkw is not None: specs.append(formatvarkw(formatargandannotation(varkw))) result = "(" + ", ".join(specs) + ")" if "return" in annotations: result += formatreturns(formatannotation(annotations["return"])) return result elif py2k: from inspect import formatargspec as _inspect_formatargspec def inspect_formatargspec(*spec, **kw): # convert for a potential FullArgSpec from compat.getfullargspec() return _inspect_formatargspec(*spec[0:4], **kw) # noqa else: from inspect import formatargspec as inspect_formatargspec # noqa # Fix deprecation of accessing ABCs straight from collections module # (which will stop working in 3.8). 
if py33: import collections.abc as collections_abc else: import collections as collections_abc # noqa @contextlib.contextmanager def nested(*managers): """Implement contextlib.nested, mostly for unit tests. As tests still need to run on py2.6 we can't use multiple-with yet. Function is removed in py3k but also emits deprecation warning in 2.7 so just roll it here for everyone. """ exits = [] vars_ = [] exc = (None, None, None) try: for mgr in managers: exit_ = mgr.__exit__ enter = mgr.__enter__ vars_.append(enter()) exits.append(exit_) yield vars_ except: exc = sys.exc_info() finally: while exits: exit_ = exits.pop() # noqa try: if exit_(*exc): exc = (None, None, None) except: exc = sys.exc_info() if exc != (None, None, None): reraise(exc[0], exc[1], exc[2]) def raise_from_cause(exception, exc_info=None): r"""legacy. use raise\_()""" if exc_info is None: exc_info = sys.exc_info() exc_type, exc_value, exc_tb = exc_info cause = exc_value if exc_value is not exception else None reraise(type(exception), exception, tb=exc_tb, cause=cause) def reraise(tp, value, tb=None, cause=None): r"""legacy. use raise\_()""" raise_(value, with_traceback=tb, from_=cause) def with_metaclass(meta, *bases): """Create a base class with a metaclass. Drops the middle class upon creation. Source: http://lucumr.pocoo.org/2013/5/21/porting-to-python-3-redux/ """ class metaclass(meta): __call__ = type.__call__ __init__ = type.__init__ def __new__(cls, name, this_bases, d): if this_bases is None: return type.__new__(cls, name, (), d) return meta(name, bases, d) return metaclass("temporary_class", None, {}) if py3k: from datetime import timezone else: from datetime import datetime from datetime import timedelta from datetime import tzinfo class timezone(tzinfo): """Minimal port of python 3 timezone object""" __slots__ = "_offset" def __init__(self, offset): if not isinstance(offset, timedelta): raise TypeError("offset must be a timedelta") if not self._minoffset <= offset <= self._maxoffset: raise ValueError( "offset must be a timedelta " "strictly between -timedelta(hours=24) and " "timedelta(hours=24)." 
) self._offset = offset def __eq__(self, other): if type(other) != timezone: return False return self._offset == other._offset def __hash__(self): return hash(self._offset) def __repr__(self): return "sqlalchemy.util.%s(%r)" % ( self.__class__.__name__, self._offset, ) def __str__(self): return self.tzname(None) def utcoffset(self, dt): return self._offset def tzname(self, dt): return self._name_from_offset(self._offset) def dst(self, dt): return None def fromutc(self, dt): if isinstance(dt, datetime): if dt.tzinfo is not self: raise ValueError("fromutc: dt.tzinfo " "is not self") return dt + self._offset raise TypeError( "fromutc() argument must be a datetime instance" " or None" ) @staticmethod def _timedelta_to_microseconds(timedelta): """backport of timedelta._to_microseconds()""" return ( timedelta.days * (24 * 3600) + timedelta.seconds ) * 1000000 + timedelta.microseconds @staticmethod def _divmod_timedeltas(a, b): """backport of timedelta.__divmod__""" q, r = divmod( timezone._timedelta_to_microseconds(a), timezone._timedelta_to_microseconds(b), ) return q, timedelta(0, 0, r) @staticmethod def _name_from_offset(delta): if not delta: return "UTC" if delta < timedelta(0): sign = "-" delta = -delta else: sign = "+" hours, rest = timezone._divmod_timedeltas( delta, timedelta(hours=1) ) minutes, rest = timezone._divmod_timedeltas( rest, timedelta(minutes=1) ) result = "UTC%s%02d:%02d" % (sign, hours, minutes) if rest.seconds: result += ":%02d" % (rest.seconds,) if rest.microseconds: result += ".%06d" % (rest.microseconds,) return result _maxoffset = timedelta(hours=23, minutes=59) _minoffset = -_maxoffset timezone.utc = timezone(timedelta(0))
0
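One way to exercise the portable exception-chaining helper defined above; a minimal sketch, not SQLAlchemy's own calling code, and the connect() function is invented:

    from sqlalchemy.util.compat import raise_

    def connect():
        try:
            raise OSError("network unreachable")
        except OSError as err:
            # on py3k this sets __cause__ (like "raise ... from err");
            # on py2k the exec'd fallback simply re-raises
            raise_(RuntimeError("could not connect"), from_=err)

    try:
        connect()
    except RuntimeError as err:
        print(err.__cause__)  # network unreachable (Python 3 only)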
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/util/__init__.py
# util/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

from collections import defaultdict  # noqa
from contextlib import contextmanager  # noqa
from functools import partial  # noqa
from functools import update_wrapper  # noqa

from ._collections import coerce_generator_arg  # noqa
from ._collections import collections_abc  # noqa
from ._collections import column_dict  # noqa
from ._collections import column_set  # noqa
from ._collections import EMPTY_SET  # noqa
from ._collections import flatten_iterator  # noqa
from ._collections import has_dupes  # noqa
from ._collections import has_intersection  # noqa
from ._collections import IdentitySet  # noqa
from ._collections import ImmutableContainer  # noqa
from ._collections import immutabledict  # noqa
from ._collections import ImmutableProperties  # noqa
from ._collections import KeyedTuple  # noqa
from ._collections import lightweight_named_tuple  # noqa
from ._collections import LRUCache  # noqa
from ._collections import ordered_column_set  # noqa
from ._collections import OrderedDict  # noqa
from ._collections import OrderedIdentitySet  # noqa
from ._collections import OrderedProperties  # noqa
from ._collections import OrderedSet  # noqa
from ._collections import PopulateDict  # noqa
from ._collections import Properties  # noqa
from ._collections import ScopedRegistry  # noqa
from ._collections import ThreadLocalRegistry  # noqa
from ._collections import to_column_set  # noqa
from ._collections import to_list  # noqa
from ._collections import to_set  # noqa
from ._collections import unique_list  # noqa
from ._collections import UniqueAppender  # noqa
from ._collections import update_copy  # noqa
from ._collections import WeakPopulateDict  # noqa
from ._collections import WeakSequence  # noqa
from .compat import b  # noqa
from .compat import b64decode  # noqa
from .compat import b64encode  # noqa
from .compat import binary_type  # noqa
from .compat import byte_buffer  # noqa
from .compat import callable  # noqa
from .compat import cmp  # noqa
from .compat import cpython  # noqa
from .compat import decode_backslashreplace  # noqa
from .compat import dottedgetter  # noqa
from .compat import inspect_getfullargspec  # noqa
from .compat import int_types  # noqa
from .compat import iterbytes  # noqa
from .compat import itertools_filter  # noqa
from .compat import itertools_filterfalse  # noqa
from .compat import jython  # noqa
from .compat import namedtuple  # noqa
from .compat import nested  # noqa
from .compat import next  # noqa
from .compat import parse_qsl  # noqa
from .compat import pickle  # noqa
from .compat import print_  # noqa
from .compat import py2k  # noqa
from .compat import py33  # noqa
from .compat import py36  # noqa
from .compat import py3k  # noqa
from .compat import pypy  # noqa
from .compat import quote_plus  # noqa
from .compat import raise_  # noqa
from .compat import raise_from_cause  # noqa
from .compat import reduce  # noqa
from .compat import reraise  # noqa
from .compat import safe_kwarg  # noqa
from .compat import string_types  # noqa
from .compat import StringIO  # noqa
from .compat import text_type  # noqa
from .compat import threading  # noqa
from .compat import timezone  # noqa
from .compat import u  # noqa
from .compat import ue  # noqa
from .compat import unquote  # noqa
from .compat import unquote_plus  # noqa
from .compat import win32  # noqa
from .compat import with_metaclass  # noqa
from .compat import zip_longest  # noqa
from .deprecations import deprecated  # noqa
from .deprecations import deprecated_cls  # noqa
from .deprecations import deprecated_params  # noqa
from .deprecations import inject_docstring_text  # noqa
from .deprecations import pending_deprecation  # noqa
from .deprecations import warn_deprecated  # noqa
from .deprecations import warn_pending_deprecation  # noqa
from .langhelpers import add_parameter_text  # noqa
from .langhelpers import as_interface  # noqa
from .langhelpers import asbool  # noqa
from .langhelpers import asint  # noqa
from .langhelpers import assert_arg_type  # noqa
from .langhelpers import attrsetter  # noqa
from .langhelpers import bool_or_str  # noqa
from .langhelpers import chop_traceback  # noqa
from .langhelpers import class_hierarchy  # noqa
from .langhelpers import classproperty  # noqa
from .langhelpers import clsname_as_plain_name  # noqa
from .langhelpers import coerce_kw_type  # noqa
from .langhelpers import constructor_copy  # noqa
from .langhelpers import counter  # noqa
from .langhelpers import decode_slice  # noqa
from .langhelpers import decorator  # noqa
from .langhelpers import dependencies  # noqa
from .langhelpers import dictlike_iteritems  # noqa
from .langhelpers import duck_type_collection  # noqa
from .langhelpers import ellipses_string  # noqa
from .langhelpers import EnsureKWArgType  # noqa
from .langhelpers import format_argspec_init  # noqa
from .langhelpers import format_argspec_plus  # noqa
from .langhelpers import generic_repr  # noqa
from .langhelpers import get_callable_argspec  # noqa
from .langhelpers import get_cls_kwargs  # noqa
from .langhelpers import get_func_kwargs  # noqa
from .langhelpers import getargspec_init  # noqa
from .langhelpers import group_expirable_memoized_property  # noqa
from .langhelpers import hybridmethod  # noqa
from .langhelpers import hybridproperty  # noqa
from .langhelpers import iterate_attributes  # noqa
from .langhelpers import map_bits  # noqa
from .langhelpers import md5_hex  # noqa
from .langhelpers import memoized_instancemethod  # noqa
from .langhelpers import memoized_property  # noqa
from .langhelpers import MemoizedSlots  # noqa
from .langhelpers import methods_equivalent  # noqa
from .langhelpers import monkeypatch_proxied_specials  # noqa
from .langhelpers import NoneType  # noqa
from .langhelpers import only_once  # noqa
from .langhelpers import PluginLoader  # noqa
from .langhelpers import portable_instancemethod  # noqa
from .langhelpers import quoted_token_parser  # noqa
from .langhelpers import safe_reraise  # noqa
from .langhelpers import set_creation_order  # noqa
from .langhelpers import symbol  # noqa
from .langhelpers import unbound_method_to_callable  # noqa
from .langhelpers import warn  # noqa
from .langhelpers import warn_exception  # noqa
from .langhelpers import warn_limited  # noqa
from .langhelpers import wrap_callable  # noqa

# things that used to be not always available,
# but are now as of current support Python versions
0
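Since this __init__ simply re-exports the submodules' names, downstream code imports everything through the sqlalchemy.util facade; a small illustration, assuming an installed SQLAlchemy 1.3:

    from sqlalchemy import util

    s = util.OrderedSet([3, 1, 2, 1])
    print(list(s))                         # [3, 1, 2]: insertion order, de-duplicated

    tok = util.symbol("NO_VALUE")
    assert tok is util.symbol("NO_VALUE")  # symbols are interned singletons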
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/util/deprecations.py
# util/deprecations.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Helpers related to deprecation of functions, methods, classes, other
functionality."""

import re
import warnings

from . import compat
from .langhelpers import _hash_limit_string
from .langhelpers import decorator
from .langhelpers import inject_docstring_text
from .langhelpers import inject_param_text
from .. import exc


def warn_deprecated(msg, stacklevel=3):
    warnings.warn(msg, exc.SADeprecationWarning, stacklevel=stacklevel)


def warn_deprecated_limited(msg, args, stacklevel=3):
    """Issue a deprecation warning with a parameterized string,
    limiting the number of registrations.
    """
    if args:
        msg = _hash_limit_string(msg, 10, args)
    warnings.warn(msg, exc.SADeprecationWarning, stacklevel)


def warn_pending_deprecation(msg, stacklevel=3):
    warnings.warn(msg, exc.SAPendingDeprecationWarning, stacklevel=stacklevel)


def deprecated_cls(version, message, constructor="__init__"):
    header = ".. deprecated:: %s %s" % (version, (message or ""))

    def decorate(cls):
        return _decorate_cls_with_warning(
            cls,
            constructor,
            exc.SADeprecationWarning,
            message % dict(func=constructor),
            header,
        )

    return decorate


def deprecated(version, message=None, add_deprecation_to_docstring=True):
    """Decorates a function and issues a deprecation warning on use.

    :param version:
      Issue version in the warning.

    :param message:
      If provided, issue message in the warning.  A sensible default
      is used if not provided.

    :param add_deprecation_to_docstring:
      Default True.  If False, the wrapped function's __doc__ is left
      as-is.  If True, the 'message' is prepended to the docs if
      provided, or sensible default if message is omitted.

    """

    if add_deprecation_to_docstring:
        header = ".. deprecated:: %s %s" % (version, (message or ""))
    else:
        header = None

    if message is None:
        message = "Call to deprecated function %(func)s"

    def decorate(fn):
        return _decorate_with_warning(
            fn,
            exc.SADeprecationWarning,
            message % dict(func=fn.__name__),
            header,
        )

    return decorate


def deprecated_params(**specs):
    """Decorates a function to warn on use of certain parameters.

    e.g. ::

        @deprecated_params(
            weak_identity_map=(
                "0.7",
                "the :paramref:`.Session.weak_identity_map parameter "
                "is deprecated."
            )
        )

    """

    messages = {}
    for param, (version, message) in specs.items():
        messages[param] = _sanitize_restructured_text(message)

    def decorate(fn):
        spec = compat.inspect_getfullargspec(fn)
        if spec.defaults is not None:
            defaults = dict(
                zip(
                    spec.args[(len(spec.args) - len(spec.defaults)) :],
                    spec.defaults,
                )
            )
            check_defaults = set(defaults).intersection(messages)
            check_kw = set(messages).difference(defaults)
        else:
            check_defaults = ()
            check_kw = set(messages)

        @decorator
        def warned(fn, *args, **kwargs):
            for m in check_defaults:
                if kwargs[m] != defaults[m]:
                    warnings.warn(
                        messages[m], exc.SADeprecationWarning, stacklevel=3
                    )
            for m in check_kw:
                if m in kwargs:
                    warnings.warn(
                        messages[m], exc.SADeprecationWarning, stacklevel=3
                    )

            return fn(*args, **kwargs)

        doc = fn.__doc__ is not None and fn.__doc__ or ""
        if doc:
            doc = inject_param_text(
                doc,
                {
                    param: ".. deprecated:: %s %s"
                    % (version, (message or ""))
                    for param, (version, message) in specs.items()
                },
            )
        decorated = warned(fn)
        decorated.__doc__ = doc
        return decorated

    return decorate


def pending_deprecation(
    version, message=None, add_deprecation_to_docstring=True
):
    """Decorates a function and issues a pending deprecation warning on use.

    :param version:
      An approximate future version at which point the pending deprecation
      will become deprecated.  Not used in messaging.

    :param message:
      If provided, issue message in the warning.  A sensible default
      is used if not provided.

    :param add_deprecation_to_docstring:
      Default True.  If False, the wrapped function's __doc__ is left
      as-is.  If True, the 'message' is prepended to the docs if
      provided, or sensible default if message is omitted.
    """
    if add_deprecation_to_docstring:
        header = ".. deprecated:: %s (pending) %s" % (version, (message or ""))
    else:
        header = None

    if message is None:
        message = "Call to deprecated function %(func)s"

    def decorate(fn):
        return _decorate_with_warning(
            fn,
            exc.SAPendingDeprecationWarning,
            message % dict(func=fn.__name__),
            header,
        )

    return decorate


def deprecated_option_value(parameter_value, default_value, warning_text):
    if parameter_value is None:
        return default_value
    else:
        warn_deprecated(warning_text)
        return parameter_value


def _sanitize_restructured_text(text):
    def repl(m):
        type_, name = m.group(1, 2)
        if type_ in ("func", "meth"):
            name += "()"
        return name

    return re.sub(r"\:(\w+)\:`~?(?:_\w+)?\.?(.+?)`", repl, text)


def _decorate_cls_with_warning(
    cls, constructor, wtype, message, docstring_header=None
):
    doc = cls.__doc__ is not None and cls.__doc__ or ""
    if docstring_header is not None:
        docstring_header %= dict(func=constructor)

        doc = inject_docstring_text(doc, docstring_header, 1)

        if type(cls) is type:
            clsdict = dict(cls.__dict__)
            clsdict["__doc__"] = doc
            cls = type(cls.__name__, cls.__bases__, clsdict)
            constructor_fn = clsdict[constructor]
        else:
            cls.__doc__ = doc
            constructor_fn = getattr(cls, constructor)

    setattr(
        cls,
        constructor,
        _decorate_with_warning(constructor_fn, wtype, message, None),
    )
    return cls


def _decorate_with_warning(func, wtype, message, docstring_header=None):
    """Wrap a function with a warnings.warn and augmented docstring."""

    message = _sanitize_restructured_text(message)

    @decorator
    def warned(fn, *args, **kwargs):
        warnings.warn(message, wtype, stacklevel=3)
        return fn(*args, **kwargs)

    doc = func.__doc__ is not None and func.__doc__ or ""
    if docstring_header is not None:
        docstring_header %= dict(func=func.__name__)

        doc = inject_docstring_text(doc, docstring_header, 1)

    decorated = warned(func)
    decorated.__doc__ = doc
    decorated._sa_warn = lambda: warnings.warn(message, wtype, stacklevel=3)
    return decorated
0
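A sketch of the @deprecated decorator in use; old_helper and its message are invented for illustration:

    import warnings

    from sqlalchemy.util.deprecations import deprecated

    @deprecated("1.3", "Use the (hypothetical) new_helper() instead.")
    def old_helper():
        """Return a placeholder value."""
        return 42

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        old_helper()

    print(caught[0].category.__name__)  # SADeprecationWarning

The decorator also prepends a ".. deprecated:: 1.3 ..." header to old_helper.__doc__, which is how these notices surface in the rendered API documentation.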
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/util/_collections.py
# util/_collections.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Collection classes and helpers.""" from __future__ import absolute_import import operator import types import weakref from .compat import binary_types from .compat import collections_abc from .compat import itertools_filterfalse from .compat import py2k from .compat import string_types from .compat import threading EMPTY_SET = frozenset() class AbstractKeyedTuple(tuple): __slots__ = () def keys(self): """Return a list of string key names for this :class:`.KeyedTuple`. .. seealso:: :attr:`.KeyedTuple._fields` """ return list(self._fields) class KeyedTuple(AbstractKeyedTuple): """``tuple`` subclass that adds labeled names. E.g.:: >>> k = KeyedTuple([1, 2, 3], labels=["one", "two", "three"]) >>> k.one 1 >>> k.two 2 Result rows returned by :class:`_query.Query` that contain multiple ORM entities and/or column expressions make use of this class to return rows. The :class:`.KeyedTuple` exhibits similar behavior to the ``collections.namedtuple()`` construct provided in the Python standard library, however is architected very differently. Unlike ``collections.namedtuple()``, :class:`.KeyedTuple` is does not rely on creation of custom subtypes in order to represent a new series of keys, instead each :class:`.KeyedTuple` instance receives its list of keys in place. The subtype approach of ``collections.namedtuple()`` introduces significant complexity and performance overhead, which is not necessary for the :class:`_query.Query` object's use case. .. seealso:: :ref:`ormtutorial_querying` """ def __new__(cls, vals, labels=None): t = tuple.__new__(cls, vals) if labels: t.__dict__.update(zip(labels, vals)) else: labels = [] t.__dict__["_labels"] = labels return t @property def _fields(self): """Return a tuple of string key names for this :class:`.KeyedTuple`. This method provides compatibility with ``collections.namedtuple()``. .. seealso:: :meth:`.KeyedTuple.keys` """ return tuple([l for l in self._labels if l is not None]) def __setattr__(self, key, value): raise AttributeError("Can't set attribute: %s" % key) def _asdict(self): """Return the contents of this :class:`.KeyedTuple` as a dictionary. This method provides compatibility with ``collections.namedtuple()``, with the exception that the dictionary returned is **not** ordered. 
""" return {key: self.__dict__[key] for key in self.keys()} class _LW(AbstractKeyedTuple): __slots__ = () def __new__(cls, vals): return tuple.__new__(cls, vals) def __reduce__(self): # for pickling, degrade down to the regular # KeyedTuple, thus avoiding anonymous class pickling # difficulties return KeyedTuple, (list(self), self._real_fields) def _asdict(self): """Return the contents of this :class:`.KeyedTuple` as a dictionary.""" d = dict(zip(self._real_fields, self)) d.pop(None, None) return d class ImmutableContainer(object): def _immutable(self, *arg, **kw): raise TypeError("%s object is immutable" % self.__class__.__name__) __delitem__ = __setitem__ = __setattr__ = _immutable class immutabledict(ImmutableContainer, dict): clear = pop = popitem = setdefault = update = ImmutableContainer._immutable def __new__(cls, *args): new = dict.__new__(cls) dict.__init__(new, *args) return new def __init__(self, *args): pass def __reduce__(self): return immutabledict, (dict(self),) def union(self, d): if not d: return self elif not self: if isinstance(d, immutabledict): return d else: return immutabledict(d) else: d2 = immutabledict(self) dict.update(d2, d) return d2 def __repr__(self): return "immutabledict(%s)" % dict.__repr__(self) class Properties(object): """Provide a __getattr__/__setattr__ interface over a dict.""" __slots__ = ("_data",) def __init__(self, data): object.__setattr__(self, "_data", data) def __len__(self): return len(self._data) def __iter__(self): return iter(list(self._data.values())) def __dir__(self): return dir(super(Properties, self)) + [ str(k) for k in self._data.keys() ] def __add__(self, other): return list(self) + list(other) def __setitem__(self, key, obj): self._data[key] = obj def __getitem__(self, key): return self._data[key] def __delitem__(self, key): del self._data[key] def __setattr__(self, key, obj): self._data[key] = obj def __getstate__(self): return {"_data": self._data} def __setstate__(self, state): object.__setattr__(self, "_data", state["_data"]) def __getattr__(self, key): try: return self._data[key] except KeyError: raise AttributeError(key) def __contains__(self, key): return key in self._data def as_immutable(self): """Return an immutable proxy for this :class:`.Properties`.""" return ImmutableProperties(self._data) def update(self, value): self._data.update(value) def get(self, key, default=None): if key in self: return self[key] else: return default def keys(self): return list(self._data) def values(self): return list(self._data.values()) def items(self): return list(self._data.items()) def has_key(self, key): return key in self._data def clear(self): self._data.clear() class OrderedProperties(Properties): """Provide a __getattr__/__setattr__ interface with an OrderedDict as backing store.""" __slots__ = () def __init__(self): Properties.__init__(self, OrderedDict()) class ImmutableProperties(ImmutableContainer, Properties): """Provide immutable dict/object attribute to an underlying dictionary.""" __slots__ = () class OrderedDict(dict): """A dict that returns keys/values/items in the order they were added.""" __slots__ = ("_list",) def __reduce__(self): return OrderedDict, (self.items(),) def __init__(self, ____sequence=None, **kwargs): self._list = [] if ____sequence is None: if kwargs: self.update(**kwargs) else: self.update(____sequence, **kwargs) def clear(self): self._list = [] dict.clear(self) def copy(self): return self.__copy__() def __copy__(self): return OrderedDict(self) def sort(self, *arg, **kw): self._list.sort(*arg, **kw) 
def update(self, ____sequence=None, **kwargs): if ____sequence is not None: if hasattr(____sequence, "keys"): for key in ____sequence.keys(): self.__setitem__(key, ____sequence[key]) else: for key, value in ____sequence: self[key] = value if kwargs: self.update(kwargs) def setdefault(self, key, value): if key not in self: self.__setitem__(key, value) return value else: return self.__getitem__(key) def __iter__(self): return iter(self._list) def keys(self): return list(self) def values(self): return [self[key] for key in self._list] def items(self): return [(key, self[key]) for key in self._list] if py2k: def itervalues(self): return iter(self.values()) def iterkeys(self): return iter(self) def iteritems(self): return iter(self.items()) def __setitem__(self, key, obj): if key not in self: try: self._list.append(key) except AttributeError: # work around Python pickle loads() with # dict subclass (seems to ignore __setstate__?) self._list = [key] dict.__setitem__(self, key, obj) def __delitem__(self, key): dict.__delitem__(self, key) self._list.remove(key) def pop(self, key, *default): present = key in self value = dict.pop(self, key, *default) if present: self._list.remove(key) return value def popitem(self): item = dict.popitem(self) self._list.remove(item[0]) return item class OrderedSet(set): def __init__(self, d=None): set.__init__(self) self._list = [] if d is not None: self._list = unique_list(d) set.update(self, self._list) else: self._list = [] def add(self, element): if element not in self: self._list.append(element) set.add(self, element) def remove(self, element): set.remove(self, element) self._list.remove(element) def insert(self, pos, element): if element not in self: self._list.insert(pos, element) set.add(self, element) def discard(self, element): if element in self: self._list.remove(element) set.remove(self, element) def clear(self): set.clear(self) self._list = [] def __getitem__(self, key): return self._list[key] def __iter__(self): return iter(self._list) def __add__(self, other): return self.union(other) def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self._list) __str__ = __repr__ def update(self, iterable): for e in iterable: if e not in self: self._list.append(e) set.add(self, e) return self __ior__ = update def union(self, other): result = self.__class__(self) result.update(other) return result __or__ = union def intersection(self, other): other = set(other) return self.__class__(a for a in self if a in other) __and__ = intersection def symmetric_difference(self, other): other = set(other) result = self.__class__(a for a in self if a not in other) result.update(a for a in other if a not in self) return result __xor__ = symmetric_difference def difference(self, other): other = set(other) return self.__class__(a for a in self if a not in other) __sub__ = difference def intersection_update(self, other): other = set(other) set.intersection_update(self, other) self._list = [a for a in self._list if a in other] return self __iand__ = intersection_update def symmetric_difference_update(self, other): set.symmetric_difference_update(self, other) self._list = [a for a in self._list if a in self] self._list += [a for a in other._list if a in self] return self __ixor__ = symmetric_difference_update def difference_update(self, other): set.difference_update(self, other) self._list = [a for a in self._list if a in self] return self __isub__ = difference_update class IdentitySet(object): """A set that considers only object id() for uniqueness. 
This strategy has edge cases for builtin types- it's possible to have two 'foo' strings in one of these sets, for example. Use sparingly. """ def __init__(self, iterable=None): self._members = dict() if iterable: self.update(iterable) def add(self, value): self._members[id(value)] = value def __contains__(self, value): return id(value) in self._members def remove(self, value): del self._members[id(value)] def discard(self, value): try: self.remove(value) except KeyError: pass def pop(self): try: pair = self._members.popitem() return pair[1] except KeyError: raise KeyError("pop from an empty set") def clear(self): self._members.clear() def __cmp__(self, other): raise TypeError("cannot compare sets using cmp()") def __eq__(self, other): if isinstance(other, IdentitySet): return self._members == other._members else: return False def __ne__(self, other): if isinstance(other, IdentitySet): return self._members != other._members else: return True def issubset(self, iterable): other = self.__class__(iterable) if len(self) > len(other): return False for m in itertools_filterfalse( other._members.__contains__, iter(self._members.keys()) ): return False return True def __le__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.issubset(other) def __lt__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return len(self) < len(other) and self.issubset(other) def issuperset(self, iterable): other = self.__class__(iterable) if len(self) < len(other): return False for m in itertools_filterfalse( self._members.__contains__, iter(other._members.keys()) ): return False return True def __ge__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.issuperset(other) def __gt__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return len(self) > len(other) and self.issuperset(other) def union(self, iterable): result = self.__class__() members = self._members result._members.update(members) result._members.update((id(obj), obj) for obj in iterable) return result def __or__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.union(other) def update(self, iterable): self._members.update((id(obj), obj) for obj in iterable) def __ior__(self, other): if not isinstance(other, IdentitySet): return NotImplemented self.update(other) return self def difference(self, iterable): result = self.__class__() members = self._members other = {id(obj) for obj in iterable} result._members.update( ((k, v) for k, v in members.items() if k not in other) ) return result def __sub__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.difference(other) def difference_update(self, iterable): self._members = self.difference(iterable)._members def __isub__(self, other): if not isinstance(other, IdentitySet): return NotImplemented self.difference_update(other) return self def intersection(self, iterable): result = self.__class__() members = self._members other = {id(obj) for obj in iterable} result._members.update( (k, v) for k, v in members.items() if k in other ) return result def __and__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.intersection(other) def intersection_update(self, iterable): self._members = self.intersection(iterable)._members def __iand__(self, other): if not isinstance(other, IdentitySet): return NotImplemented self.intersection_update(other) return self def symmetric_difference(self, iterable): 
result = self.__class__() members = self._members other = {id(obj): obj for obj in iterable} result._members.update( ((k, v) for k, v in members.items() if k not in other) ) result._members.update( ((k, v) for k, v in other.items() if k not in members) ) return result def __xor__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.symmetric_difference(other) def symmetric_difference_update(self, iterable): self._members = self.symmetric_difference(iterable)._members def __ixor__(self, other): if not isinstance(other, IdentitySet): return NotImplemented self.symmetric_difference(other) return self def copy(self): return type(self)(iter(self._members.values())) __copy__ = copy def __len__(self): return len(self._members) def __iter__(self): return iter(self._members.values()) def __hash__(self): raise TypeError("set objects are unhashable") def __repr__(self): return "%s(%r)" % (type(self).__name__, list(self._members.values())) class WeakSequence(object): def __init__(self, __elements=()): # adapted from weakref.WeakKeyDictionary, prevent reference # cycles in the collection itself def _remove(item, selfref=weakref.ref(self)): self = selfref() if self is not None: self._storage.remove(item) self._remove = _remove self._storage = [ weakref.ref(element, _remove) for element in __elements ] def append(self, item): self._storage.append(weakref.ref(item, self._remove)) def __len__(self): return len(self._storage) def __iter__(self): return ( obj for obj in (ref() for ref in self._storage) if obj is not None ) def __getitem__(self, index): try: obj = self._storage[index] except KeyError: raise IndexError("Index %s out of range" % index) else: return obj() class OrderedIdentitySet(IdentitySet): def __init__(self, iterable=None): IdentitySet.__init__(self) self._members = OrderedDict() if iterable: for o in iterable: self.add(o) class PopulateDict(dict): """A dict which populates missing values via a creation function. Note the creation function takes a key, unlike collections.defaultdict. """ def __init__(self, creator): self.creator = creator def __missing__(self, key): self[key] = val = self.creator(key) return val class WeakPopulateDict(dict): """Like PopulateDict, but assumes a self + a method and does not create a reference cycle. """ def __init__(self, creator_method): self.creator = creator_method.__func__ weakself = creator_method.__self__ self.weakself = weakref.ref(weakself) def __missing__(self, key): self[key] = val = self.creator(self.weakself(), key) return val # Define collections that are capable of storing # ColumnElement objects as hashable keys/elements. # At this point, these are mostly historical, things # used to be more complicated. column_set = set column_dict = dict ordered_column_set = OrderedSet _getters = PopulateDict(operator.itemgetter) _property_getters = PopulateDict( lambda idx: property(operator.itemgetter(idx)) ) def unique_list(seq, hashfunc=None): seen = set() seen_add = seen.add if not hashfunc: return [x for x in seq if x not in seen and not seen_add(x)] else: return [ x for x in seq if hashfunc(x) not in seen and not seen_add(hashfunc(x)) ] class UniqueAppender(object): """Appends items to a collection ensuring uniqueness. Additional appends() of the same object are ignored. Membership is determined by identity (``is a``) not equality (``==``). 
""" def __init__(self, data, via=None): self.data = data self._unique = {} if via: self._data_appender = getattr(data, via) elif hasattr(data, "append"): self._data_appender = data.append elif hasattr(data, "add"): self._data_appender = data.add def append(self, item): id_ = id(item) if id_ not in self._unique: self._data_appender(item) self._unique[id_] = True def __iter__(self): return iter(self.data) def coerce_generator_arg(arg): if len(arg) == 1 and isinstance(arg[0], types.GeneratorType): return list(arg[0]) else: return arg def to_list(x, default=None): if x is None: return default if not isinstance(x, collections_abc.Iterable) or isinstance( x, string_types + binary_types ): return [x] elif isinstance(x, list): return x else: return list(x) def has_intersection(set_, iterable): r"""return True if any items of set\_ are present in iterable. Goes through special effort to ensure __hash__ is not called on items in iterable that don't support it. """ # TODO: optimize, write in C, etc. return bool(set_.intersection([i for i in iterable if i.__hash__])) def to_set(x): if x is None: return set() if not isinstance(x, set): return set(to_list(x)) else: return x def to_column_set(x): if x is None: return column_set() if not isinstance(x, column_set): return column_set(to_list(x)) else: return x def update_copy(d, _new=None, **kw): """Copy the given dict and update with the given values.""" d = d.copy() if _new: d.update(_new) d.update(**kw) return d def flatten_iterator(x): """Given an iterator of which further sub-elements may also be iterators, flatten the sub-elements into a single iterator. """ for elem in x: if not isinstance(elem, str) and hasattr(elem, "__iter__"): for y in flatten_iterator(elem): yield y else: yield elem class LRUCache(dict): """Dictionary with 'squishy' removal of least recently used items. Note that either get() or [] should be used here, but generally its not safe to do an "in" check first as the dictionary can change subsequent to that call. 
""" __slots__ = "capacity", "threshold", "size_alert", "_counter", "_mutex" def __init__(self, capacity=100, threshold=0.5, size_alert=None): self.capacity = capacity self.threshold = threshold self.size_alert = size_alert self._counter = 0 self._mutex = threading.Lock() def _inc_counter(self): self._counter += 1 return self._counter def get(self, key, default=None): item = dict.get(self, key, default) if item is not default: item[2] = self._inc_counter() return item[1] else: return default def __getitem__(self, key): item = dict.__getitem__(self, key) item[2] = self._inc_counter() return item[1] def values(self): return [i[1] for i in dict.values(self)] def setdefault(self, key, value): if key in self: return self[key] else: self[key] = value return value def __setitem__(self, key, value): item = dict.get(self, key) if item is None: item = [key, value, self._inc_counter()] dict.__setitem__(self, key, item) else: item[1] = value self._manage_size() @property def size_threshold(self): return self.capacity + self.capacity * self.threshold def _manage_size(self): if not self._mutex.acquire(False): return try: size_alert = bool(self.size_alert) while len(self) > self.capacity + self.capacity * self.threshold: if size_alert: size_alert = False self.size_alert(self) by_counter = sorted( dict.values(self), key=operator.itemgetter(2), reverse=True ) for item in by_counter[self.capacity :]: try: del self[item[0]] except KeyError: # deleted elsewhere; skip continue finally: self._mutex.release() _lw_tuples = LRUCache(100) def lightweight_named_tuple(name, fields): hash_ = (name,) + tuple(fields) tp_cls = _lw_tuples.get(hash_) if tp_cls: return tp_cls tp_cls = type( name, (_LW,), dict( [ (field, _property_getters[idx]) for idx, field in enumerate(fields) if field is not None ] + [("__slots__", ())] ), ) tp_cls._real_fields = fields tp_cls._fields = tuple([f for f in fields if f is not None]) _lw_tuples[hash_] = tp_cls return tp_cls class ScopedRegistry(object): """A Registry that can store one or multiple instances of a single class on the basis of a "scope" function. The object implements ``__call__`` as the "getter", so by calling ``myregistry()`` the contained object is returned for the current scope. :param createfunc: a callable that returns a new object to be placed in the registry :param scopefunc: a callable that will return a key to store/retrieve an object. """ def __init__(self, createfunc, scopefunc): """Construct a new :class:`.ScopedRegistry`. :param createfunc: A creation function that will generate a new value for the current scope, if none is present. :param scopefunc: A function that returns a hashable token representing the current scope (such as, current thread identifier). """ self.createfunc = createfunc self.scopefunc = scopefunc self.registry = {} def __call__(self): key = self.scopefunc() try: return self.registry[key] except KeyError: return self.registry.setdefault(key, self.createfunc()) def has(self): """Return True if an object is present in the current scope.""" return self.scopefunc() in self.registry def set(self, obj): """Set the value for the current scope.""" self.registry[self.scopefunc()] = obj def clear(self): """Clear the current scope, if any.""" try: del self.registry[self.scopefunc()] except KeyError: pass class ThreadLocalRegistry(ScopedRegistry): """A :class:`.ScopedRegistry` that uses a ``threading.local()`` variable for storage. 
""" def __init__(self, createfunc): self.createfunc = createfunc self.registry = threading.local() def __call__(self): try: return self.registry.value except AttributeError: val = self.registry.value = self.createfunc() return val def has(self): return hasattr(self.registry, "value") def set(self, obj): self.registry.value = obj def clear(self): try: del self.registry.value except AttributeError: pass def has_dupes(sequence, target): """Given a sequence and search object, return True if there's more than one, False if zero or one of them. """ # compare to .index version below, this version introduces less function # overhead and is usually the same speed. At 15000 items (way bigger than # a relationship-bound collection in memory usually is) it begins to # fall behind the other version only by microseconds. c = 0 for item in sequence: if item is target: c += 1 if c > 1: return True return False # .index version. the two __contains__ calls as well # as .index() and isinstance() slow this down. # def has_dupes(sequence, target): # if target not in sequence: # return False # elif not isinstance(sequence, collections_abc.Sequence): # return False # # idx = sequence.index(target) # return target in sequence[idx + 1:]
0
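Brief sketches of a few of the collections defined above; the values are invented:

    from sqlalchemy.util._collections import IdentitySet, LRUCache, unique_list

    print(unique_list([3, 1, 3, 2, 1]))   # [3, 1, 2]: order kept, dupes dropped

    cache = LRUCache(capacity=2)
    cache["a"] = 1
    cache["b"] = 2
    print(cache.get("a"))                 # 1; also bumps "a"'s usage counter

    ids = IdentitySet()
    x, y = [1, 2], [1, 2]                 # equal but distinct objects
    ids.add(x)
    print(x in ids, y in ids)             # True False: membership is by id()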
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/util/langhelpers.py
# util/langhelpers.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Routines to help with the creation, loading and introspection of
modules, classes, hierarchies, attributes, functions, and methods.

"""

from functools import update_wrapper
import hashlib
import inspect
import itertools
import operator
import re
import sys
import textwrap
import types
import warnings

from . import _collections
from . import compat
from .. import exc


def md5_hex(x):
    if compat.py3k:
        x = x.encode("utf-8")
    m = hashlib.md5()
    m.update(x)
    return m.hexdigest()


class safe_reraise(object):
    """Reraise an exception after invoking some handler code.

    Stores the existing exception info before invoking so that it is
    maintained across a potential coroutine context switch.

    e.g.::

        try:
            sess.commit()
        except:
            with safe_reraise():
                sess.rollback()

    """

    __slots__ = ("warn_only", "_exc_info")

    def __init__(self, warn_only=False):
        self.warn_only = warn_only

    def __enter__(self):
        self._exc_info = sys.exc_info()

    def __exit__(self, type_, value, traceback):
        # see #2703 for notes
        if type_ is None:
            exc_type, exc_value, exc_tb = self._exc_info
            self._exc_info = None  # remove potential circular references
            if not self.warn_only:
                compat.raise_(
                    exc_value,
                    with_traceback=exc_tb,
                )
        else:
            if not compat.py3k and self._exc_info and self._exc_info[1]:
                # emulate Py3K's behavior of telling us when an exception
                # occurs in an exception handler.
                warn(
                    "An exception has occurred during handling of a "
                    "previous exception.  The previous exception "
                    "is:\n %s %s\n" % (self._exc_info[0], self._exc_info[1])
                )
            self._exc_info = None  # remove potential circular references
            compat.raise_(value, with_traceback=traceback)


def clsname_as_plain_name(cls):
    return " ".join(
        n.lower() for n in re.findall(r"([A-Z][a-z]+)", cls.__name__)
    )


def decode_slice(slc):
    """decode a slice object as sent to __getitem__.

    takes into account the 2.5 __index__() method, basically.

    """
    ret = []
    for x in slc.start, slc.stop, slc.step:
        if hasattr(x, "__index__"):
            x = x.__index__()
        ret.append(x)
    return tuple(ret)


def _unique_symbols(used, *bases):
    used = set(used)
    for base in bases:
        pool = itertools.chain(
            (base,),
            compat.itertools_imap(lambda i: base + str(i), range(1000)),
        )
        for sym in pool:
            if sym not in used:
                used.add(sym)
                yield sym
                break
        else:
            raise NameError("exhausted namespace for symbol base %s" % base)


def map_bits(fn, n):
    """Call the given function given each nonzero bit from n."""

    while n:
        b = n & (~n + 1)
        yield fn(b)
        n ^= b


def decorator(target):
    """A signature-matching decorator factory."""

    def decorate(fn):
        if not inspect.isfunction(fn) and not inspect.ismethod(fn):
            raise Exception("not a decoratable function")

        spec = compat.inspect_getfullargspec(fn)
        names = tuple(spec[0]) + spec[1:3] + (fn.__name__,)
        targ_name, fn_name = _unique_symbols(names, "target", "fn")

        metadata = dict(target=targ_name, fn=fn_name)
        metadata.update(format_argspec_plus(spec, grouped=False))
        metadata["name"] = fn.__name__
        code = (
            """\
def %(name)s(%(args)s):
    return %(target)s(%(fn)s, %(apply_kw)s)
"""
            % metadata
        )
        decorated = _exec_code_in_env(
            code, {targ_name: target, fn_name: fn}, fn.__name__
        )
        decorated.__defaults__ = getattr(fn, "im_func", fn).__defaults__
        decorated.__wrapped__ = fn
        return update_wrapper(decorated, fn)

    return update_wrapper(decorate, target)


def _exec_code_in_env(code, env, fn_name):
    exec(code, env)
    return env[fn_name]


def public_factory(target, location, class_location=None):
    """Produce a wrapping function for the given cls or classmethod.

    Rationale here is so that the __init__ method of the class
    can serve as documentation for the function.

    """

    if isinstance(target, type):
        fn = target.__init__
        callable_ = target
        doc = (
            "Construct a new :class:`.%s` object. \n\n"
            "This constructor is mirrored as a public API function; "
            "see :func:`sqlalchemy%s` "
            "for a full usage and argument description."
            % (target.__name__, location)
        )
    else:
        fn = callable_ = target
        doc = (
            "This function is mirrored; see :func:`sqlalchemy%s` "
            "for a description of arguments." % location
        )

    location_name = location.split(".")[-1]
    spec = compat.inspect_getfullargspec(fn)
    del spec[0][0]
    metadata = format_argspec_plus(spec, grouped=False)
    metadata["name"] = location_name
    code = (
        """\
def %(name)s(%(args)s):
    return cls(%(apply_kw)s)
"""
        % metadata
    )
    env = {"cls": callable_, "symbol": symbol}
    exec(code, env)
    decorated = env[location_name]

    if hasattr(fn, "_linked_to"):
        linked_to, linked_to_location = fn._linked_to
        linked_to_doc = linked_to.__doc__
        if class_location is None:
            class_location = "%s.%s" % (target.__module__, target.__name__)

        linked_to_doc = inject_docstring_text(
            linked_to_doc,
            ".. container:: inherited_member\n\n    "
            "Inherited from :func:`sqlalchemy%s`; this constructor "
            "creates a :class:`%s` object"
            % (linked_to_location, class_location),
            1,
        )
        decorated.__doc__ = linked_to_doc
    else:
        decorated.__doc__ = fn.__doc__

    decorated.__module__ = "sqlalchemy" + location.rsplit(".", 1)[0]
    if decorated.__module__ not in sys.modules:
        raise ImportError(
            "public_factory location %s is not in sys.modules"
            % (decorated.__module__,)
        )

    if compat.py2k or hasattr(fn, "__func__"):
        fn.__func__.__doc__ = doc
        if not hasattr(fn.__func__, "_linked_to"):
            fn.__func__._linked_to = (decorated, location)
    else:
        fn.__doc__ = doc
        if not hasattr(fn, "_linked_to"):
            fn._linked_to = (decorated, location)

    return decorated


class PluginLoader(object):
    def __init__(self, group, auto_fn=None):
        self.group = group
        self.impls = {}
        self.auto_fn = auto_fn

    def clear(self):
        self.impls.clear()

    def load(self, name):
        if name in self.impls:
            return self.impls[name]()

        if self.auto_fn:
            loader = self.auto_fn(name)
            if loader:
                self.impls[name] = loader
                return loader()

        try:
            import pkg_resources
        except ImportError:
            pass
        else:
            for impl in pkg_resources.iter_entry_points(self.group, name):
                self.impls[name] = impl.load
                return impl.load()

        raise exc.NoSuchModuleError(
            "Can't load plugin: %s:%s" % (self.group, name)
        )

    def register(self, name, modulepath, objname):
        def load():
            mod = compat.import_(modulepath)
            for token in modulepath.split(".")[1:]:
                mod = getattr(mod, token)
            return getattr(mod, objname)

        self.impls[name] = load


def _inspect_func_args(fn):
    try:
        co_varkeywords = inspect.CO_VARKEYWORDS
    except AttributeError:
        # https://docs.python.org/3/library/inspect.html
        # The flags are specific to CPython, and may not be defined in other
        # Python implementations. Furthermore, the flags are an implementation
        # detail, and can be removed or deprecated in future Python releases.
        spec = compat.inspect_getfullargspec(fn)
        return spec[0], bool(spec[2])
    else:
        # use fn.__code__ plus flags to reduce method call overhead
        co = fn.__code__
        nargs = co.co_argcount
        return (
            list(co.co_varnames[:nargs]),
            bool(co.co_flags & co_varkeywords),
        )


def get_cls_kwargs(cls, _set=None):
    r"""Return the full set of inherited kwargs for the given `cls`.

    Probes a class's __init__ method, collecting all named arguments.  If the
    __init__ defines a \**kwargs catch-all, then the constructor is presumed
    to pass along unrecognized keywords to its base classes, and the
    collection process is repeated recursively on each of the bases.

    Uses a subset of inspect.getfullargspec() to cut down on method overhead,
    as this is used within the Core typing system to create copies of type
    objects which is a performance-sensitive operation.

    No anonymous tuple arguments please!

    """
    toplevel = _set is None
    if toplevel:
        _set = set()

    ctr = cls.__dict__.get("__init__", False)

    has_init = (
        ctr
        and isinstance(ctr, types.FunctionType)
        and isinstance(ctr.__code__, types.CodeType)
    )

    if has_init:
        names, has_kw = _inspect_func_args(ctr)
        _set.update(names)

        if not has_kw and not toplevel:
            return None

    if not has_init or has_kw:
        for c in cls.__bases__:
            if get_cls_kwargs(c, _set) is None:
                break

    _set.discard("self")
    return _set


def get_func_kwargs(func):
    """Return the set of legal kwargs for the given `func`.

    Uses getargspec so is safe to call for methods, functions,
    etc.

    """

    return compat.inspect_getfullargspec(func)[0]


def get_callable_argspec(fn, no_self=False, _is_init=False):
    """Return the argument signature for any callable.

    All pure-Python callables are accepted, including
    functions, methods, classes, objects with __call__;
    builtins and other edge cases like functools.partial() objects
    raise a TypeError.

    """
    if inspect.isbuiltin(fn):
        raise TypeError("Can't inspect builtin: %s" % fn)
    elif inspect.isfunction(fn):
        if _is_init and no_self:
            spec = compat.inspect_getfullargspec(fn)
            return compat.FullArgSpec(
                spec.args[1:],
                spec.varargs,
                spec.varkw,
                spec.defaults,
                spec.kwonlyargs,
                spec.kwonlydefaults,
                spec.annotations,
            )
        else:
            return compat.inspect_getfullargspec(fn)
    elif inspect.ismethod(fn):
        if no_self and (_is_init or fn.__self__):
            spec = compat.inspect_getfullargspec(fn.__func__)
            return compat.FullArgSpec(
                spec.args[1:],
                spec.varargs,
                spec.varkw,
                spec.defaults,
                spec.kwonlyargs,
                spec.kwonlydefaults,
                spec.annotations,
            )
        else:
            return compat.inspect_getfullargspec(fn.__func__)
    elif inspect.isclass(fn):
        return get_callable_argspec(
            fn.__init__, no_self=no_self, _is_init=True
        )
    elif hasattr(fn, "__func__"):
        return compat.inspect_getfullargspec(fn.__func__)
    elif hasattr(fn, "__call__"):
        if inspect.ismethod(fn.__call__):
            return get_callable_argspec(fn.__call__, no_self=no_self)
        else:
            raise TypeError("Can't inspect callable: %s" % fn)
    else:
        raise TypeError("Can't inspect callable: %s" % fn)


def format_argspec_plus(fn, grouped=True):
    """Returns a dictionary of formatted, introspected function arguments.

    An enhanced variant of inspect.formatargspec to support code generation.

    fn
       An inspectable callable or tuple of inspect getargspec() results.
    grouped
      Defaults to True; include (parens, around, argument) lists

    Returns:

    args
      Full inspect.formatargspec for fn
    self_arg
      The name of the first positional argument, varargs[0], or None
      if the function defines no positional arguments.
    apply_pos
      args, re-written in calling rather than receiving syntax.  Arguments are
      passed positionally.
    apply_kw
      Like apply_pos, except keyword-ish args are passed as keywords.

    Example::

      >>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
      {'args': '(self, a, b, c=3, **d)',
       'self_arg': 'self',
       'apply_kw': '(self, a, b, c=c, **d)',
       'apply_pos': '(self, a, b, c, **d)'}

    """
    if compat.callable(fn):
        spec = compat.inspect_getfullargspec(fn)
    else:
        spec = fn

    args = compat.inspect_formatargspec(*spec)

    if spec[0]:
        self_arg = spec[0][0]
    elif spec[1]:
        self_arg = "%s[0]" % spec[1]
    else:
        self_arg = None

    apply_pos = compat.inspect_formatargspec(
        spec[0], spec[1], spec[2], None, spec[4]
    )
    num_defaults = 0
    if spec[3]:
        num_defaults += len(spec[3])
    if spec[4]:
        num_defaults += len(spec[4])
    name_args = spec[0] + spec[4]

    if num_defaults:
        defaulted_vals = name_args[0 - num_defaults :]
    else:
        defaulted_vals = ()

    apply_kw = compat.inspect_formatargspec(
        name_args,
        spec[1],
        spec[2],
        defaulted_vals,
        formatvalue=lambda x: "=" + x,
    )
    if grouped:
        return dict(
            args=args,
            self_arg=self_arg,
            apply_pos=apply_pos,
            apply_kw=apply_kw,
        )
    else:
        return dict(
            args=args[1:-1],
            self_arg=self_arg,
            apply_pos=apply_pos[1:-1],
            apply_kw=apply_kw[1:-1],
        )


def format_argspec_init(method, grouped=True):
    """format_argspec_plus with considerations for typical __init__ methods

    Wraps format_argspec_plus with error handling strategies for typical
    __init__ cases::

      object.__init__ -> (self)
      other unreflectable (usually C) -> (self, *args, **kwargs)

    """
    if method is object.__init__:
        args = grouped and "(self)" or "self"
    else:
        try:
            return format_argspec_plus(method, grouped=grouped)
        except TypeError:
            args = (
                grouped
                and "(self, *args, **kwargs)"
                or "self, *args, **kwargs"
            )
    return dict(self_arg="self", args=args, apply_pos=args, apply_kw=args)


def getargspec_init(method):
    """inspect.getargspec with considerations for typical __init__ methods

    Wraps inspect.getargspec with error handling for typical __init__ cases::

      object.__init__ -> (self)
      other unreflectable (usually C) -> (self, *args, **kwargs)

    """
    try:
        return compat.inspect_getfullargspec(method)
    except TypeError:
        if method is object.__init__:
            return (["self"], None, None, None)
        else:
            return (["self"], "args", "kwargs", None)


def unbound_method_to_callable(func_or_cls):
    """Adjust the incoming callable such that a 'self' argument is not
    required.

    """

    if isinstance(func_or_cls, types.MethodType) and not func_or_cls.__self__:
        return func_or_cls.__func__
    else:
        return func_or_cls


def generic_repr(obj, additional_kw=(), to_inspect=None, omit_kwarg=()):
    """Produce a __repr__() based on direct association of the __init__()
    specification vs. same-named attributes present.

    """
    if to_inspect is None:
        to_inspect = [obj]
    else:
        to_inspect = _collections.to_list(to_inspect)

    missing = object()

    pos_args = []
    kw_args = _collections.OrderedDict()
    vargs = None
    for i, insp in enumerate(to_inspect):
        try:
            spec = compat.inspect_getfullargspec(insp.__init__)
        except TypeError:
            continue
        else:
            default_len = spec.defaults and len(spec.defaults) or 0
            if i == 0:
                if spec.varargs:
                    vargs = spec.varargs
                if default_len:
                    pos_args.extend(spec.args[1:-default_len])
                else:
                    pos_args.extend(spec.args[1:])
            else:
                kw_args.update(
                    [(arg, missing) for arg in spec.args[1:-default_len]]
                )

            if default_len:
                kw_args.update(
                    [
                        (arg, default)
                        for arg, default in zip(
                            spec.args[-default_len:], spec.defaults
                        )
                    ]
                )
    output = []

    output.extend(repr(getattr(obj, arg, None)) for arg in pos_args)

    if vargs is not None and hasattr(obj, vargs):
        output.extend([repr(val) for val in getattr(obj, vargs)])

    for arg, defval in kw_args.items():
        if arg in omit_kwarg:
            continue
        try:
            val = getattr(obj, arg, missing)
            if val is not missing and val != defval:
                output.append("%s=%r" % (arg, val))
        except Exception:
            pass

    if additional_kw:
        for arg, defval in additional_kw:
            try:
                val = getattr(obj, arg, missing)
                if val is not missing and val != defval:
                    output.append("%s=%r" % (arg, val))
            except Exception:
                pass

    return "%s(%s)" % (obj.__class__.__name__, ", ".join(output))


class portable_instancemethod(object):
    """Turn an instancemethod into a (parent, name) pair
    to produce a serializable callable.

    """

    __slots__ = "target", "name", "kwargs", "__weakref__"

    def __getstate__(self):
        return {
            "target": self.target,
            "name": self.name,
            "kwargs": self.kwargs,
        }

    def __setstate__(self, state):
        self.target = state["target"]
        self.name = state["name"]
        self.kwargs = state.get("kwargs", ())

    def __init__(self, meth, kwargs=()):
        self.target = meth.__self__
        self.name = meth.__name__
        self.kwargs = kwargs

    def __call__(self, *arg, **kw):
        kw.update(self.kwargs)
        return getattr(self.target, self.name)(*arg, **kw)


def class_hierarchy(cls):
    """Return an unordered sequence of all classes related to cls.

    Traverses diamond hierarchies.

    Fibs slightly: subclasses of builtin types are not returned.  Thus
    class_hierarchy(class A(object)) returns (A, object), not A plus every
    class systemwide that derives from object.

    Old-style classes are discarded and hierarchies rooted on them
    will not be descended.

    """
    if compat.py2k:
        if isinstance(cls, types.ClassType):
            return list()

    hier = {cls}
    process = list(cls.__mro__)
    while process:
        c = process.pop()
        if compat.py2k:
            if isinstance(c, types.ClassType):
                continue
            bases = (
                _
                for _ in c.__bases__
                if _ not in hier and not isinstance(_, types.ClassType)
            )
        else:
            bases = (_ for _ in c.__bases__ if _ not in hier)

        for b in bases:
            process.append(b)
            hier.add(b)

        if compat.py3k:
            if c.__module__ == "builtins" or not hasattr(c, "__subclasses__"):
                continue
        else:
            if c.__module__ == "__builtin__" or not hasattr(
                c, "__subclasses__"
            ):
                continue

        for s in [_ for _ in c.__subclasses__() if _ not in hier]:
            process.append(s)
            hier.add(s)
    return list(hier)


def iterate_attributes(cls):
    """iterate all the keys and attributes associated
    with a class, without using getattr().

    Does not use getattr() so that class-sensitive
    descriptors (i.e. property.__get__()) are not called.

    """
    keys = dir(cls)
    for key in keys:
        for c in cls.__mro__:
            if key in c.__dict__:
                yield (key, c.__dict__[key])
                break


def monkeypatch_proxied_specials(
    into_cls,
    from_cls,
    skip=None,
    only=None,
    name="self.proxy",
    from_instance=None,
):
    """Automates delegation of __specials__ for a proxying type."""

    if only:
        dunders = only
    else:
        if skip is None:
            skip = (
                "__slots__",
                "__del__",
                "__getattribute__",
                "__metaclass__",
                "__getstate__",
                "__setstate__",
            )
        dunders = [
            m
            for m in dir(from_cls)
            if (
                m.startswith("__")
                and m.endswith("__")
                and not hasattr(into_cls, m)
                and m not in skip
            )
        ]

    for method in dunders:
        try:
            fn = getattr(from_cls, method)
            if not hasattr(fn, "__call__"):
                continue
            fn = getattr(fn, "im_func", fn)
        except AttributeError:
            continue
        try:
            spec = compat.inspect_getfullargspec(fn)
            fn_args = compat.inspect_formatargspec(spec[0])
            d_args = compat.inspect_formatargspec(spec[0][1:])
        except TypeError:
            fn_args = "(self, *args, **kw)"
            d_args = "(*args, **kw)"

        py = (
            "def %(method)s%(fn_args)s: "
            "return %(name)s.%(method)s%(d_args)s" % locals()
        )

        env = from_instance is not None and {name: from_instance} or {}
        compat.exec_(py, env)
        try:
            env[method].__defaults__ = fn.__defaults__
        except AttributeError:
            pass
        setattr(into_cls, method, env[method])


def methods_equivalent(meth1, meth2):
    """Return True if the two methods are the same implementation."""

    return getattr(meth1, "__func__", meth1) is getattr(
        meth2, "__func__", meth2
    )


def as_interface(obj, cls=None, methods=None, required=None):
    """Ensure basic interface compliance for an instance or dict of callables.

    Checks that ``obj`` implements public methods of ``cls`` or has members
    listed in ``methods``.  If ``required`` is not supplied, implementing at
    least one interface method is sufficient.  Methods present on ``obj`` that
    are not in the interface are ignored.

    If ``obj`` is a dict and ``dict`` does not meet the interface
    requirements, the keys of the dictionary are inspected.  Keys present in
    ``obj`` that are not in the interface will raise TypeErrors.

    Raises TypeError if ``obj`` does not meet the interface criteria.

    In all passing cases, an object with callable members is returned.  In the
    simple case, ``obj`` is returned as-is; if dict processing kicks in then
    an anonymous class is returned.

    obj
      A type, instance, or dictionary of callables.
    cls
      Optional, a type.  All public methods of cls are considered the
      interface.  An ``obj`` instance of cls will always pass, ignoring
      ``required``.
    methods
      Optional, a sequence of method names to consider as the interface.
    required
      Optional, a sequence of mandatory implementations.  If omitted, an
      ``obj`` that provides at least one interface method is considered
      sufficient.  As a convenience, required may be a type, in which case
      all public methods of the type are required.

    """
    if not cls and not methods:
        raise TypeError("a class or collection of method names are required")

    if isinstance(cls, type) and isinstance(obj, cls):
        return obj

    interface = set(methods or [m for m in dir(cls) if not m.startswith("_")])
    implemented = set(dir(obj))

    complies = operator.ge
    if isinstance(required, type):
        required = interface
    elif not required:
        required = set()
        complies = operator.gt
    else:
        required = set(required)

    if complies(implemented.intersection(interface), required):
        return obj

    # No dict duck typing here.
    if not isinstance(obj, dict):
        qualifier = complies is operator.gt and "any of" or "all of"
        raise TypeError(
            "%r does not implement %s: %s"
            % (obj, qualifier, ", ".join(interface))
        )

    class AnonymousInterface(object):
        """A callable-holding shell."""

    if cls:
        AnonymousInterface.__name__ = "Anonymous" + cls.__name__
    found = set()

    for method, impl in dictlike_iteritems(obj):
        if method not in interface:
            raise TypeError("%r: unknown in this interface" % method)
        if not compat.callable(impl):
            raise TypeError("%r=%r is not callable" % (method, impl))
        setattr(AnonymousInterface, method, staticmethod(impl))
        found.add(method)

    if complies(found, required):
        return AnonymousInterface

    raise TypeError(
        "dictionary does not contain required keys %s"
        % ", ".join(required - found)
    )


class memoized_property(object):
    """A read-only @property that is only evaluated once."""

    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        if obj is None:
            return self
        obj.__dict__[self.__name__] = result = self.fget(obj)
        return result

    def _reset(self, obj):
        memoized_property.reset(obj, self.__name__)

    @classmethod
    def reset(cls, obj, name):
        obj.__dict__.pop(name, None)


def memoized_instancemethod(fn):
    """Decorate a method to memoize its return value.

    Best applied to no-arg methods: memoization is not sensitive to
    argument values, and will always return the same value even when
    called with different arguments.

    """

    def oneshot(self, *args, **kw):
        result = fn(self, *args, **kw)

        def memo(*a, **kw):
            return result

        memo.__name__ = fn.__name__
        memo.__doc__ = fn.__doc__
        self.__dict__[fn.__name__] = memo
        return result

    return update_wrapper(oneshot, fn)


class group_expirable_memoized_property(object):
    """A family of @memoized_properties that can be expired in tandem."""

    def __init__(self, attributes=()):
        self.attributes = []
        if attributes:
            self.attributes.extend(attributes)

    def expire_instance(self, instance):
        """Expire all memoized properties for *instance*."""
        stash = instance.__dict__
        for attribute in self.attributes:
            stash.pop(attribute, None)

    def __call__(self, fn):
        self.attributes.append(fn.__name__)
        return memoized_property(fn)

    def method(self, fn):
        self.attributes.append(fn.__name__)
        return memoized_instancemethod(fn)


class MemoizedSlots(object):
    """Apply memoized items to an object using a __getattr__ scheme.

    This allows the functionality of memoized_property and
    memoized_instancemethod to be available to a class using __slots__.

    """

    __slots__ = ()

    def _fallback_getattr(self, key):
        raise AttributeError(key)

    def __getattr__(self, key):
        if key.startswith("_memoized"):
            raise AttributeError(key)
        elif hasattr(self, "_memoized_attr_%s" % key):
            value = getattr(self, "_memoized_attr_%s" % key)()
            setattr(self, key, value)
            return value
        elif hasattr(self, "_memoized_method_%s" % key):
            fn = getattr(self, "_memoized_method_%s" % key)

            def oneshot(*args, **kw):
                result = fn(*args, **kw)

                def memo(*a, **kw):
                    return result

                memo.__name__ = fn.__name__
                memo.__doc__ = fn.__doc__
                setattr(self, key, memo)
                return result

            oneshot.__doc__ = fn.__doc__
            return oneshot
        else:
            return self._fallback_getattr(key)


def dependency_for(modulename, add_to_all=False):
    def decorate(obj):
        tokens = modulename.split(".")
        mod = compat.import_(
            ".".join(tokens[0:-1]), globals(), locals(), [tokens[-1]]
        )
        mod = getattr(mod, tokens[-1])
        setattr(mod, obj.__name__, obj)
        if add_to_all and hasattr(mod, "__all__"):
            mod.__all__.append(obj.__name__)
        return obj

    return decorate


class dependencies(object):
    """Apply imported dependencies as arguments to a function.

    E.g.::

        @util.dependencies(
            "sqlalchemy.sql.widget",
            "sqlalchemy.engine.default"
        )
        def some_func(self, widget, default, arg1, arg2, **kw):
            # ...

    Rationale is so that the impact of a dependency cycle can be
    associated directly with the few functions that cause the cycle,
    and not pollute the module-level namespace.

    """

    def __init__(self, *deps):
        self.import_deps = []
        for dep in deps:
            tokens = dep.split(".")
            self.import_deps.append(
                dependencies._importlater(".".join(tokens[0:-1]), tokens[-1])
            )

    def __call__(self, fn):
        import_deps = self.import_deps
        spec = compat.inspect_getfullargspec(fn)

        spec_zero = list(spec[0])
        hasself = spec_zero[0] in ("self", "cls")

        for i in range(len(import_deps)):
            spec[0][i + (1 if hasself else 0)] = "import_deps[%r]" % i

        inner_spec = format_argspec_plus(spec, grouped=False)

        for impname in import_deps:
            del spec_zero[1 if hasself else 0]
        spec[0][:] = spec_zero

        outer_spec = format_argspec_plus(spec, grouped=False)

        code = "lambda %(args)s: fn(%(apply_kw)s)" % {
            "args": outer_spec["args"],
            "apply_kw": inner_spec["apply_kw"],
        }

        decorated = eval(code, locals())
        decorated.__defaults__ = getattr(fn, "im_func", fn).__defaults__
        return update_wrapper(decorated, fn)

    @classmethod
    def resolve_all(cls, path):
        for m in list(dependencies._unresolved):
            if m._full_path.startswith(path):
                m._resolve()

    _unresolved = set()
    _by_key = {}

    class _importlater(object):
        _unresolved = set()
        _by_key = {}

        def __new__(cls, path, addtl):
            key = path + "." + addtl
            if key in dependencies._by_key:
                return dependencies._by_key[key]
            else:
                dependencies._by_key[key] = imp = object.__new__(cls)
                return imp

        def __init__(self, path, addtl):
            self._il_path = path
            self._il_addtl = addtl
            dependencies._unresolved.add(self)

        @property
        def _full_path(self):
            return self._il_path + "." + self._il_addtl

        @memoized_property
        def module(self):
            if self in dependencies._unresolved:
                raise ImportError(
                    "importlater.resolve_all() hasn't "
                    "been called (this is %s %s)"
                    % (self._il_path, self._il_addtl)
                )

            return getattr(self._initial_import, self._il_addtl)

        def _resolve(self):
            dependencies._unresolved.discard(self)
            self._initial_import = compat.import_(
                self._il_path, globals(), locals(), [self._il_addtl]
            )

        def __getattr__(self, key):
            if key == "module":
                raise ImportError(
                    "Could not resolve module %s" % self._full_path
                )
            try:
                attr = getattr(self.module, key)
            except AttributeError:
                raise AttributeError(
                    "Module %s has no attribute '%s'" % (self._full_path, key)
                )
            self.__dict__[key] = attr
            return attr


# from paste.deploy.converters
def asbool(obj):
    if isinstance(obj, compat.string_types):
        obj = obj.strip().lower()
        if obj in ["true", "yes", "on", "y", "t", "1"]:
            return True
        elif obj in ["false", "no", "off", "n", "f", "0"]:
            return False
        else:
            raise ValueError("String is not true/false: %r" % obj)
    return bool(obj)


def bool_or_str(*text):
    """Return a callable that will evaluate a string as
    boolean, or one of a set of "alternate" string values.

    """

    def bool_or_value(obj):
        if obj in text:
            return obj
        else:
            return asbool(obj)

    return bool_or_value


def asint(value):
    """Coerce to integer."""

    if value is None:
        return value
    return int(value)


def coerce_kw_type(kw, key, type_, flexi_bool=True, dest=None):
    r"""If 'key' is present in dict 'kw', coerce its value to type 'type\_' if
    necessary.  If 'flexi_bool' is True, the string '0' is considered false
    when coercing to boolean.

    """

    if dest is None:
        dest = kw

    if (
        key in kw
        and (not isinstance(type_, type) or not isinstance(kw[key], type_))
        and kw[key] is not None
    ):
        if type_ is bool and flexi_bool:
            dest[key] = asbool(kw[key])
        else:
            dest[key] = type_(kw[key])


def constructor_copy(obj, cls, *args, **kw):
    """Instantiate cls using the __dict__ of obj as constructor arguments.

    Uses inspect to match the named arguments of ``cls``.

    """

    names = get_cls_kwargs(cls)
    kw.update(
        (k, obj.__dict__[k]) for k in names.difference(kw) if k in obj.__dict__
    )
    return cls(*args, **kw)


def counter():
    """Return a threadsafe counter function."""

    lock = compat.threading.Lock()
    counter = itertools.count(1)

    # avoid the 2to3 "next" transformation...
    def _next():
        lock.acquire()
        try:
            return next(counter)
        finally:
            lock.release()

    return _next


def duck_type_collection(specimen, default=None):
    """Given an instance or class, guess if it is or is acting as one of
    the basic collection types: list, set and dict.  If the __emulates__
    property is present, return that preferentially.

    """
    if hasattr(specimen, "__emulates__"):
        # canonicalize set vs sets.Set to a standard: the builtin set
        if specimen.__emulates__ is not None and issubclass(
            specimen.__emulates__, set
        ):
            return set
        else:
            return specimen.__emulates__

    isa = isinstance(specimen, type) and issubclass or isinstance
    if isa(specimen, list):
        return list
    elif isa(specimen, set):
        return set
    elif isa(specimen, dict):
        return dict

    if hasattr(specimen, "append"):
        return list
    elif hasattr(specimen, "add"):
        return set
    elif hasattr(specimen, "set"):
        return dict
    else:
        return default


def assert_arg_type(arg, argtype, name):
    if isinstance(arg, argtype):
        return arg
    else:
        if isinstance(argtype, tuple):
            raise exc.ArgumentError(
                "Argument '%s' is expected to be one of type %s, got '%s'"
                % (name, " or ".join("'%s'" % a for a in argtype), type(arg))
            )
        else:
            raise exc.ArgumentError(
                "Argument '%s' is expected to be of type '%s', got '%s'"
                % (name, argtype, type(arg))
            )


def dictlike_iteritems(dictlike):
    """Return a (key, value) iterator for almost any dict-like object."""

    if compat.py3k:
        if hasattr(dictlike, "items"):
            return list(dictlike.items())
    else:
        if hasattr(dictlike, "iteritems"):
            return dictlike.iteritems()
        elif hasattr(dictlike, "items"):
            return iter(dictlike.items())

    getter = getattr(dictlike, "__getitem__", getattr(dictlike, "get", None))
    if getter is None:
        raise TypeError("Object '%r' is not dict-like" % dictlike)

    if hasattr(dictlike, "iterkeys"):

        def iterator():
            for key in dictlike.iterkeys():
                yield key, getter(key)

        return iterator()
    elif hasattr(dictlike, "keys"):
        return iter((key, getter(key)) for key in dictlike.keys())
    else:
        raise TypeError("Object '%r' is not dict-like" % dictlike)


class classproperty(property):
    """A decorator that behaves like @property except that it operates
    on classes rather than instances.

    The decorator is currently special when using the declarative
    module, but note that the
    :class:`~.sqlalchemy.ext.declarative.declared_attr`
    decorator should be used for this purpose with declarative.

    """

    def __init__(self, fget, *arg, **kw):
        super(classproperty, self).__init__(fget, *arg, **kw)
        self.__doc__ = fget.__doc__

    def __get__(desc, self, cls):
        return desc.fget(cls)


class hybridproperty(object):
    def __init__(self, func):
        self.func = func

    def __get__(self, instance, owner):
        if instance is None:
            clsval = self.func(owner)
            clsval.__doc__ = self.func.__doc__
            return clsval
        else:
            return self.func(instance)


class hybridmethod(object):
    """Decorate a function as cls- or instance- level."""

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, owner):
        if instance is None:
            return self.func.__get__(owner, owner.__class__)
        else:
            return self.func.__get__(instance, owner)


class _symbol(int):
    def __new__(self, name, doc=None, canonical=None):
        """Construct a new named symbol."""
        assert isinstance(name, compat.string_types)
        if canonical is None:
            canonical = hash(name)
        v = int.__new__(_symbol, canonical)
        v.name = name
        if doc:
            v.__doc__ = doc
        return v

    def __reduce__(self):
        return symbol, (self.name, "x", int(self))

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return "symbol(%r)" % self.name


_symbol.__name__ = "symbol"


class symbol(object):
    """A constant symbol.

    >>> symbol('foo') is symbol('foo')
    True
    >>> symbol('foo')
    symbol('foo')

    A slight refinement of the MAGICCOOKIE=object() pattern.  The primary
    advantage of symbol() is its repr().  They are also singletons.

    Repeated calls of symbol('name') will all return the same instance.

    The optional ``doc`` argument assigns to ``__doc__``.  This
    is strictly so that Sphinx autoattr picks up the docstring we want
    (it doesn't appear to pick up the in-module docstring if the datamember
    is in a different module - autoattribute also blows up completely).
    If Sphinx fixes/improves this then we would no longer need
    ``doc`` here.

    """

    symbols = {}
    _lock = compat.threading.Lock()

    def __new__(cls, name, doc=None, canonical=None):
        cls._lock.acquire()
        try:
            sym = cls.symbols.get(name)
            if sym is None:
                cls.symbols[name] = sym = _symbol(name, doc, canonical)
            return sym
        finally:
            symbol._lock.release()

    @classmethod
    def parse_user_argument(
        cls, arg, choices, name, resolve_symbol_names=False
    ):
        """Given a user parameter, parse the parameter into a chosen symbol.

        The user argument can be a string name that matches the name of a
        symbol, or the symbol object itself, or any number of alternate
        choices such as True/False/ None etc.

        :param arg: the user argument.

        :param choices: dictionary of symbol object to list of possible
         entries.

        :param name: name of the argument.   Used in an :class:`.ArgumentError`
         that is raised if the parameter doesn't match any available argument.

        :param resolve_symbol_names: include the name of each symbol as a valid
         entry.

        """
        # note using hash lookup is tricky here because symbol's `__hash__`
        # is its int value which we don't want included in the lookup
        # explicitly, so we iterate and compare each.
        for sym, choice in choices.items():
            if arg is sym:
                return sym
            elif resolve_symbol_names and arg == sym.name:
                return sym
            elif arg in choice:
                return sym

        if arg is None:
            return None

        raise exc.ArgumentError("Invalid value for '%s': %r" % (name, arg))


_creation_order = 1


def set_creation_order(instance):
    """Assign a '_creation_order' sequence to the given instance.

    This allows multiple instances to be sorted in order of creation
    (typically within a single thread; the counter is not particularly
    threadsafe).

    """
    global _creation_order
    instance._creation_order = _creation_order
    _creation_order += 1


def warn_exception(func, *args, **kwargs):
    """executes the given function, catches all exceptions and converts to
    a warning.

    """
    try:
        return func(*args, **kwargs)
    except Exception:
        warn("%s('%s') ignored" % sys.exc_info()[0:2])


def ellipses_string(value, len_=25):
    try:
        if len(value) > len_:
            return "%s..." % value[0:len_]
        else:
            return value
    except TypeError:
        return value


class _hash_limit_string(compat.text_type):
    """A string subclass that can only be hashed on a maximum amount
    of unique values.

    This is used for warnings so that we can send out parameterized warnings
    without the __warningregistry__ of the module, or the non-overridable
    "once" registry within warnings.py, overloading memory.

    """

    def __new__(cls, value, num, args):
        interpolated = (value % args) + (
            " (this warning may be suppressed after %d occurrences)" % num
        )
        self = super(_hash_limit_string, cls).__new__(cls, interpolated)
        self._hash = hash("%s_%d" % (value, hash(interpolated) % num))
        return self

    def __hash__(self):
        return self._hash

    def __eq__(self, other):
        return hash(self) == hash(other)


def warn(msg):
    """Issue a warning.

    If msg is a string, :class:`.exc.SAWarning` is used as
    the category.

    """
    warnings.warn(msg, exc.SAWarning, stacklevel=2)


def warn_limited(msg, args):
    """Issue a warning with a parameterized string, limiting the number
    of registrations.

    """
    if args:
        msg = _hash_limit_string(msg, 10, args)
    warnings.warn(msg, exc.SAWarning, stacklevel=2)


def only_once(fn, retry_on_exception):
    """Decorate the given function to be a no-op after it is called exactly
    once."""

    once = [fn]

    def go(*arg, **kw):
        # strong reference fn so that it isn't garbage collected,
        # which interferes with the event system's expectations
        strong_fn = fn  # noqa
        if once:
            once_fn = once.pop()
            try:
                return once_fn(*arg, **kw)
            except:
                if retry_on_exception:
                    once.insert(0, once_fn)
                raise

    return go


_SQLA_RE = re.compile(r"sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py")
_UNITTEST_RE = re.compile(r"unit(?:2|test2?/)")


def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE):
    """Chop extraneous lines off beginning and end of a traceback.

    :param tb:
      a list of traceback lines as returned by ``traceback.format_stack()``

    :param exclude_prefix:
      a regular expression object matching lines to skip at beginning of
      ``tb``

    :param exclude_suffix:
      a regular expression object matching lines to skip at end of ``tb``

    """
    start = 0
    end = len(tb) - 1
    while start <= end and exclude_prefix.search(tb[start]):
        start += 1
    while start <= end and exclude_suffix.search(tb[end]):
        end -= 1
    return tb[start : end + 1]


NoneType = type(None)


def attrsetter(attrname):
    code = "def set(obj, value):" "    obj.%s = value" % attrname
    env = locals().copy()
    exec(code, env)
    return env["set"]


class EnsureKWArgType(type):
    r"""Apply translation of functions to accept \**kw arguments if they
    don't already.

    """

    def __init__(cls, clsname, bases, clsdict):
        fn_reg = cls.ensure_kwarg
        if fn_reg:
            for key in clsdict:
                m = re.match(fn_reg, key)
                if m:
                    fn = clsdict[key]
                    spec = compat.inspect_getfullargspec(fn)
                    if not spec.varkw:
                        clsdict[key] = wrapped = cls._wrap_w_kw(fn)
                        setattr(cls, key, wrapped)
        super(EnsureKWArgType, cls).__init__(clsname, bases, clsdict)

    def _wrap_w_kw(self, fn):
        def wrap(*arg, **kw):
            return fn(*arg)

        return update_wrapper(wrap, fn)


def wrap_callable(wrapper, fn):
    """Augment functools.update_wrapper() to work with objects with
    a ``__call__()`` method.

    :param fn:
      object with __call__ method

    """
    if hasattr(fn, "__name__"):
        return update_wrapper(wrapper, fn)
    else:
        _f = wrapper
        _f.__name__ = fn.__class__.__name__
        if hasattr(fn, "__module__"):
            _f.__module__ = fn.__module__

        if hasattr(fn.__call__, "__doc__") and fn.__call__.__doc__:
            _f.__doc__ = fn.__call__.__doc__
        elif fn.__doc__:
            _f.__doc__ = fn.__doc__

        return _f


def quoted_token_parser(value):
    """Parse a dotted identifier with accommodation for quoted names.

    Includes support for SQL-style double quotes as a literal character.

    E.g.::

        >>> quoted_token_parser("name")
        ["name"]
        >>> quoted_token_parser("schema.name")
        ["schema", "name"]
        >>> quoted_token_parser('"Schema"."Name"')
        ['Schema', 'Name']
        >>> quoted_token_parser('"Schema"."Name""Foo"')
        ['Schema', 'Name""Foo']

    """

    if '"' not in value:
        return value.split(".")

    # 0 = outside of quotes
    # 1 = inside of quotes
    state = 0
    result = [[]]
    idx = 0
    lv = len(value)
    while idx < lv:
        char = value[idx]
        if char == '"':
            if state == 1 and idx < lv - 1 and value[idx + 1] == '"':
                result[-1].append('"')
                idx += 1
            else:
                state ^= 1
        elif char == "." and state == 0:
            result.append([])
        else:
            result[-1].append(char)
        idx += 1

    return ["".join(token) for token in result]


def add_parameter_text(params, text):
    params = _collections.to_list(params)

    def decorate(fn):
        doc = fn.__doc__ is not None and fn.__doc__ or ""
        if doc:
            doc = inject_param_text(doc, {param: text for param in params})
        fn.__doc__ = doc
        return fn

    return decorate


def _dedent_docstring(text):
    split_text = text.split("\n", 1)
    if len(split_text) == 1:
        return text
    else:
        firstline, remaining = split_text
    if not firstline.startswith(" "):
        return firstline + "\n" + textwrap.dedent(remaining)
    else:
        return textwrap.dedent(text)


def inject_docstring_text(doctext, injecttext, pos):
    doctext = _dedent_docstring(doctext or "")
    lines = doctext.split("\n")
    if len(lines) == 1:
        lines.append("")
    injectlines = textwrap.dedent(injecttext).split("\n")
    if injectlines[0]:
        injectlines.insert(0, "")

    blanks = [num for num, line in enumerate(lines) if not line.strip()]
    blanks.insert(0, 0)

    inject_pos = blanks[min(pos, len(blanks) - 1)]

    lines = lines[0:inject_pos] + injectlines + lines[inject_pos:]
    return "\n".join(lines)


def inject_param_text(doctext, inject_params):
    doclines = doctext.splitlines()
    lines = []

    to_inject = None
    while doclines:
        line = doclines.pop(0)
        if to_inject is None:
            m = re.match(r"(\s+):param (?:\\\*\*?)?(.+?):", line)
            if m:
                param = m.group(2)
                if param in inject_params:
                    # default indent to that of :param: plus one
                    indent = " " * len(m.group(1)) + " "

                    # but if the next line has text, use that line's
                    # indentation
                    if doclines:
                        m2 = re.match(r"(\s+)\S", doclines[0])
                        if m2:
                            indent = " " * len(m2.group(1))

                    to_inject = indent + inject_params[param]
        elif line.lstrip().startswith(":param "):
            lines.append("\n")
            lines.append(to_inject)
            lines.append("\n")
            to_inject = None
        elif not line.rstrip():
            lines.append(line)
            lines.append(to_inject)
            lines.append("\n")
            to_inject = None
        elif line.endswith("::"):
            # TODO: this still won't cover if the code example itself has
            # blank lines in it, need to detect those via indentation.
            lines.append(line)
            lines.append(
                doclines.pop(0)
            )  # the blank line following a code example
            continue
        lines.append(line)

    return "\n".join(lines)
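
# ---------------------------------------------------------------------------
# Editor's illustrative sketch, not part of the original module: a few of the
# helpers above exercised against the behavior their docstrings describe.
# Assumes only the documented behavior of symbol(), format_argspec_plus(),
# memoized_property and asbool(); run standalone with SQLAlchemy installed.

if __name__ == "__main__":
    from sqlalchemy.util.langhelpers import (
        asbool,
        format_argspec_plus,
        memoized_property,
        symbol,
    )

    # symbol() returns interned singletons with a readable repr().
    assert symbol("foo") is symbol("foo")
    print(repr(symbol("foo")))  # symbol('foo')

    # format_argspec_plus() introspects a callable into strings suitable
    # for code generation (this is what @decorator above builds on).
    spec = format_argspec_plus(lambda self, a, b=3, **kw: None)
    print(spec["args"])      # (self, a, b=3, **kw)
    print(spec["apply_kw"])  # (self, a, b=b, **kw)

    # memoized_property evaluates the fget exactly once and caches the
    # result in the instance __dict__, shadowing the descriptor.
    class Widget(object):
        calls = 0

        @memoized_property
        def expensive(self):
            Widget.calls += 1
            return 42

    w = Widget()
    assert w.expensive == 42 and w.expensive == 42 and Widget.calls == 1

    # asbool() coerces common true/false strings, case-insensitively.
    assert asbool(" Yes ") is True and asbool("0") is False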
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/ext/instrumentation.py
"""Extensible class instrumentation. The :mod:`sqlalchemy.ext.instrumentation` package provides for alternate systems of class instrumentation within the ORM. Class instrumentation refers to how the ORM places attributes on the class which maintain data and track changes to that data, as well as event hooks installed on the class. .. note:: The extension package is provided for the benefit of integration with other object management packages, which already perform their own instrumentation. It is not intended for general use. For examples of how the instrumentation extension is used, see the example :ref:`examples_instrumentation`. """ import weakref from .. import util from ..orm import attributes from ..orm import base as orm_base from ..orm import collections from ..orm import exc as orm_exc from ..orm import instrumentation as orm_instrumentation from ..orm.instrumentation import _default_dict_getter from ..orm.instrumentation import _default_manager_getter from ..orm.instrumentation import _default_state_getter from ..orm.instrumentation import ClassManager from ..orm.instrumentation import InstrumentationFactory INSTRUMENTATION_MANAGER = "__sa_instrumentation_manager__" """Attribute, elects custom instrumentation when present on a mapped class. Allows a class to specify a slightly or wildly different technique for tracking changes made to mapped attributes and collections. Only one instrumentation implementation is allowed in a given object inheritance hierarchy. The value of this attribute must be a callable and will be passed a class object. The callable must return one of: - An instance of an InstrumentationManager or subclass - An object implementing all or some of InstrumentationManager (TODO) - A dictionary of callables, implementing all or some of the above (TODO) - An instance of a ClassManager or subclass This attribute is consulted by SQLAlchemy instrumentation resolution, once the :mod:`sqlalchemy.ext.instrumentation` module has been imported. If custom finders are installed in the global instrumentation_finders list, they may or may not choose to honor this attribute. """ def find_native_user_instrumentation_hook(cls): """Find user-specified instrumentation management for a class.""" return getattr(cls, INSTRUMENTATION_MANAGER, None) instrumentation_finders = [find_native_user_instrumentation_hook] """An extensible sequence of callables which return instrumentation implementations When a class is registered, each callable will be passed a class object. If None is returned, the next finder in the sequence is consulted. Otherwise the return must be an instrumentation factory that follows the same guidelines as sqlalchemy.ext.instrumentation.INSTRUMENTATION_MANAGER. By default, the only finder is find_native_user_instrumentation_hook, which searches for INSTRUMENTATION_MANAGER. If all finders return None, standard ClassManager instrumentation is used. """ class ExtendedInstrumentationRegistry(InstrumentationFactory): """Extends :class:`.InstrumentationFactory` with additional bookkeeping, to accommodate multiple types of class managers. 
""" _manager_finders = weakref.WeakKeyDictionary() _state_finders = weakref.WeakKeyDictionary() _dict_finders = weakref.WeakKeyDictionary() _extended = False def _locate_extended_factory(self, class_): for finder in instrumentation_finders: factory = finder(class_) if factory is not None: manager = self._extended_class_manager(class_, factory) return manager, factory else: return None, None def _check_conflicts(self, class_, factory): existing_factories = self._collect_management_factories_for( class_ ).difference([factory]) if existing_factories: raise TypeError( "multiple instrumentation implementations specified " "in %s inheritance hierarchy: %r" % (class_.__name__, list(existing_factories)) ) def _extended_class_manager(self, class_, factory): manager = factory(class_) if not isinstance(manager, ClassManager): manager = _ClassInstrumentationAdapter(class_, manager) if factory != ClassManager and not self._extended: # somebody invoked a custom ClassManager. # reinstall global "getter" functions with the more # expensive ones. self._extended = True _install_instrumented_lookups() self._manager_finders[class_] = manager.manager_getter() self._state_finders[class_] = manager.state_getter() self._dict_finders[class_] = manager.dict_getter() return manager def _collect_management_factories_for(self, cls): """Return a collection of factories in play or specified for a hierarchy. Traverses the entire inheritance graph of a cls and returns a collection of instrumentation factories for those classes. Factories are extracted from active ClassManagers, if available, otherwise instrumentation_finders is consulted. """ hierarchy = util.class_hierarchy(cls) factories = set() for member in hierarchy: manager = self.manager_of_class(member) if manager is not None: factories.add(manager.factory) else: for finder in instrumentation_finders: factory = finder(member) if factory is not None: break else: factory = None factories.add(factory) factories.discard(None) return factories def unregister(self, class_): if class_ in self._manager_finders: del self._manager_finders[class_] del self._state_finders[class_] del self._dict_finders[class_] super(ExtendedInstrumentationRegistry, self).unregister(class_) def manager_of_class(self, cls): if cls is None: return None try: finder = self._manager_finders.get(cls, _default_manager_getter) except TypeError: # due to weakref lookup on invalid object return None else: return finder(cls) def state_of(self, instance): if instance is None: raise AttributeError("None has no persistent state.") return self._state_finders.get( instance.__class__, _default_state_getter )(instance) def dict_of(self, instance): if instance is None: raise AttributeError("None has no persistent state.") return self._dict_finders.get( instance.__class__, _default_dict_getter )(instance) orm_instrumentation._instrumentation_factory = ( _instrumentation_factory ) = ExtendedInstrumentationRegistry() orm_instrumentation.instrumentation_finders = instrumentation_finders class InstrumentationManager(object): """User-defined class instrumentation extension. :class:`.InstrumentationManager` can be subclassed in order to change how class instrumentation proceeds. This class exists for the purposes of integration with other object management frameworks which would like to entirely modify the instrumentation methodology of the ORM, and is not intended for regular usage. For interception of class instrumentation events, see :class:`.InstrumentationEvents`. 
The API for this class should be considered as semi-stable, and may change slightly with new releases. """ # r4361 added a mandatory (cls) constructor to this interface. # given that, perhaps class_ should be dropped from all of these # signatures. def __init__(self, class_): pass def manage(self, class_, manager): setattr(class_, "_default_class_manager", manager) def dispose(self, class_, manager): delattr(class_, "_default_class_manager") def manager_getter(self, class_): def get(cls): return cls._default_class_manager return get def instrument_attribute(self, class_, key, inst): pass def post_configure_attribute(self, class_, key, inst): pass def install_descriptor(self, class_, key, inst): setattr(class_, key, inst) def uninstall_descriptor(self, class_, key): delattr(class_, key) def install_member(self, class_, key, implementation): setattr(class_, key, implementation) def uninstall_member(self, class_, key): delattr(class_, key) def instrument_collection_class(self, class_, key, collection_class): return collections.prepare_instrumentation(collection_class) def get_instance_dict(self, class_, instance): return instance.__dict__ def initialize_instance_dict(self, class_, instance): pass def install_state(self, class_, instance, state): setattr(instance, "_default_state", state) def remove_state(self, class_, instance): delattr(instance, "_default_state") def state_getter(self, class_): return lambda instance: getattr(instance, "_default_state") def dict_getter(self, class_): return lambda inst: self.get_instance_dict(class_, inst) class _ClassInstrumentationAdapter(ClassManager): """Adapts a user-defined InstrumentationManager to a ClassManager.""" def __init__(self, class_, override): self._adapted = override self._get_state = self._adapted.state_getter(class_) self._get_dict = self._adapted.dict_getter(class_) ClassManager.__init__(self, class_) def manage(self): self._adapted.manage(self.class_, self) def dispose(self): self._adapted.dispose(self.class_) def manager_getter(self): return self._adapted.manager_getter(self.class_) def instrument_attribute(self, key, inst, propagated=False): ClassManager.instrument_attribute(self, key, inst, propagated) if not propagated: self._adapted.instrument_attribute(self.class_, key, inst) def post_configure_attribute(self, key): super(_ClassInstrumentationAdapter, self).post_configure_attribute(key) self._adapted.post_configure_attribute(self.class_, key, self[key]) def install_descriptor(self, key, inst): self._adapted.install_descriptor(self.class_, key, inst) def uninstall_descriptor(self, key): self._adapted.uninstall_descriptor(self.class_, key) def install_member(self, key, implementation): self._adapted.install_member(self.class_, key, implementation) def uninstall_member(self, key): self._adapted.uninstall_member(self.class_, key) def instrument_collection_class(self, key, collection_class): return self._adapted.instrument_collection_class( self.class_, key, collection_class ) def initialize_collection(self, key, state, factory): delegate = getattr(self._adapted, "initialize_collection", None) if delegate: return delegate(key, state, factory) else: return ClassManager.initialize_collection( self, key, state, factory ) def new_instance(self, state=None): instance = self.class_.__new__(self.class_) self.setup_instance(instance, state) return instance def _new_state_if_none(self, instance): """Install a default InstanceState if none is present. A private convenience method used by the __init__ decorator. 
""" if self.has_state(instance): return False else: return self.setup_instance(instance) def setup_instance(self, instance, state=None): self._adapted.initialize_instance_dict(self.class_, instance) if state is None: state = self._state_constructor(instance, self) # the given instance is assumed to have no state self._adapted.install_state(self.class_, instance, state) return state def teardown_instance(self, instance): self._adapted.remove_state(self.class_, instance) def has_state(self, instance): try: self._get_state(instance) except orm_exc.NO_STATE: return False else: return True def state_getter(self): return self._get_state def dict_getter(self): return self._get_dict def _install_instrumented_lookups(): """Replace global class/object management functions with ExtendedInstrumentationRegistry implementations, which allow multiple types of class managers to be present, at the cost of performance. This function is called only by ExtendedInstrumentationRegistry and unit tests specific to this behavior. The _reinstall_default_lookups() function can be called after this one to re-establish the default functions. """ _install_lookups( dict( instance_state=_instrumentation_factory.state_of, instance_dict=_instrumentation_factory.dict_of, manager_of_class=_instrumentation_factory.manager_of_class, ) ) def _reinstall_default_lookups(): """Restore simplified lookups.""" _install_lookups( dict( instance_state=_default_state_getter, instance_dict=_default_dict_getter, manager_of_class=_default_manager_getter, ) ) _instrumentation_factory._extended = False def _install_lookups(lookups): global instance_state, instance_dict, manager_of_class instance_state = lookups["instance_state"] instance_dict = lookups["instance_dict"] manager_of_class = lookups["manager_of_class"] orm_base.instance_state = ( attributes.instance_state ) = orm_instrumentation.instance_state = instance_state orm_base.instance_dict = ( attributes.instance_dict ) = orm_instrumentation.instance_dict = instance_dict orm_base.manager_of_class = ( attributes.manager_of_class ) = orm_instrumentation.manager_of_class = manager_of_class
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/ext/compiler.py
# ext/compiler.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php r"""Provides an API for creation of custom ClauseElements and compilers. Synopsis ======== Usage involves the creation of one or more :class:`~sqlalchemy.sql.expression.ClauseElement` subclasses and one or more callables defining its compilation:: from sqlalchemy.ext.compiler import compiles from sqlalchemy.sql.expression import ColumnClause class MyColumn(ColumnClause): pass @compiles(MyColumn) def compile_mycolumn(element, compiler, **kw): return "[%s]" % element.name Above, ``MyColumn`` extends :class:`~sqlalchemy.sql.expression.ColumnClause`, the base expression element for named column objects. The ``compiles`` decorator registers itself with the ``MyColumn`` class so that it is invoked when the object is compiled to a string:: from sqlalchemy import select s = select([MyColumn('x'), MyColumn('y')]) print(str(s)) Produces:: SELECT [x], [y] Dialect-specific compilation rules ================================== Compilers can also be made dialect-specific. The appropriate compiler will be invoked for the dialect in use:: from sqlalchemy.schema import DDLElement class AlterColumn(DDLElement): def __init__(self, column, cmd): self.column = column self.cmd = cmd @compiles(AlterColumn) def visit_alter_column(element, compiler, **kw): return "ALTER COLUMN %s ..." % element.column.name @compiles(AlterColumn, 'postgresql') def visit_alter_column(element, compiler, **kw): return "ALTER TABLE %s ALTER COLUMN %s ..." % (element.table.name, element.column.name) The second ``visit_alter_table`` will be invoked when any ``postgresql`` dialect is used. Compiling sub-elements of a custom expression construct ======================================================= The ``compiler`` argument is the :class:`~sqlalchemy.engine.interfaces.Compiled` object in use. This object can be inspected for any information about the in-progress compilation, including ``compiler.dialect``, ``compiler.statement`` etc. The :class:`~sqlalchemy.sql.compiler.SQLCompiler` and :class:`~sqlalchemy.sql.compiler.DDLCompiler` both include a ``process()`` method which can be used for compilation of embedded attributes:: from sqlalchemy.sql.expression import Executable, ClauseElement class InsertFromSelect(Executable, ClauseElement): def __init__(self, table, select): self.table = table self.select = select @compiles(InsertFromSelect) def visit_insert_from_select(element, compiler, **kw): return "INSERT INTO %s (%s)" % ( compiler.process(element.table, asfrom=True, **kw), compiler.process(element.select, **kw) ) insert = InsertFromSelect(t1, select([t1]).where(t1.c.x>5)) print(insert) Produces:: "INSERT INTO mytable (SELECT mytable.x, mytable.y, mytable.z FROM mytable WHERE mytable.x > :x_1)" .. note:: The above ``InsertFromSelect`` construct is only an example, this actual functionality is already available using the :meth:`_expression.Insert.from_select` method. .. note:: The above ``InsertFromSelect`` construct probably wants to have "autocommit" enabled. See :ref:`enabling_compiled_autocommit` for this step. Cross Compiling between SQL and DDL compilers --------------------------------------------- SQL and DDL constructs are each compiled using different base compilers - ``SQLCompiler`` and ``DDLCompiler``. 
A common need is to access the compilation rules of SQL expressions from within a DDL expression. The ``DDLCompiler`` includes an accessor ``sql_compiler`` for this reason, such as below where we generate a CHECK constraint that embeds a SQL expression:: @compiles(MyConstraint) def compile_my_constraint(constraint, ddlcompiler, **kw): kw['literal_binds'] = True return "CONSTRAINT %s CHECK (%s)" % ( constraint.name, ddlcompiler.sql_compiler.process( constraint.expression, **kw) ) Above, we add an additional flag to the process step as called by :meth:`.SQLCompiler.process`, which is the ``literal_binds`` flag. This indicates that any SQL expression which refers to a :class:`.BindParameter` object or other "literal" object such as those which refer to strings or integers should be rendered **in-place**, rather than being referred to as a bound parameter; when emitting DDL, bound parameters are typically not supported. .. _enabling_compiled_autocommit: Enabling Autocommit on a Construct ================================== Recall from the section :ref:`autocommit` that the :class:`_engine.Engine`, when asked to execute a construct in the absence of a user-defined transaction, detects if the given construct represents DML or DDL, that is, a data modification or data definition statement, which requires (or may require, in the case of DDL) that the transaction generated by the DBAPI be committed (recall that DBAPI always has a transaction going on regardless of what SQLAlchemy does). Checking for this is actually accomplished by checking for the "autocommit" execution option on the construct. When building a construct like an INSERT derivation, a new DDL type, or perhaps a stored procedure that alters data, the "autocommit" option needs to be set in order for the statement to function with "connectionless" execution (as described in :ref:`dbengine_implicit`). Currently a quick way to do this is to subclass :class:`.Executable`, then add the "autocommit" flag to the ``_execution_options`` dictionary (note this is a "frozen" dictionary which supplies a generative ``union()`` method):: from sqlalchemy.sql.expression import Executable, ClauseElement class MyInsertThing(Executable, ClauseElement): _execution_options = \ Executable._execution_options.union({'autocommit': True}) More succinctly, if the construct is truly similar to an INSERT, UPDATE, or DELETE, :class:`.UpdateBase` can be used, which already is a subclass of :class:`.Executable`, :class:`_expression.ClauseElement` and includes the ``autocommit`` flag:: from sqlalchemy.sql.expression import UpdateBase class MyInsertThing(UpdateBase): def __init__(self, ...): ... DDL elements that subclass :class:`.DDLElement` already have the "autocommit" flag turned on. Changing the default compilation of existing constructs ======================================================= The compiler extension applies just as well to the existing constructs. When overriding the compilation of a built in SQL construct, the @compiles decorator is invoked upon the appropriate class (be sure to use the class, i.e. ``Insert`` or ``Select``, instead of the creation function such as ``insert()`` or ``select()``). Within the new compilation function, to get at the "original" compilation routine, use the appropriate visit_XXX method - this because compiler.process() will call upon the overriding routine and cause an endless loop. 
Such as, to add "prefix" to all insert statements:: from sqlalchemy.sql.expression import Insert @compiles(Insert) def prefix_inserts(insert, compiler, **kw): return compiler.visit_insert(insert.prefix_with("some prefix"), **kw) The above compiler will prefix all INSERT statements with "some prefix" when compiled. .. _type_compilation_extension: Changing Compilation of Types ============================= ``compiler`` works for types, too, such as below where we implement the MS-SQL specific 'max' keyword for ``String``/``VARCHAR``:: @compiles(String, 'mssql') @compiles(VARCHAR, 'mssql') def compile_varchar(element, compiler, **kw): if element.length == 'max': return "VARCHAR('max')" else: return compiler.visit_VARCHAR(element, **kw) foo = Table('foo', metadata, Column('data', VARCHAR('max')) ) Subclassing Guidelines ====================== A big part of using the compiler extension is subclassing SQLAlchemy expression constructs. To make this easier, the expression and schema packages feature a set of "bases" intended for common tasks. A synopsis is as follows: * :class:`~sqlalchemy.sql.expression.ClauseElement` - This is the root expression class. Any SQL expression can be derived from this base, and is probably the best choice for longer constructs such as specialized INSERT statements. * :class:`~sqlalchemy.sql.expression.ColumnElement` - The root of all "column-like" elements. Anything that you'd place in the "columns" clause of a SELECT statement (as well as order by and group by) can derive from this - the object will automatically have Python "comparison" behavior. :class:`~sqlalchemy.sql.expression.ColumnElement` classes want to have a ``type`` member which is expression's return type. This can be established at the instance level in the constructor, or at the class level if its generally constant:: class timestamp(ColumnElement): type = TIMESTAMP() * :class:`~sqlalchemy.sql.functions.FunctionElement` - This is a hybrid of a ``ColumnElement`` and a "from clause" like object, and represents a SQL function or stored procedure type of call. Since most databases support statements along the line of "SELECT FROM <some function>" ``FunctionElement`` adds in the ability to be used in the FROM clause of a ``select()`` construct:: from sqlalchemy.sql.expression import FunctionElement class coalesce(FunctionElement): name = 'coalesce' @compiles(coalesce) def compile(element, compiler, **kw): return "coalesce(%s)" % compiler.process(element.clauses, **kw) @compiles(coalesce, 'oracle') def compile(element, compiler, **kw): if len(element.clauses) > 2: raise TypeError("coalesce only supports two arguments on Oracle") return "nvl(%s)" % compiler.process(element.clauses, **kw) * :class:`~sqlalchemy.schema.DDLElement` - The root of all DDL expressions, like CREATE TABLE, ALTER TABLE, etc. Compilation of ``DDLElement`` subclasses is issued by a ``DDLCompiler`` instead of a ``SQLCompiler``. ``DDLElement`` also features ``Table`` and ``MetaData`` event hooks via the ``execute_at()`` method, allowing the construct to be invoked during CREATE TABLE and DROP TABLE sequences. * :class:`~sqlalchemy.sql.expression.Executable` - This is a mixin which should be used with any expression class that represents a "standalone" SQL statement that can be passed directly to an ``execute()`` method. It is already implicit within ``DDLElement`` and ``FunctionElement``. 
Further Examples
================

"UTC timestamp" function
-------------------------

A function that works like "CURRENT_TIMESTAMP" except that it applies the
appropriate conversions so that the time is in UTC.  Timestamps are best
stored in relational databases as UTC, without time zones.  UTC so that your
database doesn't think time has gone backwards in the hour when daylight
saving time ends, without timezones because timezones are like character
encodings - they're best applied only at the endpoints of an application
(i.e. convert to UTC upon user input, re-apply desired timezone upon
display).

For PostgreSQL and Microsoft SQL Server::

    from sqlalchemy.sql import expression
    from sqlalchemy.ext.compiler import compiles
    from sqlalchemy.types import DateTime

    class utcnow(expression.FunctionElement):
        type = DateTime()

    @compiles(utcnow, 'postgresql')
    def pg_utcnow(element, compiler, **kw):
        return "TIMEZONE('utc', CURRENT_TIMESTAMP)"

    @compiles(utcnow, 'mssql')
    def ms_utcnow(element, compiler, **kw):
        return "GETUTCDATE()"

Example usage::

    from sqlalchemy import (
                Table, Column, Integer, String, DateTime, MetaData
            )
    metadata = MetaData()
    event = Table("event", metadata,
        Column("id", Integer, primary_key=True),
        Column("description", String(50), nullable=False),
        Column("timestamp", DateTime, server_default=utcnow())
    )

"GREATEST" function
-------------------

The "GREATEST" function is given any number of arguments and returns the one
that is of the highest value - it's equivalent to Python's ``max`` function.
A SQL standard version, as well as a CASE based version which only
accommodates two arguments::

    from sqlalchemy.sql import expression, case
    from sqlalchemy.ext.compiler import compiles
    from sqlalchemy.types import Numeric

    class greatest(expression.FunctionElement):
        type = Numeric()
        name = 'greatest'

    @compiles(greatest)
    def default_greatest(element, compiler, **kw):
        return compiler.visit_function(element)

    @compiles(greatest, 'sqlite')
    @compiles(greatest, 'mssql')
    @compiles(greatest, 'oracle')
    def case_greatest(element, compiler, **kw):
        arg1, arg2 = list(element.clauses)
        return compiler.process(case([(arg1 > arg2, arg1)], else_=arg2), **kw)

Example usage::

    Session.query(Account).\
            filter(
                greatest(
                    Account.checking_balance,
                    Account.savings_balance) > 10000
            )

"false" expression
------------------

Render a "false" constant expression, which renders as "0" on platforms that
don't have a "false" constant::

    from sqlalchemy.sql import expression
    from sqlalchemy.ext.compiler import compiles

    class sql_false(expression.ColumnElement):
        pass

    @compiles(sql_false)
    def default_false(element, compiler, **kw):
        return "false"

    @compiles(sql_false, 'mssql')
    @compiles(sql_false, 'mysql')
    @compiles(sql_false, 'oracle')
    def int_false(element, compiler, **kw):
        return "0"

Example usage::

    from sqlalchemy import select, union_all

    exp = union_all(
        select([users.c.name, sql_false().label("enrolled")]),
        select([customers.c.name, customers.c.enrolled])
    )

"""
from .. import exc
from .. import util
from ..sql import visitors


def compiles(class_, *specs):
    """Register a function as a compiler for a given
    :class:`_expression.ClauseElement` type."""

    def decorate(fn):
        # get an existing @compiles handler
        existing = class_.__dict__.get("_compiler_dispatcher", None)

        # get the original handler.  All ClauseElement classes have one
        # of these, but some TypeEngine classes will not.
existing_dispatch = getattr(class_, "_compiler_dispatch", None) if not existing: existing = _dispatcher() if existing_dispatch: def _wrap_existing_dispatch(element, compiler, **kw): try: return existing_dispatch(element, compiler, **kw) except exc.UnsupportedCompilationError as uce: util.raise_( exc.CompileError( "%s construct has no default " "compilation handler." % type(element) ), from_=uce, ) existing.specs["default"] = _wrap_existing_dispatch # TODO: why is the lambda needed ? setattr( class_, "_compiler_dispatch", lambda *arg, **kw: existing(*arg, **kw), ) setattr(class_, "_compiler_dispatcher", existing) if specs: for s in specs: existing.specs[s] = fn else: existing.specs["default"] = fn return fn return decorate def deregister(class_): """Remove all custom compilers associated with a given :class:`_expression.ClauseElement` type.""" if hasattr(class_, "_compiler_dispatcher"): # regenerate default _compiler_dispatch visitors._generate_dispatch(class_) # remove custom directive del class_._compiler_dispatcher class _dispatcher(object): def __init__(self): self.specs = {} def __call__(self, element, compiler, **kw): # TODO: yes, this could also switch off of DBAPI in use. fn = self.specs.get(compiler.dialect.name, None) if not fn: try: fn = self.specs["default"] except KeyError as ke: util.raise_( exc.CompileError( "%s construct has no default " "compilation handler." % type(element) ), replace_context=ke, ) return fn(element, compiler, **kw)
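# A small self-contained sketch (not part of the library, runnable via
# ``python -m sqlalchemy.ext.compiler``) illustrating how the dispatcher
# above selects a handler: a spec keyed to ``compiler.dialect.name`` wins,
# otherwise the "default" entry is used.  The ``right_now`` construct is
# hypothetical.
if __name__ == "__main__":
    from sqlalchemy.dialects import postgresql, sqlite
    from sqlalchemy.sql import expression

    class right_now(expression.ColumnElement):
        pass

    @compiles(right_now)
    def visit_right_now(element, compiler, **kw):
        return "CURRENT_TIMESTAMP"

    @compiles(right_now, "sqlite")
    def visit_right_now_sqlite(element, compiler, **kw):
        return "DATETIME('now')"

    # the 'sqlite' spec is chosen for the SQLite dialect ...
    print(right_now().compile(dialect=sqlite.dialect()))
    # ... and the "default" spec is chosen for everything else
    print(right_now().compile(dialect=postgresql.dialect()))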
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/ext/orderinglist.py
# ext/orderinglist.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""A custom list that manages index/position information for contained
elements.

:author: Jason Kirtland

``orderinglist`` is a helper for mutable ordered relationships.  It will
intercept list operations performed on a :func:`_orm.relationship`-managed
collection and automatically synchronize changes in list position onto a
target scalar attribute.

Example: A ``slide`` table, where each row refers to zero or more entries in
a related ``bullet`` table.  The bullets within a slide are displayed in
order based on the value of the ``position`` column in the ``bullet`` table.
As entries are reordered in memory, the value of the ``position`` attribute
should be updated to reflect the new sort order::


    Base = declarative_base()

    class Slide(Base):
        __tablename__ = 'slide'

        id = Column(Integer, primary_key=True)
        name = Column(String)

        bullets = relationship("Bullet", order_by="Bullet.position")

    class Bullet(Base):
        __tablename__ = 'bullet'
        id = Column(Integer, primary_key=True)
        slide_id = Column(Integer, ForeignKey('slide.id'))
        position = Column(Integer)
        text = Column(String)

The standard relationship mapping will produce a list-like attribute on each
``Slide`` containing all related ``Bullet`` objects, but coping with changes
in ordering is not handled automatically.  When appending a ``Bullet`` into
``Slide.bullets``, the ``Bullet.position`` attribute will remain unset until
manually assigned.  When the ``Bullet`` is inserted into the middle of the
list, the following ``Bullet`` objects will also need to be renumbered.

The :class:`.OrderingList` object automates this task, managing the
``position`` attribute on all ``Bullet`` objects in the collection.  It is
constructed using the :func:`.ordering_list` factory::

    from sqlalchemy.ext.orderinglist import ordering_list

    Base = declarative_base()

    class Slide(Base):
        __tablename__ = 'slide'

        id = Column(Integer, primary_key=True)
        name = Column(String)

        bullets = relationship("Bullet", order_by="Bullet.position",
                                collection_class=ordering_list('position'))

    class Bullet(Base):
        __tablename__ = 'bullet'
        id = Column(Integer, primary_key=True)
        slide_id = Column(Integer, ForeignKey('slide.id'))
        position = Column(Integer)
        text = Column(String)

With the above mapping, the ``Bullet.position`` attribute is managed::

    >>> s = Slide()
    >>> s.bullets.append(Bullet())
    >>> s.bullets.append(Bullet())
    >>> s.bullets[1].position
    1

    >>> s.bullets.insert(1, Bullet())
    >>> s.bullets[2].position
    2

The :class:`.OrderingList` construct only works with **changes** to a
collection, and not the initial load from the database, and requires that
the list be sorted when loaded.  Therefore, be sure to specify ``order_by``
on the :func:`_orm.relationship` against the target ordering attribute, so
that the ordering is correct when first loaded.

.. warning::

  :class:`.OrderingList` only provides limited functionality when a primary
  key column or unique column is the target of the sort.  Operations
  that are unsupported or are problematic include:

    * two entries must trade values.  This is not supported directly in the
      case of a primary key or unique constraint because it means at least
      one row would need to be temporarily removed first, or changed to
      a third, neutral value while the switch occurs.

    * an entry must be deleted in order to make room for a new entry.
SQLAlchemy's unit of work performs all INSERTs before DELETEs within a single flush. In the case of a primary key, it will trade an INSERT/DELETE of the same primary key for an UPDATE statement in order to lessen the impact of this limitation, however this does not take place for a UNIQUE column. A future feature will allow the "DELETE before INSERT" behavior to be possible, alleviating this limitation, though this feature will require explicit configuration at the mapper level for sets of columns that are to be handled in this way. :func:`.ordering_list` takes the name of the related object's ordering attribute as an argument. By default, the zero-based integer index of the object's position in the :func:`.ordering_list` is synchronized with the ordering attribute: index 0 will get position 0, index 1 position 1, etc. To start numbering at 1 or some other integer, provide ``count_from=1``. """ from .. import util from ..orm.collections import collection from ..orm.collections import collection_adapter __all__ = ["ordering_list"] def ordering_list(attr, count_from=None, **kw): """Prepares an :class:`OrderingList` factory for use in mapper definitions. Returns an object suitable for use as an argument to a Mapper relationship's ``collection_class`` option. e.g.:: from sqlalchemy.ext.orderinglist import ordering_list class Slide(Base): __tablename__ = 'slide' id = Column(Integer, primary_key=True) name = Column(String) bullets = relationship("Bullet", order_by="Bullet.position", collection_class=ordering_list('position')) :param attr: Name of the mapped attribute to use for storage and retrieval of ordering information :param count_from: Set up an integer-based ordering, starting at ``count_from``. For example, ``ordering_list('pos', count_from=1)`` would create a 1-based list in SQL, storing the value in the 'pos' column. Ignored if ``ordering_func`` is supplied. Additional arguments are passed to the :class:`.OrderingList` constructor. """ kw = _unsugar_count_from(count_from=count_from, **kw) return lambda: OrderingList(attr, **kw) # Ordering utility functions def count_from_0(index, collection): """Numbering function: consecutive integers starting at 0.""" return index def count_from_1(index, collection): """Numbering function: consecutive integers starting at 1.""" return index + 1 def count_from_n_factory(start): """Numbering function: consecutive integers starting at arbitrary start.""" def f(index, collection): return index + start try: f.__name__ = "count_from_%i" % start except TypeError: pass return f def _unsugar_count_from(**kw): """Builds counting functions from keyword arguments. Keyword argument filter, prepares a simple ``ordering_func`` from a ``count_from`` argument, otherwise passes ``ordering_func`` on unchanged. """ count_from = kw.pop("count_from", None) if kw.get("ordering_func", None) is None and count_from is not None: if count_from == 0: kw["ordering_func"] = count_from_0 elif count_from == 1: kw["ordering_func"] = count_from_1 else: kw["ordering_func"] = count_from_n_factory(count_from) return kw class OrderingList(list): """A custom list that manages position information for its children. The :class:`.OrderingList` object is normally set up using the :func:`.ordering_list` factory function, used in conjunction with the :func:`_orm.relationship` function. """ def __init__( self, ordering_attr=None, ordering_func=None, reorder_on_append=False ): """A custom list that manages position information for its children. 
``OrderingList`` is a ``collection_class`` list implementation that
        syncs position in a Python list with a position attribute on the
        mapped objects.

        This implementation relies on the list starting in the proper order,
        so be **sure** to put an ``order_by`` on your relationship.

        :param ordering_attr:
          Name of the attribute that stores the object's order in the
          relationship.

        :param ordering_func: Optional.  A function that maps the position in
          the Python list to a value to store in the
          ``ordering_attr``.  Values returned are usually (but need not be!)
          integers.

          An ``ordering_func`` is called with two positional parameters: the
          index of the element in the list, and the list itself.

          If omitted, Python list indexes are used for the attribute values.
          Two basic pre-built numbering functions are provided in this module:
          ``count_from_0`` and ``count_from_1``.  For more exotic examples
          like stepped numbering, alphabetical and Fibonacci numbering, see
          the unit tests.

        :param reorder_on_append:
          Default False.  When appending an object with an existing (non-None)
          ordering value, that value will be left untouched unless
          ``reorder_on_append`` is true.  This is an optimization to avoid a
          variety of dangerous unexpected database writes.

          SQLAlchemy will add instances to the list via append() when your
          object loads.  If for some reason the result set from the database
          skips a step in the ordering (say, row '1' is missing but you get
          '2', '3', and '4'), reorder_on_append=True would immediately
          renumber the items to '1', '2', '3'.  If you have multiple sessions
          making changes, any of whom happen to load this collection even in
          passing, all of the sessions would try to "clean up" the numbering
          in their commits, possibly causing all but one to fail with a
          concurrent modification error.

          It is recommended to leave this set to the default of False, and to
          call ``reorder()`` manually if you're doing ``append()`` operations
          with previously ordered instances, or when doing some housekeeping
          after manual SQL operations.

        """
        self.ordering_attr = ordering_attr
        if ordering_func is None:
            ordering_func = count_from_0
        self.ordering_func = ordering_func
        self.reorder_on_append = reorder_on_append

    # More complex serialization schemes (multi column, e.g.) are possible by
    # subclassing and reimplementing these two methods.
    def _get_order_value(self, entity):
        return getattr(entity, self.ordering_attr)

    def _set_order_value(self, entity, value):
        setattr(entity, self.ordering_attr, value)

    def reorder(self):
        """Synchronize ordering for the entire collection.

        Sweeps through the list and ensures that each object has accurate
        ordering information set.
""" for index, entity in enumerate(self): self._order_entity(index, entity, True) # As of 0.5, _reorder is no longer semi-private _reorder = reorder def _order_entity(self, index, entity, reorder=True): have = self._get_order_value(entity) # Don't disturb existing ordering if reorder is False if have is not None and not reorder: return should_be = self.ordering_func(index, self) if have != should_be: self._set_order_value(entity, should_be) def append(self, entity): super(OrderingList, self).append(entity) self._order_entity(len(self) - 1, entity, self.reorder_on_append) def _raw_append(self, entity): """Append without any ordering behavior.""" super(OrderingList, self).append(entity) _raw_append = collection.adds(1)(_raw_append) def insert(self, index, entity): super(OrderingList, self).insert(index, entity) self._reorder() def remove(self, entity): super(OrderingList, self).remove(entity) adapter = collection_adapter(self) if adapter and adapter._referenced_by_owner: self._reorder() def pop(self, index=-1): entity = super(OrderingList, self).pop(index) self._reorder() return entity def __setitem__(self, index, entity): if isinstance(index, slice): step = index.step or 1 start = index.start or 0 if start < 0: start += len(self) stop = index.stop or len(self) if stop < 0: stop += len(self) for i in range(start, stop, step): self.__setitem__(i, entity[i]) else: self._order_entity(index, entity, True) super(OrderingList, self).__setitem__(index, entity) def __delitem__(self, index): super(OrderingList, self).__delitem__(index) self._reorder() def __setslice__(self, start, end, values): super(OrderingList, self).__setslice__(start, end, values) self._reorder() def __delslice__(self, start, end): super(OrderingList, self).__delslice__(start, end) self._reorder() def __reduce__(self): return _reconstitute, (self.__class__, self.__dict__, list(self)) for func_name, func in list(locals().items()): if ( util.callable(func) and func.__name__ == func_name and not func.__doc__ and hasattr(list, func_name) ): func.__doc__ = getattr(list, func_name).__doc__ del func_name, func def _reconstitute(cls, dict_, items): """ Reconstitute an :class:`.OrderingList`. This is the adjoint to :meth:`.OrderingList.__reduce__`. It is used for unpickling :class:`.OrderingList` objects. """ obj = cls.__new__(cls) obj.__dict__.update(dict_) list.extend(obj, items) return obj
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/ext/hybrid.py
# ext/hybrid.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php r"""Define attributes on ORM-mapped classes that have "hybrid" behavior. "hybrid" means the attribute has distinct behaviors defined at the class level and at the instance level. The :mod:`~sqlalchemy.ext.hybrid` extension provides a special form of method decorator, is around 50 lines of code and has almost no dependencies on the rest of SQLAlchemy. It can, in theory, work with any descriptor-based expression system. Consider a mapping ``Interval``, representing integer ``start`` and ``end`` values. We can define higher level functions on mapped classes that produce SQL expressions at the class level, and Python expression evaluation at the instance level. Below, each function decorated with :class:`.hybrid_method` or :class:`.hybrid_property` may receive ``self`` as an instance of the class, or as the class itself:: from sqlalchemy import Column, Integer from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import Session, aliased from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method Base = declarative_base() class Interval(Base): __tablename__ = 'interval' id = Column(Integer, primary_key=True) start = Column(Integer, nullable=False) end = Column(Integer, nullable=False) def __init__(self, start, end): self.start = start self.end = end @hybrid_property def length(self): return self.end - self.start @hybrid_method def contains(self, point): return (self.start <= point) & (point <= self.end) @hybrid_method def intersects(self, other): return self.contains(other.start) | self.contains(other.end) Above, the ``length`` property returns the difference between the ``end`` and ``start`` attributes. With an instance of ``Interval``, this subtraction occurs in Python, using normal Python descriptor mechanics:: >>> i1 = Interval(5, 10) >>> i1.length 5 When dealing with the ``Interval`` class itself, the :class:`.hybrid_property` descriptor evaluates the function body given the ``Interval`` class as the argument, which when evaluated with SQLAlchemy expression mechanics returns a new SQL expression:: >>> print(Interval.length) interval."end" - interval.start >>> print(Session().query(Interval).filter(Interval.length > 10)) SELECT interval.id AS interval_id, interval.start AS interval_start, interval."end" AS interval_end FROM interval WHERE interval."end" - interval.start > :param_1 ORM methods such as :meth:`_query.Query.filter_by` generally use ``getattr()`` to locate attributes, so can also be used with hybrid attributes:: >>> print(Session().query(Interval).filter_by(length=5)) SELECT interval.id AS interval_id, interval.start AS interval_start, interval."end" AS interval_end FROM interval WHERE interval."end" - interval.start = :param_1 The ``Interval`` class example also illustrates two methods, ``contains()`` and ``intersects()``, decorated with :class:`.hybrid_method`. This decorator applies the same idea to methods that :class:`.hybrid_property` applies to attributes. 
The methods return boolean values, and take advantage of the Python ``|`` and ``&`` bitwise operators to produce equivalent instance-level and SQL expression-level boolean behavior:: >>> i1.contains(6) True >>> i1.contains(15) False >>> i1.intersects(Interval(7, 18)) True >>> i1.intersects(Interval(25, 29)) False >>> print(Session().query(Interval).filter(Interval.contains(15))) SELECT interval.id AS interval_id, interval.start AS interval_start, interval."end" AS interval_end FROM interval WHERE interval.start <= :start_1 AND interval."end" > :end_1 >>> ia = aliased(Interval) >>> print(Session().query(Interval, ia).filter(Interval.intersects(ia))) SELECT interval.id AS interval_id, interval.start AS interval_start, interval."end" AS interval_end, interval_1.id AS interval_1_id, interval_1.start AS interval_1_start, interval_1."end" AS interval_1_end FROM interval, interval AS interval_1 WHERE interval.start <= interval_1.start AND interval."end" > interval_1.start OR interval.start <= interval_1."end" AND interval."end" > interval_1."end" .. _hybrid_distinct_expression: Defining Expression Behavior Distinct from Attribute Behavior -------------------------------------------------------------- Our usage of the ``&`` and ``|`` bitwise operators above was fortunate, considering our functions operated on two boolean values to return a new one. In many cases, the construction of an in-Python function and a SQLAlchemy SQL expression have enough differences that two separate Python expressions should be defined. The :mod:`~sqlalchemy.ext.hybrid` decorators define the :meth:`.hybrid_property.expression` modifier for this purpose. As an example we'll define the radius of the interval, which requires the usage of the absolute value function:: from sqlalchemy import func class Interval(object): # ... @hybrid_property def radius(self): return abs(self.length) / 2 @radius.expression def radius(cls): return func.abs(cls.length) / 2 Above the Python function ``abs()`` is used for instance-level operations, the SQL function ``ABS()`` is used via the :data:`.func` object for class-level expressions:: >>> i1.radius 2 >>> print(Session().query(Interval).filter(Interval.radius > 5)) SELECT interval.id AS interval_id, interval.start AS interval_start, interval."end" AS interval_end FROM interval WHERE abs(interval."end" - interval.start) / :abs_1 > :param_1 .. note:: When defining an expression for a hybrid property or method, the expression method **must** retain the name of the original hybrid, else the new hybrid with the additional state will be attached to the class with the non-matching name. To use the example above:: class Interval(object): # ... @hybrid_property def radius(self): return abs(self.length) / 2 # WRONG - the non-matching name will cause this function to be # ignored @radius.expression def radius_expression(cls): return func.abs(cls.length) / 2 This is also true for other mutator methods, such as :meth:`.hybrid_property.update_expression`. This is the same behavior as that of the ``@property`` construct that is part of standard Python. Defining Setters ---------------- Hybrid properties can also define setter methods. If we wanted ``length`` above, when set, to modify the endpoint value:: class Interval(object): # ... @hybrid_property def length(self): return self.end - self.start @length.setter def length(self, value): self.end = self.start + value The ``length(self, value)`` method is now called upon set:: >>> i1 = Interval(5, 10) >>> i1.length 5 >>> i1.length = 12 >>> i1.end 17 .. 
_hybrid_bulk_update: Allowing Bulk ORM Update ------------------------ A hybrid can define a custom "UPDATE" handler for when using the :meth:`_query.Query.update` method, allowing the hybrid to be used in the SET clause of the update. Normally, when using a hybrid with :meth:`_query.Query.update`, the SQL expression is used as the column that's the target of the SET. If our ``Interval`` class had a hybrid ``start_point`` that linked to ``Interval.start``, this could be substituted directly:: session.query(Interval).update({Interval.start_point: 10}) However, when using a composite hybrid like ``Interval.length``, this hybrid represents more than one column. We can set up a handler that will accommodate a value passed to :meth:`_query.Query.update` which can affect this, using the :meth:`.hybrid_property.update_expression` decorator. A handler that works similarly to our setter would be:: class Interval(object): # ... @hybrid_property def length(self): return self.end - self.start @length.setter def length(self, value): self.end = self.start + value @length.update_expression def length(cls, value): return [ (cls.end, cls.start + value) ] Above, if we use ``Interval.length`` in an UPDATE expression as:: session.query(Interval).update( {Interval.length: 25}, synchronize_session='fetch') We'll get an UPDATE statement along the lines of:: UPDATE interval SET end=start + :value In some cases, the default "evaluate" strategy can't perform the SET expression in Python; while the addition operator we're using above is supported, for more complex SET expressions it will usually be necessary to use either the "fetch" or False synchronization strategy as illustrated above. .. versionadded:: 1.2 added support for bulk updates to hybrid properties. Working with Relationships -------------------------- There's no essential difference when creating hybrids that work with related objects as opposed to column-based data. The need for distinct expressions tends to be greater. The two variants we'll illustrate are the "join-dependent" hybrid, and the "correlated subquery" hybrid. Join-Dependent Relationship Hybrid ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Consider the following declarative mapping which relates a ``User`` to a ``SavingsAccount``:: from sqlalchemy import Column, Integer, ForeignKey, Numeric, String from sqlalchemy.orm import relationship from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.hybrid import hybrid_property Base = declarative_base() class SavingsAccount(Base): __tablename__ = 'account' id = Column(Integer, primary_key=True) user_id = Column(Integer, ForeignKey('user.id'), nullable=False) balance = Column(Numeric(15, 5)) class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) accounts = relationship("SavingsAccount", backref="owner") @hybrid_property def balance(self): if self.accounts: return self.accounts[0].balance else: return None @balance.setter def balance(self, value): if not self.accounts: account = Account(owner=self) else: account = self.accounts[0] account.balance = value @balance.expression def balance(cls): return SavingsAccount.balance The above hybrid property ``balance`` works with the first ``SavingsAccount`` entry in the list of accounts for this user. The in-Python getter/setter methods can treat ``accounts`` as a Python list available on ``self``. 
However, at the expression level, it's expected that the ``User`` class will be used in an appropriate context such that an appropriate join to ``SavingsAccount`` will be present:: >>> print(Session().query(User, User.balance). ... join(User.accounts).filter(User.balance > 5000)) SELECT "user".id AS user_id, "user".name AS user_name, account.balance AS account_balance FROM "user" JOIN account ON "user".id = account.user_id WHERE account.balance > :balance_1 Note however, that while the instance level accessors need to worry about whether ``self.accounts`` is even present, this issue expresses itself differently at the SQL expression level, where we basically would use an outer join:: >>> from sqlalchemy import or_ >>> print (Session().query(User, User.balance).outerjoin(User.accounts). ... filter(or_(User.balance < 5000, User.balance == None))) SELECT "user".id AS user_id, "user".name AS user_name, account.balance AS account_balance FROM "user" LEFT OUTER JOIN account ON "user".id = account.user_id WHERE account.balance < :balance_1 OR account.balance IS NULL Correlated Subquery Relationship Hybrid ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ We can, of course, forego being dependent on the enclosing query's usage of joins in favor of the correlated subquery, which can portably be packed into a single column expression. A correlated subquery is more portable, but often performs more poorly at the SQL level. Using the same technique illustrated at :ref:`mapper_column_property_sql_expressions`, we can adjust our ``SavingsAccount`` example to aggregate the balances for *all* accounts, and use a correlated subquery for the column expression:: from sqlalchemy import Column, Integer, ForeignKey, Numeric, String from sqlalchemy.orm import relationship from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy import select, func Base = declarative_base() class SavingsAccount(Base): __tablename__ = 'account' id = Column(Integer, primary_key=True) user_id = Column(Integer, ForeignKey('user.id'), nullable=False) balance = Column(Numeric(15, 5)) class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) accounts = relationship("SavingsAccount", backref="owner") @hybrid_property def balance(self): return sum(acc.balance for acc in self.accounts) @balance.expression def balance(cls): return select([func.sum(SavingsAccount.balance)]).\ where(SavingsAccount.user_id==cls.id).\ label('total_balance') The above recipe will give us the ``balance`` column which renders a correlated SELECT:: >>> print(s.query(User).filter(User.balance > 400)) SELECT "user".id AS user_id, "user".name AS user_name FROM "user" WHERE (SELECT sum(account.balance) AS sum_1 FROM account WHERE account.user_id = "user".id) > :param_1 .. _hybrid_custom_comparators: Building Custom Comparators --------------------------- The hybrid property also includes a helper that allows construction of custom comparators. A comparator object allows one to customize the behavior of each SQLAlchemy expression operator individually. They are useful when creating custom types that have some highly idiosyncratic behavior on the SQL side. .. note:: The :meth:`.hybrid_property.comparator` decorator introduced in this section **replaces** the use of the :meth:`.hybrid_property.expression` decorator. They cannot be used together. 
The example class below allows case-insensitive comparisons on the attribute named ``word_insensitive``:: from sqlalchemy.ext.hybrid import Comparator, hybrid_property from sqlalchemy import func, Column, Integer, String from sqlalchemy.orm import Session from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class CaseInsensitiveComparator(Comparator): def __eq__(self, other): return func.lower(self.__clause_element__()) == func.lower(other) class SearchWord(Base): __tablename__ = 'searchword' id = Column(Integer, primary_key=True) word = Column(String(255), nullable=False) @hybrid_property def word_insensitive(self): return self.word.lower() @word_insensitive.comparator def word_insensitive(cls): return CaseInsensitiveComparator(cls.word) Above, SQL expressions against ``word_insensitive`` will apply the ``LOWER()`` SQL function to both sides:: >>> print(Session().query(SearchWord).filter_by(word_insensitive="Trucks")) SELECT searchword.id AS searchword_id, searchword.word AS searchword_word FROM searchword WHERE lower(searchword.word) = lower(:lower_1) The ``CaseInsensitiveComparator`` above implements part of the :class:`.ColumnOperators` interface. A "coercion" operation like lowercasing can be applied to all comparison operations (i.e. ``eq``, ``lt``, ``gt``, etc.) using :meth:`.Operators.operate`:: class CaseInsensitiveComparator(Comparator): def operate(self, op, other): return op(func.lower(self.__clause_element__()), func.lower(other)) .. _hybrid_reuse_subclass: Reusing Hybrid Properties across Subclasses ------------------------------------------- A hybrid can be referred to from a superclass, to allow modifying methods like :meth:`.hybrid_property.getter`, :meth:`.hybrid_property.setter` to be used to redefine those methods on a subclass. This is similar to how the standard Python ``@property`` object works:: class FirstNameOnly(Base): # ... first_name = Column(String) @hybrid_property def name(self): return self.first_name @name.setter def name(self, value): self.first_name = value class FirstNameLastName(FirstNameOnly): # ... last_name = Column(String) @FirstNameOnly.name.getter def name(self): return self.first_name + ' ' + self.last_name @name.setter def name(self, value): self.first_name, self.last_name = value.split(' ', 1) Above, the ``FirstNameLastName`` class refers to the hybrid from ``FirstNameOnly.name`` to repurpose its getter and setter for the subclass. When overriding :meth:`.hybrid_property.expression` and :meth:`.hybrid_property.comparator` alone as the first reference to the superclass, these names conflict with the same-named accessors on the class- level :class:`.QueryableAttribute` object returned at the class level. To override these methods when referring directly to the parent class descriptor, add the special qualifier :attr:`.hybrid_property.overrides`, which will de- reference the instrumented attribute back to the hybrid object:: class FirstNameLastName(FirstNameOnly): # ... last_name = Column(String) @FirstNameOnly.name.overrides.expression def name(cls): return func.concat(cls.first_name, ' ', cls.last_name) .. versionadded:: 1.2 Added :meth:`.hybrid_property.getter` as well as the ability to redefine accessors per-subclass. 
Hybrid Value Objects -------------------- Note in our previous example, if we were to compare the ``word_insensitive`` attribute of a ``SearchWord`` instance to a plain Python string, the plain Python string would not be coerced to lower case - the ``CaseInsensitiveComparator`` we built, being returned by ``@word_insensitive.comparator``, only applies to the SQL side. A more comprehensive form of the custom comparator is to construct a *Hybrid Value Object*. This technique applies the target value or expression to a value object which is then returned by the accessor in all cases. The value object allows control of all operations upon the value as well as how compared values are treated, both on the SQL expression side as well as the Python value side. Replacing the previous ``CaseInsensitiveComparator`` class with a new ``CaseInsensitiveWord`` class:: class CaseInsensitiveWord(Comparator): "Hybrid value representing a lower case representation of a word." def __init__(self, word): if isinstance(word, basestring): self.word = word.lower() elif isinstance(word, CaseInsensitiveWord): self.word = word.word else: self.word = func.lower(word) def operate(self, op, other): if not isinstance(other, CaseInsensitiveWord): other = CaseInsensitiveWord(other) return op(self.word, other.word) def __clause_element__(self): return self.word def __str__(self): return self.word key = 'word' "Label to apply to Query tuple results" Above, the ``CaseInsensitiveWord`` object represents ``self.word``, which may be a SQL function, or may be a Python native. By overriding ``operate()`` and ``__clause_element__()`` to work in terms of ``self.word``, all comparison operations will work against the "converted" form of ``word``, whether it be SQL side or Python side. Our ``SearchWord`` class can now deliver the ``CaseInsensitiveWord`` object unconditionally from a single hybrid call:: class SearchWord(Base): __tablename__ = 'searchword' id = Column(Integer, primary_key=True) word = Column(String(255), nullable=False) @hybrid_property def word_insensitive(self): return CaseInsensitiveWord(self.word) The ``word_insensitive`` attribute now has case-insensitive comparison behavior universally, including SQL expression vs. Python expression (note the Python value is converted to lower case on the Python side here):: >>> print(Session().query(SearchWord).filter_by(word_insensitive="Trucks")) SELECT searchword.id AS searchword_id, searchword.word AS searchword_word FROM searchword WHERE lower(searchword.word) = :lower_1 SQL expression versus SQL expression:: >>> sw1 = aliased(SearchWord) >>> sw2 = aliased(SearchWord) >>> print(Session().query( ... sw1.word_insensitive, ... sw2.word_insensitive).\ ... filter( ... sw1.word_insensitive > sw2.word_insensitive ... )) SELECT lower(searchword_1.word) AS lower_1, lower(searchword_2.word) AS lower_2 FROM searchword AS searchword_1, searchword AS searchword_2 WHERE lower(searchword_1.word) > lower(searchword_2.word) Python only expression:: >>> ws1 = SearchWord(word="SomeWord") >>> ws1.word_insensitive == "sOmEwOrD" True >>> ws1.word_insensitive == "XOmEwOrX" False >>> print(ws1.word_insensitive) someword The Hybrid Value pattern is very useful for any kind of value that may have multiple representations, such as timestamps, time deltas, units of measurement, currencies and encrypted passwords. .. 
seealso:: `Hybrids and Value Agnostic Types <http://techspot.zzzeek.org/2011/10/21/hybrids-and-value-agnostic-types/>`_ - on the techspot.zzzeek.org blog `Value Agnostic Types, Part II <http://techspot.zzzeek.org/2011/10/29/value-agnostic-types-part-ii/>`_ - on the techspot.zzzeek.org blog .. _hybrid_transformers: Building Transformers ---------------------- A *transformer* is an object which can receive a :class:`_query.Query` object and return a new one. The :class:`_query.Query` object includes a method :meth:`.with_transformation` that returns a new :class:`_query.Query` transformed by the given function. We can combine this with the :class:`.Comparator` class to produce one type of recipe which can both set up the FROM clause of a query as well as assign filtering criterion. Consider a mapped class ``Node``, which assembles using adjacency list into a hierarchical tree pattern:: from sqlalchemy import Column, Integer, ForeignKey from sqlalchemy.orm import relationship from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('node.id')) parent = relationship("Node", remote_side=id) Suppose we wanted to add an accessor ``grandparent``. This would return the ``parent`` of ``Node.parent``. When we have an instance of ``Node``, this is simple:: from sqlalchemy.ext.hybrid import hybrid_property class Node(Base): # ... @hybrid_property def grandparent(self): return self.parent.parent For the expression, things are not so clear. We'd need to construct a :class:`_query.Query` where we :meth:`_query.Query.join` twice along ``Node. parent`` to get to the ``grandparent``. We can instead return a transforming callable that we'll combine with the :class:`.Comparator` class to receive any :class:`_query.Query` object, and return a new one that's joined to the ``Node.parent`` attribute and filtered based on the given criterion:: from sqlalchemy.ext.hybrid import Comparator class GrandparentTransformer(Comparator): def operate(self, op, other): def transform(q): cls = self.__clause_element__() parent_alias = aliased(cls) return q.join(parent_alias, cls.parent).\ filter(op(parent_alias.parent, other)) return transform Base = declarative_base() class Node(Base): __tablename__ = 'node' id =Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('node.id')) parent = relationship("Node", remote_side=id) @hybrid_property def grandparent(self): return self.parent.parent @grandparent.comparator def grandparent(cls): return GrandparentTransformer(cls) The ``GrandparentTransformer`` overrides the core :meth:`.Operators.operate` method at the base of the :class:`.Comparator` hierarchy to return a query- transforming callable, which then runs the given comparison operation in a particular context. Such as, in the example above, the ``operate`` method is called, given the :attr:`.Operators.eq` callable as well as the right side of the comparison ``Node(id=5)``. A function ``transform`` is then returned which will transform a :class:`_query.Query` first to join to ``Node.parent``, then to compare ``parent_alias`` using :attr:`.Operators.eq` against the left and right sides, passing into :class:`_query.Query.filter`: .. sourcecode:: pycon+sql >>> from sqlalchemy.orm import Session >>> session = Session() {sql}>>> session.query(Node).\ ... with_transformation(Node.grandparent==Node(id=5)).\ ... 
all()
    SELECT node.id AS node_id, node.parent_id AS node_parent_id
    FROM node JOIN node AS node_1 ON node_1.id = node.parent_id
    WHERE :param_1 = node_1.parent_id
    {stop}

We can modify the pattern to be more verbose but flexible by separating the
"join" step from the "filter" step.  The tricky part here is ensuring that
successive instances of ``GrandparentTransformer`` use the same
:class:`.AliasedClass` object against ``Node``.  Below we use a simple
memoizing approach that associates a ``GrandparentTransformer`` with each
class::

    class Node(Base):

        # ...

        @grandparent.comparator
        def grandparent(cls):
            # memoize a GrandparentTransformer
            # per class
            if '_gp' not in cls.__dict__:
                cls._gp = GrandparentTransformer(cls)
            return cls._gp

    class GrandparentTransformer(Comparator):

        def __init__(self, cls):
            self.parent_alias = aliased(cls)

        @property
        def join(self):
            def go(q):
                return q.join(self.parent_alias, Node.parent)
            return go

        def operate(self, op, other):
            return op(self.parent_alias.parent, other)

.. sourcecode:: pycon+sql

    {sql}>>> session.query(Node).\
    ...            with_transformation(Node.grandparent.join).\
    ...            filter(Node.grandparent==Node(id=5))
    SELECT node.id AS node_id, node.parent_id AS node_parent_id
    FROM node JOIN node AS node_1 ON node_1.id = node.parent_id
    WHERE :param_1 = node_1.parent_id
    {stop}

The "transformer" pattern is an experimental pattern that makes use of some
functional programming paradigms.  While it's only recommended for advanced
and/or patient developers, there are probably a whole lot of amazing things
it can be used for.

"""  # noqa
from .. import util
from ..orm import attributes
from ..orm import interfaces

HYBRID_METHOD = util.symbol("HYBRID_METHOD")
"""Symbol indicating an :class:`InspectionAttr` that's
of type :class:`.hybrid_method`.

Is assigned to the :attr:`.InspectionAttr.extension_type`
attribute.

.. seealso::

    :attr:`_orm.Mapper.all_orm_attributes`

"""

HYBRID_PROPERTY = util.symbol("HYBRID_PROPERTY")
"""Symbol indicating an :class:`InspectionAttr` that's
of type :class:`.hybrid_property`.

Is assigned to the :attr:`.InspectionAttr.extension_type`
attribute.

.. seealso::

    :attr:`_orm.Mapper.all_orm_attributes`

"""


class hybrid_method(interfaces.InspectionAttrInfo):
    """A decorator which allows definition of a Python object method with both
    instance-level and class-level behavior.

    """

    is_attribute = True
    extension_type = HYBRID_METHOD

    def __init__(self, func, expr=None):
        """Create a new :class:`.hybrid_method`.

        Usage is typically via decorator::

            from sqlalchemy.ext.hybrid import hybrid_method

            class SomeClass(object):
                @hybrid_method
                def value(self, x, y):
                    return self._value + x + y

                @value.expression
                def value(self, x, y):
                    return func.some_function(self._value, x, y)

        """
        self.func = func
        self.expression(expr or func)

    def __get__(self, instance, owner):
        if instance is None:
            return self.expr.__get__(owner, owner.__class__)
        else:
            return self.func.__get__(instance, owner)

    def expression(self, expr):
        """Provide a modifying decorator that defines a
        SQL-expression producing method."""

        self.expr = expr
        if not self.expr.__doc__:
            self.expr.__doc__ = self.func.__doc__
        return self


class hybrid_property(interfaces.InspectionAttrInfo):
    """A decorator which allows definition of a Python descriptor with both
    instance-level and class-level behavior.

    """

    is_attribute = True
    extension_type = HYBRID_PROPERTY

    def __init__(
        self,
        fget,
        fset=None,
        fdel=None,
        expr=None,
        custom_comparator=None,
        update_expr=None,
    ):
        """Create a new :class:`.hybrid_property`.
Usage is typically via decorator:: from sqlalchemy.ext.hybrid import hybrid_property class SomeClass(object): @hybrid_property def value(self): return self._value @value.setter def value(self, value): self._value = value """ self.fget = fget self.fset = fset self.fdel = fdel self.expr = expr self.custom_comparator = custom_comparator self.update_expr = update_expr util.update_wrapper(self, fget) def __get__(self, instance, owner): if instance is None: return self._expr_comparator(owner) else: return self.fget(instance) def __set__(self, instance, value): if self.fset is None: raise AttributeError("can't set attribute") self.fset(instance, value) def __delete__(self, instance): if self.fdel is None: raise AttributeError("can't delete attribute") self.fdel(instance) def _copy(self, **kw): defaults = { key: value for key, value in self.__dict__.items() if not key.startswith("_") } defaults.update(**kw) return type(self)(**defaults) @property def overrides(self): """Prefix for a method that is overriding an existing attribute. The :attr:`.hybrid_property.overrides` accessor just returns this hybrid object, which when called at the class level from a parent class, will de-reference the "instrumented attribute" normally returned at this level, and allow modifying decorators like :meth:`.hybrid_property.expression` and :meth:`.hybrid_property.comparator` to be used without conflicting with the same-named attributes normally present on the :class:`.QueryableAttribute`:: class SuperClass(object): # ... @hybrid_property def foobar(self): return self._foobar class SubClass(SuperClass): # ... @SuperClass.foobar.overrides.expression def foobar(cls): return func.subfoobar(self._foobar) .. versionadded:: 1.2 .. seealso:: :ref:`hybrid_reuse_subclass` """ return self def getter(self, fget): """Provide a modifying decorator that defines a getter method. .. versionadded:: 1.2 """ return self._copy(fget=fget) def setter(self, fset): """Provide a modifying decorator that defines a setter method.""" return self._copy(fset=fset) def deleter(self, fdel): """Provide a modifying decorator that defines a deletion method.""" return self._copy(fdel=fdel) def expression(self, expr): """Provide a modifying decorator that defines a SQL-expression producing method. When a hybrid is invoked at the class level, the SQL expression given here is wrapped inside of a specialized :class:`.QueryableAttribute`, which is the same kind of object used by the ORM to represent other mapped attributes. The reason for this is so that other class-level attributes such as docstrings and a reference to the hybrid itself may be maintained within the structure that's returned, without any modifications to the original SQL expression passed in. .. note:: when referring to a hybrid property from an owning class (e.g. ``SomeClass.some_hybrid``), an instance of :class:`.QueryableAttribute` is returned, representing the expression or comparator object as well as this hybrid object. However, that object itself has accessors called ``expression`` and ``comparator``; so when attempting to override these decorators on a subclass, it may be necessary to qualify it using the :attr:`.hybrid_property.overrides` modifier first. See that modifier for details. .. seealso:: :ref:`hybrid_distinct_expression` """ return self._copy(expr=expr) def comparator(self, comparator): """Provide a modifying decorator that defines a custom comparator producing method. The return value of the decorated method should be an instance of :class:`~.hybrid.Comparator`. .. 
note:: The :meth:`.hybrid_property.comparator` decorator **replaces** the use of the :meth:`.hybrid_property.expression` decorator. They cannot be used together. When a hybrid is invoked at the class level, the :class:`~.hybrid.Comparator` object given here is wrapped inside of a specialized :class:`.QueryableAttribute`, which is the same kind of object used by the ORM to represent other mapped attributes. The reason for this is so that other class-level attributes such as docstrings and a reference to the hybrid itself may be maintained within the structure that's returned, without any modifications to the original comparator object passed in. .. note:: when referring to a hybrid property from an owning class (e.g. ``SomeClass.some_hybrid``), an instance of :class:`.QueryableAttribute` is returned, representing the expression or comparator object as this hybrid object. However, that object itself has accessors called ``expression`` and ``comparator``; so when attempting to override these decorators on a subclass, it may be necessary to qualify it using the :attr:`.hybrid_property.overrides` modifier first. See that modifier for details. """ return self._copy(custom_comparator=comparator) def update_expression(self, meth): """Provide a modifying decorator that defines an UPDATE tuple producing method. The method accepts a single value, which is the value to be rendered into the SET clause of an UPDATE statement. The method should then process this value into individual column expressions that fit into the ultimate SET clause, and return them as a sequence of 2-tuples. Each tuple contains a column expression as the key and a value to be rendered. E.g.:: class Person(Base): # ... first_name = Column(String) last_name = Column(String) @hybrid_property def fullname(self): return first_name + " " + last_name @fullname.update_expression def fullname(cls, value): fname, lname = value.split(" ", 1) return [ (cls.first_name, fname), (cls.last_name, lname) ] .. versionadded:: 1.2 """ return self._copy(update_expr=meth) @util.memoized_property def _expr_comparator(self): if self.custom_comparator is not None: return self._get_comparator(self.custom_comparator) elif self.expr is not None: return self._get_expr(self.expr) else: return self._get_expr(self.fget) def _get_expr(self, expr): def _expr(cls): return ExprComparator(cls, expr(cls), self) util.update_wrapper(_expr, expr) return self._get_comparator(_expr) def _get_comparator(self, comparator): proxy_attr = attributes.create_proxied_attribute(self) def expr_comparator(owner): return proxy_attr( owner, self.__name__, self, comparator(owner), doc=comparator.__doc__ or self.__doc__, ) return expr_comparator class Comparator(interfaces.PropComparator): """A helper class that allows easy construction of custom :class:`~.orm.interfaces.PropComparator` classes for usage with hybrids.""" property = None def __init__(self, expression): self.expression = expression def __clause_element__(self): expr = self.expression if hasattr(expr, "__clause_element__"): expr = expr.__clause_element__() return expr def adapt_to_entity(self, adapt_to_entity): # interesting.... 
return self class ExprComparator(Comparator): def __init__(self, cls, expression, hybrid): self.cls = cls self.expression = expression self.hybrid = hybrid def __getattr__(self, key): return getattr(self.expression, key) @property def info(self): return self.hybrid.info def _bulk_update_tuples(self, value): if isinstance(self.expression, attributes.QueryableAttribute): return self.expression._bulk_update_tuples(value) elif self.hybrid.update_expr is not None: return self.hybrid.update_expr(self.cls, value) else: return [(self.expression, value)] @property def property(self): return self.expression.property def operate(self, op, *other, **kwargs): return op(self.expression, *other, **kwargs) def reverse_operate(self, op, other, **kwargs): return op(other, self.expression, **kwargs)
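# A compact sketch (not part of the library, runnable via
# ``python -m sqlalchemy.ext.hybrid``) of the dual behavior implemented
# above: instance-level access evaluates plain Python, while class-level
# access produces a SQL expression.
if __name__ == "__main__":
    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Interval(Base):
        __tablename__ = "interval"
        id = Column(Integer, primary_key=True)
        start = Column(Integer, nullable=False)
        end = Column(Integer, nullable=False)

        @hybrid_property
        def length(self):
            return self.end - self.start

    # Python evaluation against an instance
    print(Interval(start=5, end=10).length)  # 5

    # SQL expression against the class
    print(Interval.length)  # interval."end" - interval.start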
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/ext/__init__.py
# ext/__init__.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from .. import util as _sa_util _sa_util.dependencies.resolve_all("sqlalchemy.ext")
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/ext/indexable.py
# ext/indexable.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Define attributes on ORM-mapped classes that have "index" attributes for
columns with :class:`_types.Indexable` types.

"index" means the attribute is associated with an element of an
:class:`_types.Indexable` column with the predefined index to access it.
The :class:`_types.Indexable` types include types such as
:class:`_types.ARRAY`, :class:`_types.JSON` and
:class:`_postgresql.HSTORE`.

The :mod:`~sqlalchemy.ext.indexable` extension provides a
:class:`_schema.Column`-like interface for any element of an
:class:`_types.Indexable` typed column. In simple cases, it can be
treated as a :class:`_schema.Column`-mapped attribute.

.. versionadded:: 1.1

Synopsis
========

Given ``Person`` as a model with a primary key and a JSON data field.
While this field may have any number of elements encoded within it,
we would like to refer to the element called ``name`` individually
as a dedicated attribute which behaves like a standalone column::

    from sqlalchemy import Column, JSON, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.ext.indexable import index_property

    Base = declarative_base()

    class Person(Base):
        __tablename__ = 'person'

        id = Column(Integer, primary_key=True)
        data = Column(JSON)

        name = index_property('data', 'name')


Above, the ``name`` attribute now behaves like a mapped column.  We
can compose a new ``Person`` and set the value of ``name``::

    >>> person = Person(name='Alchemist')

The value is now accessible::

    >>> person.name
    'Alchemist'

Behind the scenes, the JSON field was initialized to a new blank dictionary
and the field was set::

    >>> person.data
    {'name': 'Alchemist'}

The field is mutable in place::

    >>> person.name = 'Renamed'
    >>> person.name
    'Renamed'
    >>> person.data
    {'name': 'Renamed'}

When using :class:`.index_property`, the change that we make to the indexable
structure is also automatically tracked as history; we no longer need
to use :class:`~.mutable.MutableDict` in order to track this change
for the unit of work.

Deletions work normally as well::

    >>> del person.name
    >>> person.data
    {}

Above, deletion of ``person.name`` deletes the value from the dictionary,
but not the dictionary itself.

A missing key will produce ``AttributeError``::

    >>> person = Person()
    >>> person.name
    ...
    AttributeError: 'name'

Unless you set a default value::

    >>> class Person(Base):
    ...     __tablename__ = 'person'
    ...
    ...     id = Column(Integer, primary_key=True)
    ...     data = Column(JSON)
    ...
    ...     name = index_property('data', 'name', default=None)  # See default

    >>> person = Person()
    >>> print(person.name)
    None


The attributes are also accessible at the class level.
Below, we illustrate ``Person.name`` used to generate an indexed SQL criteria:: >>> from sqlalchemy.orm import Session >>> session = Session() >>> query = session.query(Person).filter(Person.name == 'Alchemist') The above query is equivalent to:: >>> query = session.query(Person).filter(Person.data['name'] == 'Alchemist') Multiple :class:`.index_property` objects can be chained to produce multiple levels of indexing:: from sqlalchemy import Column, JSON, Integer from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.indexable import index_property Base = declarative_base() class Person(Base): __tablename__ = 'person' id = Column(Integer, primary_key=True) data = Column(JSON) birthday = index_property('data', 'birthday') year = index_property('birthday', 'year') month = index_property('birthday', 'month') day = index_property('birthday', 'day') Above, a query such as:: q = session.query(Person).filter(Person.year == '1980') On a PostgreSQL backend, the above query will render as:: SELECT person.id, person.data FROM person WHERE person.data -> %(data_1)s -> %(param_1)s = %(param_2)s Default Values ============== :class:`.index_property` includes special behaviors for when the indexed data structure does not exist, and a set operation is called: * For an :class:`.index_property` that is given an integer index value, the default data structure will be a Python list of ``None`` values, at least as long as the index value; the value is then set at its place in the list. This means for an index value of zero, the list will be initialized to ``[None]`` before setting the given value, and for an index value of five, the list will be initialized to ``[None, None, None, None, None]`` before setting the fifth element to the given value. Note that an existing list is **not** extended in place to receive a value. * for an :class:`.index_property` that is given any other kind of index value (e.g. strings usually), a Python dictionary is used as the default data structure. * The default data structure can be set to any Python callable using the :paramref:`.index_property.datatype` parameter, overriding the previous rules. Subclassing =========== :class:`.index_property` can be subclassed, in particular for the common use case of providing coercion of values or SQL expressions as they are accessed. 
Below is a common recipe for use with a PostgreSQL JSON type, where we want to also include automatic casting plus ``astext()``:: class pg_json_property(index_property): def __init__(self, attr_name, index, cast_type): super(pg_json_property, self).__init__(attr_name, index) self.cast_type = cast_type def expr(self, model): expr = super(pg_json_property, self).expr(model) return expr.astext.cast(self.cast_type) The above subclass can be used with the PostgreSQL-specific version of :class:`_postgresql.JSON`:: from sqlalchemy import Column, Integer from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.dialects.postgresql import JSON Base = declarative_base() class Person(Base): __tablename__ = 'person' id = Column(Integer, primary_key=True) data = Column(JSON) age = pg_json_property('data', 'age', Integer) The ``age`` attribute at the instance level works as before; however when rendering SQL, PostgreSQL's ``->>`` operator will be used for indexed access, instead of the usual index operator of ``->``:: >>> query = session.query(Person).filter(Person.age < 20) The above query will render:: SELECT person.id, person.data FROM person WHERE CAST(person.data ->> %(data_1)s AS INTEGER) < %(param_1)s """ # noqa from __future__ import absolute_import from .. import inspect from .. import util from ..ext.hybrid import hybrid_property from ..orm.attributes import flag_modified __all__ = ["index_property"] class index_property(hybrid_property): # noqa """A property generator. The generated property describes an object attribute that corresponds to an :class:`_types.Indexable` column. .. versionadded:: 1.1 .. seealso:: :mod:`sqlalchemy.ext.indexable` """ _NO_DEFAULT_ARGUMENT = object() def __init__( self, attr_name, index, default=_NO_DEFAULT_ARGUMENT, datatype=None, mutable=True, onebased=True, ): """Create a new :class:`.index_property`. :param attr_name: An attribute name of an `Indexable` typed column, or other attribute that returns an indexable structure. :param index: The index to be used for getting and setting this value. This should be the Python-side index value for integers. :param default: A value which will be returned instead of `AttributeError` when there is not a value at given index. :param datatype: default datatype to use when the field is empty. By default, this is derived from the type of index used; a Python list for an integer index, or a Python dictionary for any other style of index. For a list, the list will be initialized to a list of None values that is at least ``index`` elements long. :param mutable: if False, writes and deletes to the attribute will be disallowed. :param onebased: assume the SQL representation of this value is one-based; that is, the first index in SQL is 1, not zero. 
""" if mutable: super(index_property, self).__init__( self.fget, self.fset, self.fdel, self.expr ) else: super(index_property, self).__init__( self.fget, None, None, self.expr ) self.attr_name = attr_name self.index = index self.default = default is_numeric = isinstance(index, int) onebased = is_numeric and onebased if datatype is not None: self.datatype = datatype else: if is_numeric: self.datatype = lambda: [None for x in range(index + 1)] else: self.datatype = dict self.onebased = onebased def _fget_default(self, err=None): if self.default == self._NO_DEFAULT_ARGUMENT: util.raise_(AttributeError(self.attr_name), replace_context=err) else: return self.default def fget(self, instance): attr_name = self.attr_name column_value = getattr(instance, attr_name) if column_value is None: return self._fget_default() try: value = column_value[self.index] except (KeyError, IndexError) as err: return self._fget_default(err) else: return value def fset(self, instance, value): attr_name = self.attr_name column_value = getattr(instance, attr_name, None) if column_value is None: column_value = self.datatype() setattr(instance, attr_name, column_value) column_value[self.index] = value setattr(instance, attr_name, column_value) if attr_name in inspect(instance).mapper.attrs: flag_modified(instance, attr_name) def fdel(self, instance): attr_name = self.attr_name column_value = getattr(instance, attr_name) if column_value is None: raise AttributeError(self.attr_name) try: del column_value[self.index] except KeyError as err: util.raise_(AttributeError(self.attr_name), replace_context=err) else: setattr(instance, attr_name, column_value) flag_modified(instance, attr_name) def expr(self, model): column = getattr(model, self.attr_name) index = self.index if self.onebased: index += 1 return column[index]
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/ext/serializer.py
# ext/serializer.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Serializer/Deserializer objects for usage with SQLAlchemy query structures, allowing "contextual" deserialization. Any SQLAlchemy query structure, either based on sqlalchemy.sql.* or sqlalchemy.orm.*, can be used. The mappers, Tables, Columns, Session etc. which are referenced by the structure are not persisted in serialized form, but are instead re-associated with the query structure when it is deserialized. Usage is nearly the same as that of the standard Python pickle module:: from sqlalchemy.ext.serializer import loads, dumps metadata = MetaData(bind=some_engine) Session = scoped_session(sessionmaker()) # ... define mappers query = Session.query(MyClass). filter(MyClass.somedata=='foo').order_by(MyClass.sortkey) # pickle the query serialized = dumps(query) # unpickle. Pass in metadata + scoped_session query2 = loads(serialized, metadata, Session) print(query2.all()) Similar restrictions as when using raw pickle apply; mapped classes must themselves be pickleable, meaning they are importable from a module-level namespace. The serializer module is only appropriate for query structures. It is not needed for: * instances of user-defined classes. These contain no references to engines, sessions or expression constructs in the typical case and can be serialized directly. * Table metadata that is to be loaded entirely from the serialized structure (i.e. is not already declared in the application). Regular pickle.loads()/dumps() can be used to fully dump any ``MetaData`` object, typically one which was reflected from an existing database at some previous point in time. The serializer module is specifically for the opposite case, where the Table metadata is already present in memory. """ import re from .. import Column from ..
import Table from ..engine import Engine from ..orm import class_mapper from ..orm.attributes import QueryableAttribute from ..orm.interfaces import MapperProperty from ..orm.mapper import Mapper from ..orm.session import Session from ..util import b64decode from ..util import b64encode from ..util import byte_buffer from ..util import pickle from ..util import text_type __all__ = ["Serializer", "Deserializer", "dumps", "loads"] def Serializer(*args, **kw): pickler = pickle.Pickler(*args, **kw) def persistent_id(obj): # print "serializing:", repr(obj) if isinstance(obj, QueryableAttribute): cls = obj.impl.class_ key = obj.impl.key id_ = "attribute:" + key + ":" + b64encode(pickle.dumps(cls)) elif isinstance(obj, Mapper) and not obj.non_primary: id_ = "mapper:" + b64encode(pickle.dumps(obj.class_)) elif isinstance(obj, MapperProperty) and not obj.parent.non_primary: id_ = ( "mapperprop:" + b64encode(pickle.dumps(obj.parent.class_)) + ":" + obj.key ) elif isinstance(obj, Table): id_ = "table:" + text_type(obj.key) elif isinstance(obj, Column) and isinstance(obj.table, Table): id_ = ( "column:" + text_type(obj.table.key) + ":" + text_type(obj.key) ) elif isinstance(obj, Session): id_ = "session:" elif isinstance(obj, Engine): id_ = "engine:" else: return None return id_ pickler.persistent_id = persistent_id return pickler our_ids = re.compile( r"(mapperprop|mapper|table|column|session|attribute|engine):(.*)" ) def Deserializer(file, metadata=None, scoped_session=None, engine=None): unpickler = pickle.Unpickler(file) def get_engine(): if engine: return engine elif scoped_session and scoped_session().bind: return scoped_session().bind elif metadata and metadata.bind: return metadata.bind else: return None def persistent_load(id_): m = our_ids.match(text_type(id_)) if not m: return None else: type_, args = m.group(1, 2) if type_ == "attribute": key, clsarg = args.split(":") cls = pickle.loads(b64decode(clsarg)) return getattr(cls, key) elif type_ == "mapper": cls = pickle.loads(b64decode(args)) return class_mapper(cls) elif type_ == "mapperprop": mapper, keyname = args.split(":") cls = pickle.loads(b64decode(mapper)) return class_mapper(cls).attrs[keyname] elif type_ == "table": return metadata.tables[args] elif type_ == "column": table, colname = args.split(":") return metadata.tables[table].c[colname] elif type_ == "session": return scoped_session() elif type_ == "engine": return get_engine() else: raise Exception("Unknown token: %s" % type_) unpickler.persistent_load = persistent_load return unpickler def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL): buf = byte_buffer() pickler = Serializer(buf, protocol) pickler.dump(obj) return buf.getvalue() def loads(data, metadata=None, scoped_session=None, engine=None): buf = byte_buffer(data) unpickler = Deserializer(buf, metadata, scoped_session, engine) return unpickler.load()
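# --- Editor's addendum: a hedged round-trip sketch, not part of the
# SQLAlchemy source.  The ``users`` table and in-memory SQLite engine are
# invented for illustration; ``Table`` and ``Column`` are the module-level
# imports above.  Guarded so that importing the module is unaffected.
if __name__ == "__main__":
    from sqlalchemy import Integer, MetaData, String, create_engine, select

    engine = create_engine("sqlite://")
    metadata = MetaData(bind=engine)
    users = Table(
        "users",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("name", String(50)),
    )
    metadata.create_all()
    engine.execute(users.insert(), [{"name": "ed"}, {"name": "wendy"}])

    stmt = select([users.c.name]).where(users.c.name == "ed")

    # Table/Column references pickle as "table:users" / "column:users:name"
    # tokens and are re-resolved against the MetaData passed to loads()
    restored = loads(dumps(stmt), metadata)
    print(engine.execute(restored).fetchall())  # [('ed',)]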
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/ext/baked.py
# sqlalchemy/ext/baked.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Baked query extension. Provides a creational pattern for the :class:`.query.Query` object which allows the fully constructed object, Core select statement, and string compiled result to be fully cached. """ import copy import logging from .. import exc as sa_exc from .. import util from ..orm import exc as orm_exc from ..orm import strategy_options from ..orm.query import Query from ..orm.session import Session from ..sql import func from ..sql import literal_column from ..sql import util as sql_util log = logging.getLogger(__name__) class Bakery(object): """Callable which returns a :class:`.BakedQuery`. This object is returned by the class method :meth:`.BakedQuery.bakery`. It exists as an object so that the "cache" can be easily inspected. .. versionadded:: 1.2 """ __slots__ = "cls", "cache" def __init__(self, cls_, cache): self.cls = cls_ self.cache = cache def __call__(self, initial_fn, *args): return self.cls(self.cache, initial_fn, args) class BakedQuery(object): """A builder object for :class:`.query.Query` objects.""" __slots__ = "steps", "_bakery", "_cache_key", "_spoiled" def __init__(self, bakery, initial_fn, args=()): self._cache_key = () self._update_cache_key(initial_fn, args) self.steps = [initial_fn] self._spoiled = False self._bakery = bakery @classmethod def bakery(cls, size=200, _size_alert=None): """Construct a new bakery. :return: an instance of :class:`.Bakery` """ return Bakery(cls, util.LRUCache(size, size_alert=_size_alert)) def _clone(self): b1 = BakedQuery.__new__(BakedQuery) b1._cache_key = self._cache_key b1.steps = list(self.steps) b1._bakery = self._bakery b1._spoiled = self._spoiled return b1 def _update_cache_key(self, fn, args=()): self._cache_key += (fn.__code__,) + args def __iadd__(self, other): if isinstance(other, tuple): self.add_criteria(*other) else: self.add_criteria(other) return self def __add__(self, other): if isinstance(other, tuple): return self.with_criteria(*other) else: return self.with_criteria(other) def add_criteria(self, fn, *args): """Add a criteria function to this :class:`.BakedQuery`. This is equivalent to using the ``+=`` operator to modify a :class:`.BakedQuery` in-place. """ self._update_cache_key(fn, args) self.steps.append(fn) return self def with_criteria(self, fn, *args): """Add a criteria function to a :class:`.BakedQuery` cloned from this one. This is equivalent to using the ``+`` operator to produce a new :class:`.BakedQuery` with modifications. """ return self._clone().add_criteria(fn, *args) def for_session(self, session): """Return a :class:`.Result` object for this :class:`.BakedQuery`. This is equivalent to calling the :class:`.BakedQuery` as a Python callable, e.g. ``result = my_baked_query(session)``. """ return Result(self, session) def __call__(self, session): return self.for_session(session) def spoil(self, full=False): """Cancel any query caching that will occur on this BakedQuery object. The BakedQuery can continue to be used normally, however additional creational functions will not be cached; they will be called on every invocation. This is to support the case where a particular step in constructing a baked query disqualifies the query from being cacheable, such as a variant that relies upon some uncacheable value. 
:param full: if False, only functions added to this :class:`.BakedQuery` object subsequent to the spoil step will be non-cached; the state of the :class:`.BakedQuery` up until this point will be pulled from the cache. If True, then the entire :class:`_query.Query` object is built from scratch each time, with all creational functions being called on each invocation. """ if not full and not self._spoiled: _spoil_point = self._clone() _spoil_point._cache_key += ("_query_only",) self.steps = [_spoil_point._retrieve_baked_query] self._spoiled = True return self def _effective_key(self, session): """Return the key that actually goes into the cache dictionary for this :class:`.BakedQuery`, taking into account the given :class:`.Session`. This basically means we also will include the session's query_class, as the actual :class:`_query.Query` object is part of what's cached and needs to match the type of :class:`_query.Query` that a later session will want to use. """ return self._cache_key + (session._query_cls,) def _with_lazyload_options(self, options, effective_path, cache_path=None): """Cloning version of _add_lazyload_options. """ q = self._clone() q._add_lazyload_options(options, effective_path, cache_path=cache_path) return q def _add_lazyload_options(self, options, effective_path, cache_path=None): """Used by per-state lazy loaders to add options to the "lazy load" query from a parent query. Creates a cache key based on given load path and query options; if a repeatable cache key cannot be generated, the query is "spoiled" so that it won't use caching. """ key = () if not cache_path: cache_path = effective_path if cache_path.path[0].is_aliased_class: # paths that are against an AliasedClass are unsafe to cache # with since the AliasedClass is an ad-hoc object. self.spoil(full=True) else: for opt in options: cache_key = opt._generate_cache_key(cache_path) if cache_key is False: self.spoil(full=True) elif cache_key is not None: key += cache_key self.add_criteria( lambda q: q._with_current_path( effective_path )._conditional_options(*options), cache_path.path, key, ) def _retrieve_baked_query(self, session): query = self._bakery.get(self._effective_key(session), None) if query is None: query = self._as_query(session) self._bakery[self._effective_key(session)] = query.with_session( None ) return query.with_session(session) def _bake(self, session): query = self._as_query(session) context = query._compile_context() self._bake_subquery_loaders(session, context) context.session = None context.query = query = context.query.with_session(None) query._execution_options = query._execution_options.union( {"compiled_cache": self._bakery} ) # we'll be holding onto the query for some of its state, # so delete some compilation-use-only attributes that can take up # space for attr in ( "_correlate", "_from_obj", "_mapper_adapter_map", "_joinpath", "_joinpoint", ): query.__dict__.pop(attr, None) # if the query is not safe to cache, we still do everything as though # we did cache it, since the receiver of _bake() assumes subqueryload # context was set up, etc. if context.query._bake_ok: self._bakery[self._effective_key(session)] = context return context def to_query(self, query_or_session): """Return the :class:`_query.Query` object for use as a subquery. This method should be used within the lambda callable being used to generate a step of an enclosing :class:`.BakedQuery`. 
The parameter should normally be the :class:`_query.Query` object that is passed to the lambda:: sub_bq = self.bakery(lambda s: s.query(User.name)) sub_bq += lambda q: q.filter( User.id == Address.user_id).correlate(Address) main_bq = self.bakery(lambda s: s.query(Address)) main_bq += lambda q: q.filter( sub_bq.to_query(q).exists()) In the case where the subquery is used in the first callable against a :class:`.Session`, the :class:`.Session` is also accepted:: sub_bq = self.bakery(lambda s: s.query(User.name)) sub_bq += lambda q: q.filter( User.id == Address.user_id).correlate(Address) main_bq = self.bakery( lambda s: s.query(Address.id, sub_bq.to_query(s).as_scalar()) ) :param query_or_session: a :class:`_query.Query` object or a :class:`.Session` object, which is assumed to be within the context of an enclosing :class:`.BakedQuery` callable. .. versionadded:: 1.3 """ if isinstance(query_or_session, Session): session = query_or_session elif isinstance(query_or_session, Query): session = query_or_session.session if session is None: raise sa_exc.ArgumentError( "Given Query needs to be associated with a Session" ) else: raise TypeError( "Query or Session object expected, got %r." % type(query_or_session) ) return self._as_query(session) def _as_query(self, session): query = self.steps[0](session) for step in self.steps[1:]: query = step(query) return query def _bake_subquery_loaders(self, session, context): """convert subquery eager loaders in the cache into baked queries. For subquery eager loading to work, all we need here is that the Query point to the correct session when it is run. However, since we are "baking" anyway, we may as well also turn the query into a "baked" query so that we save on performance too. """ context.attributes["baked_queries"] = baked_queries = [] for k, v in list(context.attributes.items()): if isinstance(v, Query): if "subquery" in k: bk = BakedQuery(self._bakery, lambda *args: v) bk._cache_key = self._cache_key + k bk._bake(session) baked_queries.append((k, bk._cache_key, v)) del context.attributes[k] def _unbake_subquery_loaders( self, session, context, params, post_criteria ): """Retrieve subquery eager loaders stored by _bake_subquery_loaders and turn them back into Result objects that will iterate just like a Query object. """ if "baked_queries" not in context.attributes: return for k, cache_key, query in context.attributes["baked_queries"]: bk = BakedQuery( self._bakery, lambda sess, q=query: q.with_session(sess) ) bk._cache_key = cache_key q = bk.for_session(session) for fn in post_criteria: q = q.with_post_criteria(fn) context.attributes[k] = q.params(**params) class Result(object): """Invokes a :class:`.BakedQuery` against a :class:`.Session`. The :class:`.Result` object is where the actual :class:`.query.Query` object gets created, or retrieved from the cache, against a target :class:`.Session`, and is then invoked for results. """ __slots__ = "bq", "session", "_params", "_post_criteria" def __init__(self, bq, session): self.bq = bq self.session = session self._params = {} self._post_criteria = [] def params(self, *args, **kw): """Specify parameters to be replaced into the string SQL statement.""" if len(args) == 1: kw.update(args[0]) elif len(args) > 0: raise sa_exc.ArgumentError( "params() takes zero or one positional argument, " "which is a dictionary."
) self._params.update(kw) return self def _using_post_criteria(self, fns): if fns: self._post_criteria.extend(fns) return self def with_post_criteria(self, fn): """Add a criteria function that will be applied post-cache. This adds a function that will be run against the :class:`_query.Query` object after it is retrieved from the cache. Functions here can be used to alter the query in ways that **do not affect the SQL output**, such as execution options and shard identifiers (when using a shard-enabled query object) .. warning:: :meth:`.Result.with_post_criteria` functions are applied to the :class:`_query.Query` object **after** the query's SQL statement object has been retrieved from the cache. Any operations here which intend to modify the SQL should ensure that :meth:`.BakedQuery.spoil` was called first. .. versionadded:: 1.2 """ return self._using_post_criteria([fn]) def _as_query(self): q = self.bq._as_query(self.session).params(self._params) for fn in self._post_criteria: q = fn(q) return q def __str__(self): return str(self._as_query()) def __iter__(self): bq = self.bq if not self.session.enable_baked_queries or bq._spoiled: return iter(self._as_query()) baked_context = bq._bakery.get(bq._effective_key(self.session), None) if baked_context is None: baked_context = bq._bake(self.session) context = copy.copy(baked_context) context.session = self.session context.attributes = context.attributes.copy() bq._unbake_subquery_loaders( self.session, context, self._params, self._post_criteria ) context.statement.use_labels = True if context.autoflush and not context.populate_existing: self.session._autoflush() q = context.query.params(self._params).with_session(self.session) for fn in self._post_criteria: q = fn(q) return q._execute_and_instances(context) def count(self): """return the 'count'. Equivalent to :meth:`_query.Query.count`. Note this uses a subquery to ensure an accurate count regardless of the structure of the original statement. .. versionadded:: 1.1.6 """ col = func.count(literal_column("*")) bq = self.bq.with_criteria(lambda q: q.from_self(col)) return bq.for_session(self.session).params(self._params).scalar() def scalar(self): """Return the first element of the first result or None if no rows present. If multiple rows are returned, raises MultipleResultsFound. Equivalent to :meth:`_query.Query.scalar`. .. versionadded:: 1.1.6 """ try: ret = self.one() if not isinstance(ret, tuple): return ret return ret[0] except orm_exc.NoResultFound: return None def first(self): """Return the first row. Equivalent to :meth:`_query.Query.first`. """ bq = self.bq.with_criteria(lambda q: q.slice(0, 1)) ret = list( bq.for_session(self.session) .params(self._params) ._using_post_criteria(self._post_criteria) ) if len(ret) > 0: return ret[0] else: return None def one(self): """Return exactly one result or raise an exception. Equivalent to :meth:`_query.Query.one`. """ try: ret = self.one_or_none() except orm_exc.MultipleResultsFound as err: util.raise_( orm_exc.MultipleResultsFound( "Multiple rows were found for one()" ), replace_context=err, ) else: if ret is None: raise orm_exc.NoResultFound("No row was found for one()") return ret def one_or_none(self): """Return one or zero results, or raise an exception for multiple rows. Equivalent to :meth:`_query.Query.one_or_none`. .. 
versionadded:: 1.0.9 """ ret = list(self) l = len(ret) if l == 1: return ret[0] elif l == 0: return None else: raise orm_exc.MultipleResultsFound( "Multiple rows were found for one_or_none()" ) def all(self): """Return all rows. Equivalent to :meth:`_query.Query.all`. """ return list(self) def get(self, ident): """Retrieve an object based on identity. Equivalent to :meth:`_query.Query.get`. """ query = self.bq.steps[0](self.session) return query._get_impl(ident, self._load_on_pk_identity) def _load_on_pk_identity(self, query, primary_key_identity): """Load the given primary key identity from the database.""" mapper = query._mapper_zero() _get_clause, _get_params = mapper._get_clause def setup(query): _lcl_get_clause = _get_clause q = query._clone() q._get_condition() q._order_by = None # None present in ident - turn those comparisons # into "IS NULL" if None in primary_key_identity: nones = set( [ _get_params[col].key for col, value in zip( mapper.primary_key, primary_key_identity ) if value is None ] ) _lcl_get_clause = sql_util.adapt_criterion_to_null( _lcl_get_clause, nones ) _lcl_get_clause = q._adapt_clause(_lcl_get_clause, True, False) q._criterion = _lcl_get_clause for fn in self._post_criteria: q = fn(q) return q # cache the query against a key that includes # which positions in the primary key are NULL # (remember, we can map to an OUTER JOIN) bq = self.bq # add the clause we got from mapper._get_clause to the cache # key so that if a race causes multiple calls to _get_clause, # we've cached on ours bq = bq._clone() bq._cache_key += (_get_clause,) bq = bq.with_criteria( setup, tuple(elem is None for elem in primary_key_identity) ) params = dict( [ (_get_params[primary_key].key, id_val) for id_val, primary_key in zip( primary_key_identity, mapper.primary_key ) ] ) result = list(bq.for_session(self.session).params(**params)) l = len(result) if l > 1: raise orm_exc.MultipleResultsFound() elif l: return result[0] else: return None @util.deprecated( "1.2", "Baked lazy loading is now the default implementation." ) def bake_lazy_loaders(): """Enable the use of baked queries for all lazyloaders systemwide. The "baked" implementation of lazy loading is now the sole implementation for the base lazy loader; this method has no effect except for a warning. """ pass @util.deprecated( "1.2", "Baked lazy loading is now the default implementation." ) def unbake_lazy_loaders(): """Disable the use of baked queries for all lazyloaders systemwide. This method now raises NotImplementedError() as the "baked" implementation is the only lazy load implementation. The :paramref:`_orm.relationship.bake_queries` flag may be used to disable the caching of queries on a per-relationship basis. """ raise NotImplementedError( "Baked lazy loading is now the default implementation" ) @strategy_options.loader_option() def baked_lazyload(loadopt, attr): """Indicate that the given attribute should be loaded using "lazy" loading with a "baked" query used in the load. 
""" return loadopt.set_relationship_strategy(attr, {"lazy": "baked_select"}) @baked_lazyload._add_unbound_fn @util.deprecated( "1.2", "Baked lazy loading is now the default " "implementation for lazy loading.", ) def baked_lazyload(*keys): return strategy_options._UnboundLoad._from_keys( strategy_options._UnboundLoad.baked_lazyload, keys, False, {} ) @baked_lazyload._add_unbound_all_fn @util.deprecated( "1.2", "Baked lazy loading is now the default " "implementation for lazy loading.", ) def baked_lazyload_all(*keys): return strategy_options._UnboundLoad._from_keys( strategy_options._UnboundLoad.baked_lazyload, keys, True, {} ) baked_lazyload = baked_lazyload._unbound_fn baked_lazyload_all = baked_lazyload_all._unbound_all_fn bakery = BakedQuery.bakery
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/ext/automap.py
# ext/automap.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php r"""Define an extension to the :mod:`sqlalchemy.ext.declarative` system which automatically generates mapped classes and relationships from a database schema, typically though not necessarily one which is reflected. .. versionadded:: 0.9.1 Added :mod:`sqlalchemy.ext.automap`. It is hoped that the :class:`.AutomapBase` system provides a quick and modernized solution to the problem that the very famous `SQLSoup <https://sqlsoup.readthedocs.io/en/latest/>`_ also tries to solve, that of generating a quick and rudimentary object model from an existing database on the fly. By addressing the issue strictly at the mapper configuration level, and integrating fully with existing Declarative class techniques, :class:`.AutomapBase` seeks to provide a well-integrated approach to the issue of expediently auto-generating ad-hoc mappings. Basic Use ========= The simplest usage is to reflect an existing database into a new model. We create a new :class:`.AutomapBase` class in a similar manner as to how we create a declarative base class, using :func:`.automap_base`. We then call :meth:`.AutomapBase.prepare` on the resulting base class, asking it to reflect the schema and produce mappings:: from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine Base = automap_base() # engine, suppose it has two tables 'user' and 'address' set up engine = create_engine("sqlite:///mydatabase.db") # reflect the tables Base.prepare(engine, reflect=True) # mapped classes are now created with names by default # matching that of the table name. User = Base.classes.user Address = Base.classes.address session = Session(engine) # rudimentary relationships are produced session.add(Address(email_address="foo@bar.com", user=User(name="foo"))) session.commit() # collection-based relationships are by default named # "<classname>_collection" print (u1.address_collection) Above, calling :meth:`.AutomapBase.prepare` while passing along the :paramref:`.AutomapBase.prepare.reflect` parameter indicates that the :meth:`_schema.MetaData.reflect` method will be called on this declarative base classes' :class:`_schema.MetaData` collection; then, each **viable** :class:`_schema.Table` within the :class:`_schema.MetaData` will get a new mapped class generated automatically. The :class:`_schema.ForeignKeyConstraint` objects which link the various tables together will be used to produce new, bidirectional :func:`_orm.relationship` objects between classes. The classes and relationships follow along a default naming scheme that we can customize. At this point, our basic mapping consisting of related ``User`` and ``Address`` classes is ready to use in the traditional way. .. note:: By **viable**, we mean that for a table to be mapped, it must specify a primary key. Additionally, if the table is detected as being a pure association table between two other tables, it will not be directly mapped and will instead be configured as a many-to-many table between the mappings for the two referring tables. Generating Mappings from an Existing MetaData ============================================= We can pass a pre-declared :class:`_schema.MetaData` object to :func:`.automap_base`. 
This object can be constructed in any way, including programmatically, from a serialized file, or from itself being reflected using :meth:`_schema.MetaData.reflect`. Below we illustrate a combination of reflection and explicit table declaration:: from sqlalchemy import create_engine, MetaData, Table, Column, ForeignKey from sqlalchemy.ext.automap import automap_base engine = create_engine("sqlite:///mydatabase.db") # produce our own MetaData object metadata = MetaData() # we can reflect it ourselves from a database, using options # such as 'only' to limit what tables we look at... metadata.reflect(engine, only=['user', 'address']) # ... or just define our own Table objects with it (or combine both) Table('user_order', metadata, Column('id', Integer, primary_key=True), Column('user_id', ForeignKey('user.id')) ) # we can then produce a set of mappings from this MetaData. Base = automap_base(metadata=metadata) # calling prepare() just sets up mapped classes and relationships. Base.prepare() # mapped classes are ready User, Address, Order = Base.classes.user, Base.classes.address,\ Base.classes.user_order Specifying Classes Explicitly ============================= The :mod:`.sqlalchemy.ext.automap` extension allows classes to be defined explicitly, in a way similar to that of the :class:`.DeferredReflection` class. Classes that extend from :class:`.AutomapBase` act like regular declarative classes, but are not immediately mapped after their construction, and are instead mapped when we call :meth:`.AutomapBase.prepare`. The :meth:`.AutomapBase.prepare` method will make use of the classes we've established based on the table name we use. If our schema contains tables ``user`` and ``address``, we can define one or both of the classes to be used:: from sqlalchemy.ext.automap import automap_base from sqlalchemy import create_engine # automap base Base = automap_base() # pre-declare User for the 'user' table class User(Base): __tablename__ = 'user' # override schema elements like Columns user_name = Column('name', String) # override relationships too, if desired. # we must use the same name that automap would use for the # relationship, and also must refer to the class name that automap will # generate for "address" address_collection = relationship("address", collection_class=set) # reflect engine = create_engine("sqlite:///mydatabase.db") Base.prepare(engine, reflect=True) # we still have Address generated from the tablename "address", # but User is the same as Base.classes.User now Address = Base.classes.address u1 = session.query(User).first() print (u1.address_collection) # the backref is still there: a1 = session.query(Address).first() print (a1.user) Above, one of the more intricate details is that we illustrated overriding one of the :func:`_orm.relationship` objects that automap would have created. To do this, we needed to make sure the names match up with what automap would normally generate, in that the relationship name would be ``User.address_collection`` and the name of the class referred to, from automap's perspective, is called ``address``, even though we are referring to it as ``Address`` within our usage of this class. Overriding Naming Schemes ========================= :mod:`.sqlalchemy.ext.automap` is tasked with producing mapped classes and relationship names based on a schema, which means it has decision points in how these names are determined. 
These three decision points are provided using functions which can be passed to the :meth:`.AutomapBase.prepare` method, and are known as :func:`.classname_for_table`, :func:`.name_for_scalar_relationship`, and :func:`.name_for_collection_relationship`. Any or all of these functions are provided as in the example below, where we use a "camel case" scheme for class names and a "pluralizer" for collection names using the `Inflect <https://pypi.python.org/pypi/inflect>`_ package:: import re import inflect def camelize_classname(base, tablename, table): "Produce a 'camelized' class name, e.g. " "'words_and_underscores' -> 'WordsAndUnderscores'" return str(tablename[0].upper() + \ re.sub(r'_([a-z])', lambda m: m.group(1).upper(), tablename[1:])) _pluralizer = inflect.engine() def pluralize_collection(base, local_cls, referred_cls, constraint): "Produce an 'uncamelized', 'pluralized' class name, e.g. " "'SomeTerm' -> 'some_terms'" referred_name = referred_cls.__name__ uncamelized = re.sub(r'[A-Z]', lambda m: "_%s" % m.group(0).lower(), referred_name)[1:] pluralized = _pluralizer.plural(uncamelized) return pluralized from sqlalchemy.ext.automap import automap_base Base = automap_base() engine = create_engine("sqlite:///mydatabase.db") Base.prepare(engine, reflect=True, classname_for_table=camelize_classname, name_for_collection_relationship=pluralize_collection ) From the above mapping, we would now have classes ``User`` and ``Address``, where the collection from ``User`` to ``Address`` is called ``User.addresses``:: User, Address = Base.classes.User, Base.classes.Address u1 = User(addresses=[Address(email="foo@bar.com")]) Relationship Detection ====================== The vast majority of what automap accomplishes is the generation of :func:`_orm.relationship` structures based on foreign keys. The mechanism by which this works for many-to-one and one-to-many relationships is as follows: 1. A given :class:`_schema.Table`, known to be mapped to a particular class, is examined for :class:`_schema.ForeignKeyConstraint` objects. 2. From each :class:`_schema.ForeignKeyConstraint`, the remote :class:`_schema.Table` object present is matched up to the class to which it is to be mapped, if any, else it is skipped. 3. As the :class:`_schema.ForeignKeyConstraint` we are examining corresponds to a reference from the immediate mapped class, the relationship will be set up as a many-to-one referring to the referred class; a corresponding one-to-many backref will be created on the referred class referring to this class. 4. If any of the columns that are part of the :class:`_schema.ForeignKeyConstraint` are not nullable (e.g. ``nullable=False``), a :paramref:`_orm.relationship.cascade` keyword argument of ``all, delete-orphan`` will be added to the keyword arguments to be passed to the relationship or backref. If the :class:`_schema.ForeignKeyConstraint` reports that :paramref:`_schema.ForeignKeyConstraint.ondelete` is set to ``CASCADE`` for a not null or ``SET NULL`` for a nullable set of columns, the option :paramref:`_orm.relationship.passive_deletes` flag is set to ``True`` in the set of relationship keyword arguments. Note that not all backends support reflection of ON DELETE. .. 
versionadded:: 1.0.0 - automap will detect non-nullable foreign key constraints when producing a one-to-many relationship and establish a default cascade of ``all, delete-orphan`` if so; additionally, if the constraint specifies :paramref:`_schema.ForeignKeyConstraint.ondelete` of ``CASCADE`` for non-nullable or ``SET NULL`` for nullable columns, the ``passive_deletes=True`` option is also added. 5. The names of the relationships are determined using the :paramref:`.AutomapBase.prepare.name_for_scalar_relationship` and :paramref:`.AutomapBase.prepare.name_for_collection_relationship` callable functions. It is important to note that the default relationship naming derives the name from the **actual class name**. If you've given a particular class an explicit name by declaring it, or specified an alternate class naming scheme, that's the name from which the relationship name will be derived. 6. The classes are inspected for an existing mapped property matching these names. If one is detected on one side, but none on the other side, :class:`.AutomapBase` attempts to create a relationship on the missing side, then uses the :paramref:`_orm.relationship.back_populates` parameter in order to point the new relationship to the other side. 7. In the usual case where no relationship is on either side, :meth:`.AutomapBase.prepare` produces a :func:`_orm.relationship` on the "many-to-one" side and matches it to the other using the :paramref:`_orm.relationship.backref` parameter. 8. Production of the :func:`_orm.relationship` and optionally the :func:`.backref` is handed off to the :paramref:`.AutomapBase.prepare.generate_relationship` function, which can be supplied by the end-user in order to augment the arguments passed to :func:`_orm.relationship` or :func:`.backref` or to make use of custom implementations of these functions. Custom Relationship Arguments ----------------------------- The :paramref:`.AutomapBase.prepare.generate_relationship` hook can be used to add parameters to relationships. For most cases, we can make use of the existing :func:`.automap.generate_relationship` function to return the object, after augmenting the given keyword dictionary with our own arguments. Below is an illustration of how to send :paramref:`_orm.relationship.cascade` and :paramref:`_orm.relationship.passive_deletes` options along to all one-to-many relationships:: from sqlalchemy.ext.automap import generate_relationship def _gen_relationship(base, direction, return_fn, attrname, local_cls, referred_cls, **kw): if direction is interfaces.ONETOMANY: kw['cascade'] = 'all, delete-orphan' kw['passive_deletes'] = True # make use of the built-in function to actually return # the result. return generate_relationship(base, direction, return_fn, attrname, local_cls, referred_cls, **kw) from sqlalchemy.ext.automap import automap_base from sqlalchemy import create_engine # automap base Base = automap_base() engine = create_engine("sqlite:///mydatabase.db") Base.prepare(engine, reflect=True, generate_relationship=_gen_relationship) Many-to-Many relationships -------------------------- :mod:`.sqlalchemy.ext.automap` will generate many-to-many relationships, e.g. those which contain a ``secondary`` argument. The process for producing these is as follows: 1. A given :class:`_schema.Table` is examined for :class:`_schema.ForeignKeyConstraint` objects, before any mapped class has been assigned to it. 2.
If the table contains exactly two :class:`_schema.ForeignKeyConstraint` objects, and all columns within this table are members of these two :class:`_schema.ForeignKeyConstraint` objects, the table is assumed to be a "secondary" table, and will **not be mapped directly**. 3. The two (or one, for self-referential) external tables to which the :class:`_schema.Table` refers are matched to the classes to which they will be mapped, if any. 4. If mapped classes for both sides are located, a many-to-many bi-directional :func:`_orm.relationship` / :func:`.backref` pair is created between the two classes. 5. The override logic for many-to-many works the same as that of one-to-many / many-to-one; the :func:`.generate_relationship` function is called upon to generate the structures and existing attributes will be maintained. Relationships with Inheritance ------------------------------ :mod:`.sqlalchemy.ext.automap` will not generate any relationships between two classes that are in an inheritance relationship. That is, with two classes given as follows:: class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) type = Column(String(50)) __mapper_args__ = { 'polymorphic_identity':'employee', 'polymorphic_on': type } class Engineer(Employee): __tablename__ = 'engineer' id = Column(Integer, ForeignKey('employee.id'), primary_key=True) __mapper_args__ = { 'polymorphic_identity':'engineer', } The foreign key from ``Engineer`` to ``Employee`` is used not for a relationship, but to establish joined inheritance between the two classes. Note that this means automap will not generate *any* relationships for foreign keys that link from a subclass to a superclass. If a mapping has actual relationships from subclass to superclass as well, those need to be explicit. Below, as we have two separate foreign keys from ``Engineer`` to ``Employee``, we need to set up both the relationship we want as well as the ``inherit_condition``, as these are not things SQLAlchemy can guess:: class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) type = Column(String(50)) __mapper_args__ = { 'polymorphic_identity':'employee', 'polymorphic_on':type } class Engineer(Employee): __tablename__ = 'engineer' id = Column(Integer, ForeignKey('employee.id'), primary_key=True) favorite_employee_id = Column(Integer, ForeignKey('employee.id')) favorite_employee = relationship(Employee, foreign_keys=favorite_employee_id) __mapper_args__ = { 'polymorphic_identity':'engineer', 'inherit_condition': id == Employee.id } Handling Simple Naming Conflicts -------------------------------- In the case of naming conflicts during mapping, override any of :func:`.classname_for_table`, :func:`.name_for_scalar_relationship`, and :func:`.name_for_collection_relationship` as needed. For example, if automap is attempting to name a many-to-one relationship the same as an existing column, an alternate convention can be conditionally selected. Given a schema: .. sourcecode:: sql CREATE TABLE table_a ( id INTEGER PRIMARY KEY ); CREATE TABLE table_b ( id INTEGER PRIMARY KEY, table_a INTEGER, FOREIGN KEY(table_a) REFERENCES table_a(id) ); The above schema will first automap the ``table_a`` table as a class named ``table_a``; it will then automap a relationship onto the class for ``table_b`` with the same name as this related class, e.g. ``table_a``. This relationship name conflicts with the mapping column ``table_b.table_a``, and will emit an error on mapping.
We can resolve this conflict by using an underscore as follows:: def name_for_scalar_relationship(base, local_cls, referred_cls, constraint): name = referred_cls.__name__.lower() local_table = local_cls.__table__ if name in local_table.columns: newname = name + "_" warnings.warn( "Already detected name %s present. using %s" % (name, newname)) return newname return name Base.prepare(engine, reflect=True, name_for_scalar_relationship=name_for_scalar_relationship) Alternatively, we can change the name on the column side. The columns that are mapped can be modified using the technique described at :ref:`mapper_column_distinct_names`, by assigning the column explicitly to a new name:: Base = automap_base() class TableB(Base): __tablename__ = 'table_b' _table_a = Column('table_a', ForeignKey('table_a.id')) Base.prepare(engine, reflect=True) Using Automap with Explicit Declarations ======================================== As noted previously, automap has no dependency on reflection, and can make use of any collection of :class:`_schema.Table` objects within a :class:`_schema.MetaData` collection. From this, it follows that automap can also be used to generate missing relationships given an otherwise complete model that fully defines table metadata:: from sqlalchemy.ext.automap import automap_base from sqlalchemy import Column, Integer, String, ForeignKey Base = automap_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email = Column(String) user_id = Column(ForeignKey('user.id')) # produce relationships Base.prepare() # mapping is complete, with "address_collection" and # "user" relationships a1 = Address(email='u1') a2 = Address(email='u2') u1 = User(address_collection=[a1, a2]) assert a1.user is u1 Above, given mostly complete ``User`` and ``Address`` mappings, the :class:`_schema.ForeignKey` which we defined on ``Address.user_id`` allowed a bidirectional relationship pair ``Address.user`` and ``User.address_collection`` to be generated on the mapped classes. Note that when subclassing :class:`.AutomapBase`, the :meth:`.AutomapBase.prepare` method is required; if not called, the classes we've declared are in an un-mapped state. """ # noqa from .declarative import declarative_base as _declarative_base from .declarative.base import _DeferredMapperConfig from .. import util from ..orm import backref from ..orm import exc as orm_exc from ..orm import interfaces from ..orm import relationship from ..orm.mapper import _CONFIGURE_MUTEX from ..schema import ForeignKeyConstraint from ..sql import and_ def classname_for_table(base, tablename, table): """Return the class name that should be used, given the name of a table. The default implementation is:: return str(tablename) Alternate implementations can be specified using the :paramref:`.AutomapBase.prepare.classname_for_table` parameter. :param base: the :class:`.AutomapBase` class doing the prepare. :param tablename: string name of the :class:`_schema.Table`. :param table: the :class:`_schema.Table` object itself. :return: a string class name. .. note:: In Python 2, the string used for the class name **must** be a non-Unicode object, e.g. a ``str()`` object. The ``.name`` attribute of :class:`_schema.Table` is typically a Python unicode subclass, so the ``str()`` function should be applied to this name, after accounting for any non-ASCII characters.
""" return str(tablename) def name_for_scalar_relationship(base, local_cls, referred_cls, constraint): """Return the attribute name that should be used to refer from one class to another, for a scalar object reference. The default implementation is:: return referred_cls.__name__.lower() Alternate implementations can be specified using the :paramref:`.AutomapBase.prepare.name_for_scalar_relationship` parameter. :param base: the :class:`.AutomapBase` class doing the prepare. :param local_cls: the class to be mapped on the local side. :param referred_cls: the class to be mapped on the referring side. :param constraint: the :class:`_schema.ForeignKeyConstraint` that is being inspected to produce this relationship. """ return referred_cls.__name__.lower() def name_for_collection_relationship( base, local_cls, referred_cls, constraint ): """Return the attribute name that should be used to refer from one class to another, for a collection reference. The default implementation is:: return referred_cls.__name__.lower() + "_collection" Alternate implementations can be specified using the :paramref:`.AutomapBase.prepare.name_for_collection_relationship` parameter. :param base: the :class:`.AutomapBase` class doing the prepare. :param local_cls: the class to be mapped on the local side. :param referred_cls: the class to be mapped on the referring side. :param constraint: the :class:`_schema.ForeignKeyConstraint` that is being inspected to produce this relationship. """ return referred_cls.__name__.lower() + "_collection" def generate_relationship( base, direction, return_fn, attrname, local_cls, referred_cls, **kw ): r"""Generate a :func:`_orm.relationship` or :func:`.backref` on behalf of two mapped classes. An alternate implementation of this function can be specified using the :paramref:`.AutomapBase.prepare.generate_relationship` parameter. The default implementation of this function is as follows:: if return_fn is backref: return return_fn(attrname, **kw) elif return_fn is relationship: return return_fn(referred_cls, **kw) else: raise TypeError("Unknown relationship function: %s" % return_fn) :param base: the :class:`.AutomapBase` class doing the prepare. :param direction: indicate the "direction" of the relationship; this will be one of :data:`.ONETOMANY`, :data:`.MANYTOONE`, :data:`.MANYTOMANY`. :param return_fn: the function that is used by default to create the relationship. This will be either :func:`_orm.relationship` or :func:`.backref`. The :func:`.backref` function's result will be used to produce a new :func:`_orm.relationship` in a second step, so it is critical that user-defined implementations correctly differentiate between the two functions, if a custom relationship function is being used. :param attrname: the attribute name to which this relationship is being assigned. If the value of :paramref:`.generate_relationship.return_fn` is the :func:`.backref` function, then this name is the name that is being assigned to the backref. :param local_cls: the "local" class to which this relationship or backref will be locally present. :param referred_cls: the "referred" class to which the relationship or backref refers to. :param \**kw: all additional keyword arguments are passed along to the function. :return: a :func:`_orm.relationship` or :func:`.backref` construct, as dictated by the :paramref:`.generate_relationship.return_fn` parameter. 
""" if return_fn is backref: return return_fn(attrname, **kw) elif return_fn is relationship: return return_fn(referred_cls, **kw) else: raise TypeError("Unknown relationship function: %s" % return_fn) class AutomapBase(object): """Base class for an "automap" schema. The :class:`.AutomapBase` class can be compared to the "declarative base" class that is produced by the :func:`.declarative.declarative_base` function. In practice, the :class:`.AutomapBase` class is always used as a mixin along with an actual declarative base. A new subclassable :class:`.AutomapBase` is typically instantiated using the :func:`.automap_base` function. .. seealso:: :ref:`automap_toplevel` """ __abstract__ = True classes = None """An instance of :class:`.util.Properties` containing classes. This object behaves much like the ``.c`` collection on a table. Classes are present under the name they were given, e.g.:: Base = automap_base() Base.prepare(engine=some_engine, reflect=True) User, Address = Base.classes.User, Base.classes.Address """ @classmethod def prepare( cls, engine=None, reflect=False, schema=None, classname_for_table=classname_for_table, collection_class=list, name_for_scalar_relationship=name_for_scalar_relationship, name_for_collection_relationship=name_for_collection_relationship, generate_relationship=generate_relationship, ): """Extract mapped classes and relationships from the :class:`_schema.MetaData` and perform mappings. :param engine: an :class:`_engine.Engine` or :class:`_engine.Connection` with which to perform schema reflection, if specified. If the :paramref:`.AutomapBase.prepare.reflect` argument is False, this object is not used. :param reflect: if True, the :meth:`_schema.MetaData.reflect` method is called on the :class:`_schema.MetaData` associated with this :class:`.AutomapBase`. The :class:`_engine.Engine` passed via :paramref:`.AutomapBase.prepare.engine` will be used to perform the reflection if present; else, the :class:`_schema.MetaData` should already be bound to some engine else the operation will fail. :param classname_for_table: callable function which will be used to produce new class names, given a table name. Defaults to :func:`.classname_for_table`. :param name_for_scalar_relationship: callable function which will be used to produce relationship names for scalar relationships. Defaults to :func:`.name_for_scalar_relationship`. :param name_for_collection_relationship: callable function which will be used to produce relationship names for collection-oriented relationships. Defaults to :func:`.name_for_collection_relationship`. :param generate_relationship: callable function which will be used to actually generate :func:`_orm.relationship` and :func:`.backref` constructs. Defaults to :func:`.generate_relationship`. :param collection_class: the Python collection class that will be used when a new :func:`_orm.relationship` object is created that represents a collection. Defaults to ``list``. :param schema: When present in conjunction with the :paramref:`.AutomapBase.prepare.reflect` flag, is passed to :meth:`_schema.MetaData.reflect` to indicate the primary schema where tables should be reflected from. When omitted, the default schema in use by the database connection is used. .. 
versionadded:: 1.1 """ if reflect: cls.metadata.reflect( engine, schema=schema, extend_existing=True, autoload_replace=False, ) _CONFIGURE_MUTEX.acquire() try: table_to_map_config = dict( (m.local_table, m) for m in _DeferredMapperConfig.classes_for_base( cls, sort=False ) ) many_to_many = [] for table in cls.metadata.tables.values(): lcl_m2m, rem_m2m, m2m_const = _is_many_to_many(cls, table) if lcl_m2m is not None: many_to_many.append((lcl_m2m, rem_m2m, m2m_const, table)) elif not table.primary_key: continue elif table not in table_to_map_config: mapped_cls = type( classname_for_table(cls, table.name, table), (cls,), {"__table__": table}, ) map_config = _DeferredMapperConfig.config_for_cls( mapped_cls ) cls.classes[map_config.cls.__name__] = mapped_cls table_to_map_config[table] = map_config for map_config in table_to_map_config.values(): _relationships_for_fks( cls, map_config, table_to_map_config, collection_class, name_for_scalar_relationship, name_for_collection_relationship, generate_relationship, ) for lcl_m2m, rem_m2m, m2m_const, table in many_to_many: _m2m_relationship( cls, lcl_m2m, rem_m2m, m2m_const, table, table_to_map_config, collection_class, name_for_scalar_relationship, name_for_collection_relationship, generate_relationship, ) for map_config in _DeferredMapperConfig.classes_for_base(cls): map_config.map() finally: _CONFIGURE_MUTEX.release() _sa_decl_prepare = True """Indicate that the mapping of classes should be deferred. The presence of this attribute name indicates to declarative that the call to mapper() should not occur immediately; instead, information about the table and attributes to be mapped are gathered into an internal structure called _DeferredMapperConfig. These objects can be collected later using classes_for_base(), additional mapping decisions can be made, and then the map() method will actually apply the mapping. The only real reason this deferral of the whole thing is needed is to support primary key columns that aren't reflected yet when the class is declared; everything else can theoretically be added to the mapper later. However, the _DeferredMapperConfig is a nice interface in any case which exists at that not usually exposed point at which declarative has the class and the Table but hasn't called mapper() yet. """ @classmethod def _sa_raise_deferred_config(cls): raise orm_exc.UnmappedClassError( cls, msg="Class %s is a subclass of AutomapBase. " "Mappings are not produced until the .prepare() " "method is called on the class hierarchy." % orm_exc._safe_cls_name(cls), ) def automap_base(declarative_base=None, **kw): r"""Produce a declarative automap base. This function produces a new base class that is a product of the :class:`.AutomapBase` class as well a declarative base produced by :func:`.declarative.declarative_base`. All parameters other than ``declarative_base`` are keyword arguments that are passed directly to the :func:`.declarative.declarative_base` function. :param declarative_base: an existing class produced by :func:`.declarative.declarative_base`. When this is passed, the function no longer invokes :func:`.declarative.declarative_base` itself, and all other keyword arguments are ignored. :param \**kw: keyword arguments are passed along to :func:`.declarative.declarative_base`. 
""" if declarative_base is None: Base = _declarative_base(**kw) else: Base = declarative_base return type( Base.__name__, (AutomapBase, Base), {"__abstract__": True, "classes": util.Properties({})}, ) def _is_many_to_many(automap_base, table): fk_constraints = [ const for const in table.constraints if isinstance(const, ForeignKeyConstraint) ] if len(fk_constraints) != 2: return None, None, None cols = sum( [ [fk.parent for fk in fk_constraint.elements] for fk_constraint in fk_constraints ], [], ) if set(cols) != set(table.c): return None, None, None return ( fk_constraints[0].elements[0].column.table, fk_constraints[1].elements[0].column.table, fk_constraints, ) def _relationships_for_fks( automap_base, map_config, table_to_map_config, collection_class, name_for_scalar_relationship, name_for_collection_relationship, generate_relationship, ): local_table = map_config.local_table local_cls = map_config.cls # derived from a weakref, may be None if local_table is None or local_cls is None: return for constraint in local_table.constraints: if isinstance(constraint, ForeignKeyConstraint): fks = constraint.elements referred_table = fks[0].column.table referred_cfg = table_to_map_config.get(referred_table, None) if referred_cfg is None: continue referred_cls = referred_cfg.cls if local_cls is not referred_cls and issubclass( local_cls, referred_cls ): continue relationship_name = name_for_scalar_relationship( automap_base, local_cls, referred_cls, constraint ) backref_name = name_for_collection_relationship( automap_base, referred_cls, local_cls, constraint ) o2m_kws = {} nullable = False not in {fk.parent.nullable for fk in fks} if not nullable: o2m_kws["cascade"] = "all, delete-orphan" if ( constraint.ondelete and constraint.ondelete.lower() == "cascade" ): o2m_kws["passive_deletes"] = True else: if ( constraint.ondelete and constraint.ondelete.lower() == "set null" ): o2m_kws["passive_deletes"] = True create_backref = backref_name not in referred_cfg.properties if relationship_name not in map_config.properties: if create_backref: backref_obj = generate_relationship( automap_base, interfaces.ONETOMANY, backref, backref_name, referred_cls, local_cls, collection_class=collection_class, **o2m_kws ) else: backref_obj = None rel = generate_relationship( automap_base, interfaces.MANYTOONE, relationship, relationship_name, local_cls, referred_cls, foreign_keys=[fk.parent for fk in constraint.elements], backref=backref_obj, remote_side=[fk.column for fk in constraint.elements], ) if rel is not None: map_config.properties[relationship_name] = rel if not create_backref: referred_cfg.properties[ backref_name ].back_populates = relationship_name elif create_backref: rel = generate_relationship( automap_base, interfaces.ONETOMANY, relationship, backref_name, referred_cls, local_cls, foreign_keys=[fk.parent for fk in constraint.elements], back_populates=relationship_name, collection_class=collection_class, **o2m_kws ) if rel is not None: referred_cfg.properties[backref_name] = rel map_config.properties[ relationship_name ].back_populates = backref_name def _m2m_relationship( automap_base, lcl_m2m, rem_m2m, m2m_const, table, table_to_map_config, collection_class, name_for_scalar_relationship, name_for_collection_relationship, generate_relationship, ): map_config = table_to_map_config.get(lcl_m2m, None) referred_cfg = table_to_map_config.get(rem_m2m, None) if map_config is None or referred_cfg is None: return local_cls = map_config.cls referred_cls = referred_cfg.cls relationship_name = 
name_for_collection_relationship( automap_base, local_cls, referred_cls, m2m_const[0] ) backref_name = name_for_collection_relationship( automap_base, referred_cls, local_cls, m2m_const[1] ) create_backref = backref_name not in referred_cfg.properties if relationship_name not in map_config.properties: if create_backref: backref_obj = generate_relationship( automap_base, interfaces.MANYTOMANY, backref, backref_name, referred_cls, local_cls, collection_class=collection_class, ) else: backref_obj = None rel = generate_relationship( automap_base, interfaces.MANYTOMANY, relationship, relationship_name, local_cls, referred_cls, secondary=table, primaryjoin=and_( fk.column == fk.parent for fk in m2m_const[0].elements ), secondaryjoin=and_( fk.column == fk.parent for fk in m2m_const[1].elements ), backref=backref_obj, collection_class=collection_class, ) if rel is not None: map_config.properties[relationship_name] = rel if not create_backref: referred_cfg.properties[ backref_name ].back_populates = relationship_name elif create_backref: rel = generate_relationship( automap_base, interfaces.MANYTOMANY, relationship, backref_name, referred_cls, local_cls, secondary=table, primaryjoin=and_( fk.column == fk.parent for fk in m2m_const[1].elements ), secondaryjoin=and_( fk.column == fk.parent for fk in m2m_const[0].elements ), back_populates=relationship_name, collection_class=collection_class, ) if rel is not None: referred_cfg.properties[backref_name] = rel map_config.properties[ relationship_name ].back_populates = backref_name
0
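Taken together, the ``prepare()`` steps above reflect tables, generate one mapped class per table that has a primary key, and wire up relationship/backref pairs from foreign key constraints. A minimal end-to-end sketch of that workflow, using hypothetical ``user`` and ``address`` tables so the default ``classname_for_table`` and relationship-naming functions have something to work with::

    from sqlalchemy import (Column, ForeignKey, Integer, MetaData,
                            String, Table, create_engine)
    from sqlalchemy.ext.automap import automap_base
    from sqlalchemy.orm import Session

    engine = create_engine("sqlite://")

    # hypothetical schema: two tables joined by a foreign key
    metadata = MetaData()
    Table("user", metadata,
          Column("id", Integer, primary_key=True),
          Column("name", String(50)))
    Table("address", metadata,
          Column("id", Integer, primary_key=True),
          Column("user_id", Integer, ForeignKey("user.id")),
          Column("email", String(50)))
    metadata.create_all(engine)

    Base = automap_base()
    Base.prepare(engine, reflect=True)

    # classes are keyed by the names produced by classname_for_table();
    # the foreign key produced a many-to-one "user" on address and a
    # one-to-many "address_collection" on user, per the default
    # name_for_scalar_relationship / name_for_collection_relationship
    User, Address = Base.classes.user, Base.classes.address

    session = Session(engine)
    u = User(name="ed")
    u.address_collection.append(Address(email="ed@example.com"))
    session.add(u)
    session.commit()
    assert session.query(Address).one().user is u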
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/ext/horizontal_shard.py
# ext/horizontal_shard.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Horizontal sharding support. Defines a rudimental 'horizontal sharding' system which allows a Session to distribute queries and persistence operations across multiple databases. For a usage example, see the :ref:`examples_sharding` example included in the source distribution. """ from .. import inspect from .. import util from ..orm.query import Query from ..orm.session import Session __all__ = ["ShardedSession", "ShardedQuery"] class ShardedQuery(Query): def __init__(self, *args, **kwargs): super(ShardedQuery, self).__init__(*args, **kwargs) self.id_chooser = self.session.id_chooser self.query_chooser = self.session.query_chooser self._shard_id = None def set_shard(self, shard_id): """return a new query, limited to a single shard ID. all subsequent operations with the returned query will be against the single shard regardless of other state. """ q = self._clone() q._shard_id = shard_id return q def _execute_and_instances(self, context): def iter_for_shard(shard_id): context.attributes["shard_id"] = context.identity_token = shard_id result = self._connection_from_session( mapper=self._bind_mapper(), shard_id=shard_id ).execute(context.statement, self._params) return self.instances(result, context) if context.identity_token is not None: return iter_for_shard(context.identity_token) elif self._shard_id is not None: return iter_for_shard(self._shard_id) else: partial = [] for shard_id in self.query_chooser(self): partial.extend(iter_for_shard(shard_id)) # if some kind of in memory 'sorting' # were done, this is where it would happen return iter(partial) def _execute_crud(self, stmt, mapper): def exec_for_shard(shard_id): conn = self._connection_from_session( mapper=mapper, shard_id=shard_id, clause=stmt, close_with_result=True, ) result = conn.execute(stmt, self._params) return result if self._shard_id is not None: return exec_for_shard(self._shard_id) else: rowcount = 0 results = [] for shard_id in self.query_chooser(self): result = exec_for_shard(shard_id) rowcount += result.rowcount results.append(result) return ShardedResult(results, rowcount) def _identity_lookup( self, mapper, primary_key_identity, identity_token=None, lazy_loaded_from=None, **kw ): """override the default Query._identity_lookup method so that we search for a given non-token primary key identity across all possible identity tokens (e.g. shard ids). """ if identity_token is not None: return super(ShardedQuery, self)._identity_lookup( mapper, primary_key_identity, identity_token=identity_token, **kw ) else: q = self.session.query(mapper) if lazy_loaded_from: q = q._set_lazyload_from(lazy_loaded_from) for shard_id in self.id_chooser(q, primary_key_identity): obj = super(ShardedQuery, self)._identity_lookup( mapper, primary_key_identity, identity_token=shard_id, **kw ) if obj is not None: return obj return None def _get_impl(self, primary_key_identity, db_load_fn, identity_token=None): """Override the default Query._get_impl() method so that we emit a query to the DB for each possible identity token, if we don't have one already. """ def _db_load_fn(query, primary_key_identity): # load from the database. The original db_load_fn will # use the given Query object to load from the DB, so our # shard_id is what will indicate the DB that we query from. 
if self._shard_id is not None: return db_load_fn(self, primary_key_identity) else: ident = util.to_list(primary_key_identity) # build a ShardedQuery for each shard identifier and # try to load from the DB for shard_id in self.id_chooser(self, ident): q = self.set_shard(shard_id) o = db_load_fn(q, ident) if o is not None: return o else: return None if identity_token is None and self._shard_id is not None: identity_token = self._shard_id return super(ShardedQuery, self)._get_impl( primary_key_identity, _db_load_fn, identity_token=identity_token ) class ShardedResult(object): """A value object that represents multiple :class:`_engine.ResultProxy` objects. This is used by the :meth:`.ShardedQuery._execute_crud` hook to return an object that takes the place of the single :class:`_engine.ResultProxy`. Attribute include ``result_proxies``, which is a sequence of the actual :class:`_engine.ResultProxy` objects, as well as ``aggregate_rowcount`` or ``rowcount``, which is the sum of all the individual rowcount values. .. versionadded:: 1.3 """ __slots__ = ("result_proxies", "aggregate_rowcount") def __init__(self, result_proxies, aggregate_rowcount): self.result_proxies = result_proxies self.aggregate_rowcount = aggregate_rowcount @property def rowcount(self): return self.aggregate_rowcount class ShardedSession(Session): def __init__( self, shard_chooser, id_chooser, query_chooser, shards=None, query_cls=ShardedQuery, **kwargs ): """Construct a ShardedSession. :param shard_chooser: A callable which, passed a Mapper, a mapped instance, and possibly a SQL clause, returns a shard ID. This id may be based off of the attributes present within the object, or on some round-robin scheme. If the scheme is based on a selection, it should set whatever state on the instance to mark it in the future as participating in that shard. :param id_chooser: A callable, passed a query and a tuple of identity values, which should return a list of shard ids where the ID might reside. The databases will be queried in the order of this listing. :param query_chooser: For a given Query, returns the list of shard_ids where the query should be issued. Results from all shards returned will be combined together into a single listing. :param shards: A dictionary of string shard names to :class:`~sqlalchemy.engine.Engine` objects. 
""" super(ShardedSession, self).__init__(query_cls=query_cls, **kwargs) self.shard_chooser = shard_chooser self.id_chooser = id_chooser self.query_chooser = query_chooser self.__binds = {} self.connection_callable = self.connection if shards is not None: for k in shards: self.bind_shard(k, shards[k]) def _choose_shard_and_assign(self, mapper, instance, **kw): if instance is not None: state = inspect(instance) if state.key: token = state.key[2] assert token is not None return token elif state.identity_token: return state.identity_token shard_id = self.shard_chooser(mapper, instance, **kw) if instance is not None: state.identity_token = shard_id return shard_id def connection(self, mapper=None, instance=None, shard_id=None, **kwargs): if shard_id is None: shard_id = self._choose_shard_and_assign(mapper, instance) if self.transaction is not None: return self.transaction.connection(mapper, shard_id=shard_id) else: return self.get_bind( mapper, shard_id=shard_id, instance=instance )._contextual_connect(**kwargs) def get_bind( self, mapper, shard_id=None, instance=None, clause=None, **kw ): if shard_id is None: shard_id = self._choose_shard_and_assign( mapper, instance, clause=clause ) return self.__binds[shard_id] def bind_shard(self, shard_id, bind): self.__binds[shard_id] = bind
0
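The three chooser callables documented in ``ShardedSession.__init__`` above are plain functions. A minimal sketch wiring two shards together follows; the even/odd routing rule, the in-memory engines, and the assumption that mapped objects expose an integer ``id`` are illustrative choices rather than anything required by the API::

    from sqlalchemy import create_engine
    from sqlalchemy.ext.horizontal_shard import ShardedSession

    shards = {
        "even": create_engine("sqlite://"),
        "odd": create_engine("sqlite://"),
    }

    def shard_chooser(mapper, instance, clause=None):
        # flush/persistence path: route by primary key parity; a real
        # application would handle instance=None more deliberately
        if instance is None:
            return "even"
        return "even" if instance.id % 2 == 0 else "odd"

    def id_chooser(query, ident):
        # get()-style lookups: list every shard where this primary key
        # identity might reside, in query order
        return ["even"] if ident[0] % 2 == 0 else ["odd"]

    def query_chooser(query):
        # nothing to narrow by in this sketch, so search all shards;
        # results are combined into a single listing as described above
        return ["even", "odd"]

    session = ShardedSession(
        shard_chooser=shard_chooser,
        id_chooser=id_chooser,
        query_chooser=query_chooser,
        shards=shards,
    )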
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/ext/mutable.py
# ext/mutable.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php r"""Provide support for tracking of in-place changes to scalar values, which are propagated into ORM change events on owning parent objects. .. _mutable_scalars: Establishing Mutability on Scalar Column Values =============================================== A typical example of a "mutable" structure is a Python dictionary. Following the example introduced in :ref:`types_toplevel`, we begin with a custom type that marshals Python dictionaries into JSON strings before being persisted:: from sqlalchemy.types import TypeDecorator, VARCHAR import json class JSONEncodedDict(TypeDecorator): "Represents an immutable structure as a json-encoded string." impl = VARCHAR def process_bind_param(self, value, dialect): if value is not None: value = json.dumps(value) return value def process_result_value(self, value, dialect): if value is not None: value = json.loads(value) return value The usage of ``json`` is only for the purposes of example. The :mod:`sqlalchemy.ext.mutable` extension can be used with any type whose target Python type may be mutable, including :class:`.PickleType`, :class:`_postgresql.ARRAY`, etc. When using the :mod:`sqlalchemy.ext.mutable` extension, the value itself tracks all parents which reference it. Below, we illustrate a simple version of the :class:`.MutableDict` dictionary object, which applies the :class:`.Mutable` mixin to a plain Python dictionary:: from sqlalchemy.ext.mutable import Mutable class MutableDict(Mutable, dict): @classmethod def coerce(cls, key, value): "Convert plain dictionaries to MutableDict." if not isinstance(value, MutableDict): if isinstance(value, dict): return MutableDict(value) # this call will raise ValueError return Mutable.coerce(key, value) else: return value def __setitem__(self, key, value): "Detect dictionary set events and emit change events." dict.__setitem__(self, key, value) self.changed() def __delitem__(self, key): "Detect dictionary del events and emit change events." dict.__delitem__(self, key) self.changed() The above dictionary class takes the approach of subclassing the Python built-in ``dict`` to produce a dict subclass which routes all mutation events through ``__setitem__``. There are variants on this approach, such as subclassing ``UserDict.UserDict`` or ``collections.MutableMapping``; the part that's important to this example is that the :meth:`.Mutable.changed` method is called whenever an in-place change to the datastructure takes place. We also redefine the :meth:`.Mutable.coerce` method which will be used to convert any values that are not instances of ``MutableDict``, such as the plain dictionaries returned by the ``json`` module, into the appropriate type. Defining this method is optional; we could just as well created our ``JSONEncodedDict`` such that it always returns an instance of ``MutableDict``, and additionally ensured that all calling code uses ``MutableDict`` explicitly. When :meth:`.Mutable.coerce` is not overridden, any values applied to a parent object which are not instances of the mutable type will raise a ``ValueError``. Our new ``MutableDict`` type offers a class method :meth:`~.Mutable.as_mutable` which we can use within column metadata to associate with types. 
This method grabs the given type object or class and associates a listener that will detect all future mappings of this type, applying event listening instrumentation to the mapped attribute. Such as, with classical table metadata:: from sqlalchemy import Table, Column, Integer my_data = Table('my_data', metadata, Column('id', Integer, primary_key=True), Column('data', MutableDict.as_mutable(JSONEncodedDict)) ) Above, :meth:`~.Mutable.as_mutable` returns an instance of ``JSONEncodedDict`` (if the type object was not an instance already), which will intercept any attributes which are mapped against this type. Below we establish a simple mapping against the ``my_data`` table:: from sqlalchemy import mapper class MyDataClass(object): pass # associates mutation listeners with MyDataClass.data mapper(MyDataClass, my_data) The ``MyDataClass.data`` member will now be notified of in place changes to its value. There's no difference in usage when using declarative:: from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class MyDataClass(Base): __tablename__ = 'my_data' id = Column(Integer, primary_key=True) data = Column(MutableDict.as_mutable(JSONEncodedDict)) Any in-place changes to the ``MyDataClass.data`` member will flag the attribute as "dirty" on the parent object:: >>> from sqlalchemy.orm import Session >>> sess = Session() >>> m1 = MyDataClass(data={'value1':'foo'}) >>> sess.add(m1) >>> sess.commit() >>> m1.data['value1'] = 'bar' >>> assert m1 in sess.dirty True The ``MutableDict`` can be associated with all future instances of ``JSONEncodedDict`` in one step, using :meth:`~.Mutable.associate_with`. This is similar to :meth:`~.Mutable.as_mutable` except it will intercept all occurrences of ``MutableDict`` in all mappings unconditionally, without the need to declare it individually:: MutableDict.associate_with(JSONEncodedDict) class MyDataClass(Base): __tablename__ = 'my_data' id = Column(Integer, primary_key=True) data = Column(JSONEncodedDict) Supporting Pickling -------------------- The key to the :mod:`sqlalchemy.ext.mutable` extension relies upon the placement of a ``weakref.WeakKeyDictionary`` upon the value object, which stores a mapping of parent mapped objects keyed to the attribute name under which they are associated with this value. ``WeakKeyDictionary`` objects are not picklable, due to the fact that they contain weakrefs and function callbacks. In our case, this is a good thing, since if this dictionary were picklable, it could lead to an excessively large pickle size for our value objects that are pickled by themselves outside of the context of the parent. The developer responsibility here is only to provide a ``__getstate__`` method that excludes the :meth:`~MutableBase._parents` collection from the pickle stream:: class MyMutableType(Mutable): def __getstate__(self): d = self.__dict__.copy() d.pop('_parents', None) return d With our dictionary example, we need to return the contents of the dict itself (and also restore them on __setstate__):: class MutableDict(Mutable, dict): # .... def __getstate__(self): return dict(self) def __setstate__(self, state): self.update(state) In the case that our mutable value object is pickled as it is attached to one or more parent objects that are also part of the pickle, the :class:`.Mutable` mixin will re-establish the :attr:`.Mutable._parents` collection on each value object as the owning parents themselves are unpickled. 
Receiving Events ---------------- The :meth:`.AttributeEvents.modified` event handler may be used to receive an event when a mutable scalar emits a change event. This event handler is called when the :func:`.attributes.flag_modified` function is called from within the mutable extension:: from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import event Base = declarative_base() class MyDataClass(Base): __tablename__ = 'my_data' id = Column(Integer, primary_key=True) data = Column(MutableDict.as_mutable(JSONEncodedDict)) @event.listens_for(MyDataClass.data, "modified") def modified_json(instance): print("json value modified:", instance.data) .. _mutable_composites: Establishing Mutability on Composites ===================================== Composites are a special ORM feature which allow a single scalar attribute to be assigned an object value which represents information "composed" from one or more columns from the underlying mapped table. The usual example is that of a geometric "point", and is introduced in :ref:`mapper_composite`. As is the case with :class:`.Mutable`, the user-defined composite class subclasses :class:`.MutableComposite` as a mixin, and detects and delivers change events to its parents via the :meth:`.MutableComposite.changed` method. In the case of a composite class, the detection is usually via the usage of Python descriptors (i.e. ``@property``), or alternatively via the special Python method ``__setattr__()``. Below we expand upon the ``Point`` class introduced in :ref:`mapper_composite` to subclass :class:`.MutableComposite` and to also route attribute set events via ``__setattr__`` to the :meth:`.MutableComposite.changed` method:: from sqlalchemy.ext.mutable import MutableComposite class Point(MutableComposite): def __init__(self, x, y): self.x = x self.y = y def __setattr__(self, key, value): "Intercept set events" # set the attribute object.__setattr__(self, key, value) # alert all parents to the change self.changed() def __composite_values__(self): return self.x, self.y def __eq__(self, other): return isinstance(other, Point) and \ other.x == self.x and \ other.y == self.y def __ne__(self, other): return not self.__eq__(other) The :class:`.MutableComposite` class uses a Python metaclass to automatically establish listeners for any usage of :func:`_orm.composite` that specifies our ``Point`` type. Below, when ``Point`` is mapped to the ``Vertex`` class, listeners are established which will route change events from ``Point`` objects to each of the ``Vertex.start`` and ``Vertex.end`` attributes:: from sqlalchemy.orm import composite, mapper from sqlalchemy import Table, Column vertices = Table('vertices', metadata, Column('id', Integer, primary_key=True), Column('x1', Integer), Column('y1', Integer), Column('x2', Integer), Column('y2', Integer), ) class Vertex(object): pass mapper(Vertex, vertices, properties={ 'start': composite(Point, vertices.c.x1, vertices.c.y1), 'end': composite(Point, vertices.c.x2, vertices.c.y2) }) Any in-place changes to the ``Vertex.start`` or ``Vertex.end`` members will flag the attribute as "dirty" on the parent object:: >>> from sqlalchemy.orm import Session >>> sess = Session() >>> v1 = Vertex(start=Point(3, 4), end=Point(12, 15)) >>> sess.add(v1) >>> sess.commit() >>> v1.end.x = 8 >>> assert v1 in sess.dirty True Coercing Mutable Composites --------------------------- The :meth:`.MutableBase.coerce` method is also supported on composite types. 
In the case of :class:`.MutableComposite`, the :meth:`.MutableBase.coerce` method is only called for attribute set operations, not load operations. Overriding the :meth:`.MutableBase.coerce` method is essentially equivalent to using a :func:`.validates` validation routine for all attributes which make use of the custom composite type:: class Point(MutableComposite): # other Point methods # ... def coerce(cls, key, value): if isinstance(value, tuple): value = Point(*value) elif not isinstance(value, Point): raise ValueError("tuple or Point expected") return value Supporting Pickling -------------------- As is the case with :class:`.Mutable`, the :class:`.MutableComposite` helper class uses a ``weakref.WeakKeyDictionary`` available via the :meth:`MutableBase._parents` attribute which isn't picklable. If we need to pickle instances of ``Point`` or its owning class ``Vertex``, we at least need to define a ``__getstate__`` that doesn't include the ``_parents`` dictionary. Below we define both a ``__getstate__`` and a ``__setstate__`` that package up the minimal form of our ``Point`` class:: class Point(MutableComposite): # ... def __getstate__(self): return self.x, self.y def __setstate__(self, state): self.x, self.y = state As with :class:`.Mutable`, the :class:`.MutableComposite` augments the pickling process of the parent's object-relational state so that the :meth:`MutableBase._parents` collection is restored to all ``Point`` objects. """ import weakref from .. import event from .. import types from ..orm import Mapper from ..orm import mapper from ..orm import object_mapper from ..orm.attributes import flag_modified from ..sql.base import SchemaEventTarget from ..util import memoized_property class MutableBase(object): """Common base class to :class:`.Mutable` and :class:`.MutableComposite`. """ @memoized_property def _parents(self): """Dictionary of parent object->attribute name on the parent. This attribute is a so-called "memoized" property. It initializes itself with a new ``weakref.WeakKeyDictionary`` the first time it is accessed, returning the same object upon subsequent access. """ return weakref.WeakKeyDictionary() @classmethod def coerce(cls, key, value): """Given a value, coerce it into the target type. Can be overridden by custom subclasses to coerce incoming data into a particular type. By default, raises ``ValueError``. This method is called in different scenarios depending on if the parent class is of type :class:`.Mutable` or of type :class:`.MutableComposite`. In the case of the former, it is called for both attribute-set operations as well as during ORM loading operations. For the latter, it is only called during attribute-set operations; the mechanics of the :func:`.composite` construct handle coercion during load operations. :param key: string name of the ORM-mapped attribute being set. :param value: the incoming value. :return: the method should return the coerced value, or raise ``ValueError`` if the coercion cannot be completed. """ if value is None: return None msg = "Attribute '%s' does not accept objects of type %s" raise ValueError(msg % (key, type(value))) @classmethod def _get_listen_keys(cls, attribute): """Given a descriptor attribute, return a ``set()`` of the attribute keys which indicate a change in the state of this attribute. This is normally just ``set([attribute.key])``, but can be overridden to provide for additional keys. E.g. 
a :class:`.MutableComposite` augments this set with the attribute keys associated with the columns that comprise the composite value. This collection is consulted in the case of intercepting the :meth:`.InstanceEvents.refresh` and :meth:`.InstanceEvents.refresh_flush` events, which pass along a list of attribute names that have been refreshed; the list is compared against this set to determine if action needs to be taken. .. versionadded:: 1.0.5 """ return {attribute.key} @classmethod def _listen_on_attribute(cls, attribute, coerce, parent_cls): """Establish this type as a mutation listener for the given mapped descriptor. """ key = attribute.key if parent_cls is not attribute.class_: return # rely on "propagate" here parent_cls = attribute.class_ listen_keys = cls._get_listen_keys(attribute) def load(state, *args): """Listen for objects loaded or refreshed. Wrap the target data member's value with ``Mutable``. """ val = state.dict.get(key, None) if val is not None: if coerce: val = cls.coerce(key, val) state.dict[key] = val val._parents[state.obj()] = key def load_attrs(state, ctx, attrs): if not attrs or listen_keys.intersection(attrs): load(state) def set_(target, value, oldvalue, initiator): """Listen for set/replace events on the target data member. Establish a weak reference to the parent object on the incoming value, remove it for the one outgoing. """ if value is oldvalue: return value if not isinstance(value, cls): value = cls.coerce(key, value) if value is not None: value._parents[target.obj()] = key if isinstance(oldvalue, cls): oldvalue._parents.pop(target.obj(), None) return value def pickle(state, state_dict): val = state.dict.get(key, None) if val is not None: if "ext.mutable.values" not in state_dict: state_dict["ext.mutable.values"] = [] state_dict["ext.mutable.values"].append(val) def unpickle(state, state_dict): if "ext.mutable.values" in state_dict: for val in state_dict["ext.mutable.values"]: val._parents[state.obj()] = key event.listen(parent_cls, "load", load, raw=True, propagate=True) event.listen( parent_cls, "refresh", load_attrs, raw=True, propagate=True ) event.listen( parent_cls, "refresh_flush", load_attrs, raw=True, propagate=True ) event.listen( attribute, "set", set_, raw=True, retval=True, propagate=True ) event.listen(parent_cls, "pickle", pickle, raw=True, propagate=True) event.listen( parent_cls, "unpickle", unpickle, raw=True, propagate=True ) class Mutable(MutableBase): """Mixin that defines transparent propagation of change events to a parent object. See the example in :ref:`mutable_scalars` for usage information. """ def changed(self): """Subclasses should call this method whenever change events occur.""" for parent, key in self._parents.items(): flag_modified(parent, key) @classmethod def associate_with_attribute(cls, attribute): """Establish this type as a mutation listener for the given mapped descriptor. """ cls._listen_on_attribute(attribute, True, attribute.class_) @classmethod def associate_with(cls, sqltype): """Associate this wrapper with all future mapped columns of the given type. This is a convenience method that calls ``associate_with_attribute`` automatically. .. warning:: The listeners established by this method are *global* to all mappers, and are *not* garbage collected. Only use :meth:`.associate_with` for types that are permanent to an application, not with ad-hoc types else this will cause unbounded growth in memory usage. 
""" def listen_for_type(mapper, class_): if mapper.non_primary: return for prop in mapper.column_attrs: if isinstance(prop.columns[0].type, sqltype): cls.associate_with_attribute(getattr(class_, prop.key)) event.listen(mapper, "mapper_configured", listen_for_type) @classmethod def as_mutable(cls, sqltype): """Associate a SQL type with this mutable Python type. This establishes listeners that will detect ORM mappings against the given type, adding mutation event trackers to those mappings. The type is returned, unconditionally as an instance, so that :meth:`.as_mutable` can be used inline:: Table('mytable', metadata, Column('id', Integer, primary_key=True), Column('data', MyMutableType.as_mutable(PickleType)) ) Note that the returned type is always an instance, even if a class is given, and that only columns which are declared specifically with that type instance receive additional instrumentation. To associate a particular mutable type with all occurrences of a particular type, use the :meth:`.Mutable.associate_with` classmethod of the particular :class:`.Mutable` subclass to establish a global association. .. warning:: The listeners established by this method are *global* to all mappers, and are *not* garbage collected. Only use :meth:`.as_mutable` for types that are permanent to an application, not with ad-hoc types else this will cause unbounded growth in memory usage. """ sqltype = types.to_instance(sqltype) # a SchemaType will be copied when the Column is copied, # and we'll lose our ability to link that type back to the original. # so track our original type w/ columns if isinstance(sqltype, SchemaEventTarget): @event.listens_for(sqltype, "before_parent_attach") def _add_column_memo(sqltyp, parent): parent.info["_ext_mutable_orig_type"] = sqltyp schema_event_check = True else: schema_event_check = False def listen_for_type(mapper, class_): if mapper.non_primary: return for prop in mapper.column_attrs: if ( schema_event_check and hasattr(prop.expression, "info") and prop.expression.info.get("_ext_mutable_orig_type") is sqltype ) or (prop.columns[0].type is sqltype): cls.associate_with_attribute(getattr(class_, prop.key)) event.listen(mapper, "mapper_configured", listen_for_type) return sqltype class MutableComposite(MutableBase): """Mixin that defines transparent propagation of change events on a SQLAlchemy "composite" object to its owning parent or parents. See the example in :ref:`mutable_composites` for usage information. """ @classmethod def _get_listen_keys(cls, attribute): return {attribute.key}.union(attribute.property._attribute_keys) def changed(self): """Subclasses should call this method whenever change events occur.""" for parent, key in self._parents.items(): prop = object_mapper(parent).get_property(key) for value, attr_name in zip( self.__composite_values__(), prop._attribute_keys ): setattr(parent, attr_name, value) def _setup_composite_listener(): def _listen_for_type(mapper, class_): for prop in mapper.iterate_properties: if ( hasattr(prop, "composite_class") and isinstance(prop.composite_class, type) and issubclass(prop.composite_class, MutableComposite) ): prop.composite_class._listen_on_attribute( getattr(class_, prop.key), False, class_ ) if not event.contains(Mapper, "mapper_configured", _listen_for_type): event.listen(Mapper, "mapper_configured", _listen_for_type) _setup_composite_listener() class MutableDict(Mutable, dict): """A dictionary type that implements :class:`.Mutable`. 
The :class:`.MutableDict` object implements a dictionary that will emit change events to the underlying mapping when the contents of the dictionary are altered, including when values are added or removed. Note that :class:`.MutableDict` does **not** apply mutable tracking to the *values themselves* inside the dictionary. Therefore it is not a sufficient solution for the use case of tracking deep changes to a *recursive* dictionary structure, such as a JSON structure. To support this use case, build a subclass of :class:`.MutableDict` that provides appropriate coercion to the values placed in the dictionary so that they too are "mutable", and emit events up to their parent structure. .. seealso:: :class:`.MutableList` :class:`.MutableSet` """ def __setitem__(self, key, value): """Detect dictionary set events and emit change events.""" dict.__setitem__(self, key, value) self.changed() def setdefault(self, key, value): result = dict.setdefault(self, key, value) self.changed() return result def __delitem__(self, key): """Detect dictionary del events and emit change events.""" dict.__delitem__(self, key) self.changed() def update(self, *a, **kw): dict.update(self, *a, **kw) self.changed() def pop(self, *arg): result = dict.pop(self, *arg) self.changed() return result def popitem(self): result = dict.popitem(self) self.changed() return result def clear(self): dict.clear(self) self.changed() @classmethod def coerce(cls, key, value): """Convert plain dictionary to instance of this class.""" if not isinstance(value, cls): if isinstance(value, dict): return cls(value) return Mutable.coerce(key, value) else: return value def __getstate__(self): return dict(self) def __setstate__(self, state): self.update(state) class MutableList(Mutable, list): """A list type that implements :class:`.Mutable`. The :class:`.MutableList` object implements a list that will emit change events to the underlying mapping when the contents of the list are altered, including when values are added or removed. Note that :class:`.MutableList` does **not** apply mutable tracking to the *values themselves* inside the list. Therefore it is not a sufficient solution for the use case of tracking deep changes to a *recursive* mutable structure, such as a JSON structure. To support this use case, build a subclass of :class:`.MutableList` that provides appropriate coercion to the values placed in the dictionary so that they too are "mutable", and emit events up to their parent structure. .. versionadded:: 1.1 .. 
seealso:: :class:`.MutableDict` :class:`.MutableSet` """ def __reduce_ex__(self, proto): return (self.__class__, (list(self),)) # needed for backwards compatibility with # older pickles def __setstate__(self, state): self[:] = state def __setitem__(self, index, value): """Detect list set events and emit change events.""" list.__setitem__(self, index, value) self.changed() def __setslice__(self, start, end, value): """Detect list set events and emit change events.""" list.__setslice__(self, start, end, value) self.changed() def __delitem__(self, index): """Detect list del events and emit change events.""" list.__delitem__(self, index) self.changed() def __delslice__(self, start, end): """Detect list del events and emit change events.""" list.__delslice__(self, start, end) self.changed() def pop(self, *arg): result = list.pop(self, *arg) self.changed() return result def append(self, x): list.append(self, x) self.changed() def extend(self, x): list.extend(self, x) self.changed() def __iadd__(self, x): self.extend(x) return self def insert(self, i, x): list.insert(self, i, x) self.changed() def remove(self, i): list.remove(self, i) self.changed() def clear(self): list.clear(self) self.changed() def sort(self, **kw): list.sort(self, **kw) self.changed() def reverse(self): list.reverse(self) self.changed() @classmethod def coerce(cls, index, value): """Convert plain list to instance of this class.""" if not isinstance(value, cls): if isinstance(value, list): return cls(value) return Mutable.coerce(index, value) else: return value class MutableSet(Mutable, set): """A set type that implements :class:`.Mutable`. The :class:`.MutableSet` object implements a set that will emit change events to the underlying mapping when the contents of the set are altered, including when values are added or removed. Note that :class:`.MutableSet` does **not** apply mutable tracking to the *values themselves* inside the set. Therefore it is not a sufficient solution for the use case of tracking deep changes to a *recursive* mutable structure. To support this use case, build a subclass of :class:`.MutableSet` that provides appropriate coercion to the values placed in the dictionary so that they too are "mutable", and emit events up to their parent structure. .. versionadded:: 1.1 .. 
seealso:: :class:`.MutableDict` :class:`.MutableList` """ def update(self, *arg): set.update(self, *arg) self.changed() def intersection_update(self, *arg): set.intersection_update(self, *arg) self.changed() def difference_update(self, *arg): set.difference_update(self, *arg) self.changed() def symmetric_difference_update(self, *arg): set.symmetric_difference_update(self, *arg) self.changed() def __ior__(self, other): self.update(other) return self def __iand__(self, other): self.intersection_update(other) return self def __ixor__(self, other): self.symmetric_difference_update(other) return self def __isub__(self, other): self.difference_update(other) return self def add(self, elem): set.add(self, elem) self.changed() def remove(self, elem): set.remove(self, elem) self.changed() def discard(self, elem): set.discard(self, elem) self.changed() def pop(self, *arg): result = set.pop(self, *arg) self.changed() return result def clear(self): set.clear(self) self.changed() @classmethod def coerce(cls, index, value): """Convert plain set to instance of this class.""" if not isinstance(value, cls): if isinstance(value, set): return cls(value) return Mutable.coerce(index, value) else: return value def __getstate__(self): return set(self) def __setstate__(self, state): self.update(state) def __reduce_ex__(self, proto): return (self.__class__, (list(self),))
0
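Besides the mixin recipe walked through above, the module ships the ready-made ``MutableDict``, ``MutableList`` and ``MutableSet`` types. A minimal sketch pairing the stock ``MutableList`` with ``PickleType`` so that in-place mutations mark the parent row dirty; the ``Journal`` model and its column names are hypothetical::

    from sqlalchemy import Column, Integer, PickleType, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.ext.mutable import MutableList
    from sqlalchemy.orm import Session

    Base = declarative_base()

    class Journal(Base):
        __tablename__ = "journal"
        id = Column(Integer, primary_key=True)
        # as_mutable() returns the type instance and registers the
        # mapper listeners that instrument columns of exactly this type
        entries = Column(MutableList.as_mutable(PickleType), default=list)

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)

    session = Session(engine)
    journal = Journal(entries=["day one"])  # coerce() wraps the plain list
    session.add(journal)
    session.commit()

    # the append routes through MutableList.append() -> changed(), which
    # flags the attribute modified on the parent; no reassignment needed
    journal.entries.append("day two")
    assert journal in session.dirty
    session.commit()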
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/ext/associationproxy.py
# ext/associationproxy.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Contain the ``AssociationProxy`` class. The ``AssociationProxy`` is a Python property object which provides transparent proxied access to the endpoint of an association object. See the example ``examples/association/proxied_association.py``. """ import operator from .. import exc from .. import inspect from .. import orm from .. import util from ..orm import collections from ..orm import interfaces from ..sql import or_ from ..sql.operators import ColumnOperators def association_proxy(target_collection, attr, **kw): r"""Return a Python property implementing a view of a target attribute which references an attribute on members of the target. The returned value is an instance of :class:`.AssociationProxy`. Implements a Python property representing a relationship as a collection of simpler values, or a scalar value. The proxied property will mimic the collection type of the target (list, dict or set), or, in the case of a one to one relationship, a simple scalar value. :param target_collection: Name of the attribute we'll proxy to. This attribute is typically mapped by :func:`~sqlalchemy.orm.relationship` to link to a target collection, but can also be a many-to-one or non-scalar relationship. :param attr: Attribute on the associated instance or instances we'll proxy for. For example, given a target collection of [obj1, obj2], a list created by this proxy property would look like [getattr(obj1, *attr*), getattr(obj2, *attr*)] If the relationship is one-to-one or otherwise uselist=False, then simply: getattr(obj, *attr*) :param creator: optional. When new items are added to this proxied collection, new instances of the class collected by the target collection will be created. For list and set collections, the target class constructor will be called with the 'value' for the new instance. For dict types, two arguments are passed: key and value. If you want to construct instances differently, supply a *creator* function that takes arguments as above and returns instances. For scalar relationships, creator() will be called if the target is None. If the target is present, set operations are proxied to setattr() on the associated object. If you have an associated object with multiple attributes, you may set up multiple association proxies mapping to different attributes. See the unit tests for examples, and for examples of how creator() functions can be used to construct the scalar relationship on-demand in this situation. :param \*\*kw: Passes along any other keyword arguments to :class:`.AssociationProxy`. """ return AssociationProxy(target_collection, attr, **kw) ASSOCIATION_PROXY = util.symbol("ASSOCIATION_PROXY") """Symbol indicating an :class:`.InspectionAttr` that's of type :class:`.AssociationProxy`. Is assigned to the :attr:`.InspectionAttr.extension_type` attribute. """ class AssociationProxy(interfaces.InspectionAttrInfo): """A descriptor that presents a read/write view of an object attribute.""" is_attribute = True extension_type = ASSOCIATION_PROXY def __init__( self, target_collection, attr, creator=None, getset_factory=None, proxy_factory=None, proxy_bulk_set=None, info=None, cascade_scalar_deletes=False, ): """Construct a new :class:`.AssociationProxy`. 
The :func:`.association_proxy` function is provided as the usual entrypoint here, though :class:`.AssociationProxy` can be instantiated and/or subclassed directly. :param target_collection: Name of the collection we'll proxy to, usually created with :func:`_orm.relationship`. :param attr: Attribute on the collected instances we'll proxy for. For example, given a target collection of [obj1, obj2], a list created by this proxy property would look like [getattr(obj1, attr), getattr(obj2, attr)] :param creator: Optional. When new items are added to this proxied collection, new instances of the class collected by the target collection will be created. For list and set collections, the target class constructor will be called with the 'value' for the new instance. For dict types, two arguments are passed: key and value. If you want to construct instances differently, supply a 'creator' function that takes arguments as above and returns instances. :param cascade_scalar_deletes: when True, indicates that setting the proxied value to ``None``, or deleting it via ``del``, should also remove the source object. Only applies to scalar attributes. Normally, removing the proxied target will not remove the proxy source, as this object may have other state that is still to be kept. .. versionadded:: 1.3 .. seealso:: :ref:`cascade_scalar_deletes` - complete usage example :param getset_factory: Optional. Proxied attribute access is automatically handled by routines that get and set values based on the `attr` argument for this proxy. If you would like to customize this behavior, you may supply a `getset_factory` callable that produces a tuple of `getter` and `setter` functions. The factory is called with two arguments, the abstract type of the underlying collection and this proxy instance. :param proxy_factory: Optional. The type of collection to emulate is determined by sniffing the target collection. If your collection type can't be determined by duck typing or you'd like to use a different collection implementation, you may supply a factory function to produce those collections. Only applicable to non-scalar relationships. :param proxy_bulk_set: Optional, use with proxy_factory. See the _set() method for details. :param info: optional, will be assigned to :attr:`.AssociationProxy.info` if present. .. versionadded:: 1.0.9 """ self.target_collection = target_collection self.value_attr = attr self.creator = creator self.getset_factory = getset_factory self.proxy_factory = proxy_factory self.proxy_bulk_set = proxy_bulk_set self.cascade_scalar_deletes = cascade_scalar_deletes self.key = "_%s_%s_%s" % ( type(self).__name__, target_collection, id(self), ) if info: self.info = info def __get__(self, obj, class_): if class_ is None: return self inst = self._as_instance(class_, obj) if inst: return inst.get(obj) # obj has to be None here # assert obj is None return self def __set__(self, obj, values): class_ = type(obj) return self._as_instance(class_, obj).set(obj, values) def __delete__(self, obj): class_ = type(obj) return self._as_instance(class_, obj).delete(obj) def for_class(self, class_, obj=None): r"""Return the internal state local to a specific mapped class. E.g., given a class ``User``:: class User(Base): # ... 
keywords = association_proxy('kws', 'keyword') If we access this :class:`.AssociationProxy` from :attr:`_orm.Mapper.all_orm_descriptors`, and we want to view the target class for this proxy as mapped by ``User``:: inspect(User).all_orm_descriptors["keywords"].for_class(User).target_class This returns an instance of :class:`.AssociationProxyInstance` that is specific to the ``User`` class. The :class:`.AssociationProxy` object remains agnostic of its parent class. :param class\_: the class that we are returning state for. :param obj: optional, an instance of the class that is required if the attribute refers to a polymorphic target, e.g. where we have to look at the type of the actual destination object to get the complete path. .. versionadded:: 1.3 - :class:`.AssociationProxy` no longer stores any state specific to a particular parent class; the state is now stored in per-class :class:`.AssociationProxyInstance` objects. """ return self._as_instance(class_, obj) def _as_instance(self, class_, obj): try: inst = class_.__dict__[self.key + "_inst"] except KeyError: inst = None # avoid exception context if inst is None: owner = self._calc_owner(class_) if owner is not None: inst = AssociationProxyInstance.for_proxy(self, owner, obj) setattr(class_, self.key + "_inst", inst) else: inst = None if inst is not None and not inst._is_canonical: # the AssociationProxyInstance can't be generalized # since the proxied attribute is not on the targeted # class, only on subclasses of it, which might be # different. only return for the specific # object's current value return inst._non_canonical_get_for_object(obj) else: return inst def _calc_owner(self, target_cls): # we might be getting invoked for a subclass # that is not mapped yet, in some declarative situations. # save until we are mapped try: insp = inspect(target_cls) except exc.NoInspectionAvailable: # can't find a mapper, don't set owner. if we are a not-yet-mapped # subclass, we can also scan through __mro__ to find a mapped # class, but instead just wait for us to be called again against a # mapped class normally. return None else: return insp.mapper.class_manager.class_ def _default_getset(self, collection_class): attr = self.value_attr _getter = operator.attrgetter(attr) def getter(target): return _getter(target) if target is not None else None if collection_class is dict: def setter(o, k, v): setattr(o, attr, v) else: def setter(o, v): setattr(o, attr, v) return getter, setter def __repr__(self): return "AssociationProxy(%r, %r)" % ( self.target_collection, self.value_attr, ) class AssociationProxyInstance(object): """A per-class object that serves class- and object-specific results. This is used by :class:`.AssociationProxy` when it is invoked in terms of a specific class or instance of a class, i.e. when it is used as a regular Python descriptor. When referring to the :class:`.AssociationProxy` as a normal Python descriptor, the :class:`.AssociationProxyInstance` is the object that actually serves the information. Under normal circumstances, its presence is transparent:: >>> User.keywords.scalar False In the special case that the :class:`.AssociationProxy` object is being accessed directly, in order to get an explicit handle to the :class:`.AssociationProxyInstance`, use the :meth:`.AssociationProxy.for_class` method:: proxy_state = inspect(User).all_orm_descriptors["keywords"].for_class(User) # view if proxy object is scalar or not >>> proxy_state.scalar False .. 
versionadded:: 1.3 """ # noqa def __init__(self, parent, owning_class, target_class, value_attr): self.parent = parent self.key = parent.key self.owning_class = owning_class self.target_collection = parent.target_collection self.collection_class = None self.target_class = target_class self.value_attr = value_attr target_class = None """The intermediary class handled by this :class:`.AssociationProxyInstance`. Intercepted append/set/assignment events will result in the generation of new instances of this class. """ @classmethod def for_proxy(cls, parent, owning_class, parent_instance): target_collection = parent.target_collection value_attr = parent.value_attr prop = orm.class_mapper(owning_class).get_property(target_collection) # this was never asserted before but this should be made clear. if not isinstance(prop, orm.RelationshipProperty): util.raise_( NotImplementedError( "association proxy to a non-relationship " "intermediary is not supported" ), replace_context=None, ) target_class = prop.mapper.class_ try: target_assoc = cls._cls_unwrap_target_assoc_proxy( target_class, value_attr ) except AttributeError: # the proxied attribute doesn't exist on the target class; # return an "ambiguous" instance that will work on a per-object # basis return AmbiguousAssociationProxyInstance( parent, owning_class, target_class, value_attr ) else: return cls._construct_for_assoc( target_assoc, parent, owning_class, target_class, value_attr ) @classmethod def _construct_for_assoc( cls, target_assoc, parent, owning_class, target_class, value_attr ): if target_assoc is not None: return ObjectAssociationProxyInstance( parent, owning_class, target_class, value_attr ) attr = getattr(target_class, value_attr) if not hasattr(attr, "_is_internal_proxy"): return AmbiguousAssociationProxyInstance( parent, owning_class, target_class, value_attr ) is_object = attr._impl_uses_objects if is_object: return ObjectAssociationProxyInstance( parent, owning_class, target_class, value_attr ) else: return ColumnAssociationProxyInstance( parent, owning_class, target_class, value_attr ) def _get_property(self): return orm.class_mapper(self.owning_class).get_property( self.target_collection ) @property def _comparator(self): return self._get_property().comparator @classmethod def _cls_unwrap_target_assoc_proxy(cls, target_class, value_attr): attr = getattr(target_class, value_attr) if isinstance(attr, (AssociationProxy, AssociationProxyInstance)): return attr return None @util.memoized_property def _unwrap_target_assoc_proxy(self): return self._cls_unwrap_target_assoc_proxy( self.target_class, self.value_attr ) @property def remote_attr(self): """The 'remote' class attribute referenced by this :class:`.AssociationProxyInstance`. .. seealso:: :attr:`.AssociationProxyInstance.attr` :attr:`.AssociationProxyInstance.local_attr` """ return getattr(self.target_class, self.value_attr) @property def local_attr(self): """The 'local' class attribute referenced by this :class:`.AssociationProxyInstance`. .. seealso:: :attr:`.AssociationProxyInstance.attr` :attr:`.AssociationProxyInstance.remote_attr` """ return getattr(self.owning_class, self.target_collection) @property def attr(self): """Return a tuple of ``(local_attr, remote_attr)``. This attribute is convenient when specifying a join using :meth:`_query.Query.join` across two relationships:: sess.query(Parent).join(*Parent.proxied.attr) .. 
seealso:: :attr:`.AssociationProxyInstance.local_attr` :attr:`.AssociationProxyInstance.remote_attr` """ return (self.local_attr, self.remote_attr) @util.memoized_property def scalar(self): """Return ``True`` if this :class:`.AssociationProxyInstance` proxies a scalar relationship on the local side.""" scalar = not self._get_property().uselist if scalar: self._initialize_scalar_accessors() return scalar @util.memoized_property def _value_is_scalar(self): return ( not self._get_property() .mapper.get_property(self.value_attr) .uselist ) @property def _target_is_object(self): raise NotImplementedError() def _initialize_scalar_accessors(self): if self.parent.getset_factory: get, set_ = self.parent.getset_factory(None, self) else: get, set_ = self.parent._default_getset(None) self._scalar_get, self._scalar_set = get, set_ def _default_getset(self, collection_class): attr = self.value_attr _getter = operator.attrgetter(attr) def getter(target): return _getter(target) if target is not None else None if collection_class is dict: def setter(o, k, v): return setattr(o, attr, v) else: def setter(o, v): return setattr(o, attr, v) return getter, setter @property def info(self): return self.parent.info def get(self, obj): if obj is None: return self if self.scalar: target = getattr(obj, self.target_collection) return self._scalar_get(target) else: try: # If the owning instance is reborn (orm session resurrect, # etc.), refresh the proxy cache. creator_id, self_id, proxy = getattr(obj, self.key) except AttributeError: pass else: if id(obj) == creator_id and id(self) == self_id: assert self.collection_class is not None return proxy self.collection_class, proxy = self._new( _lazy_collection(obj, self.target_collection) ) setattr(obj, self.key, (id(obj), id(self), proxy)) return proxy def set(self, obj, values): if self.scalar: creator = ( self.parent.creator if self.parent.creator else self.target_class ) target = getattr(obj, self.target_collection) if target is None: if values is None: return setattr(obj, self.target_collection, creator(values)) else: self._scalar_set(target, values) if values is None and self.parent.cascade_scalar_deletes: setattr(obj, self.target_collection, None) else: proxy = self.get(obj) assert self.collection_class is not None if proxy is not values: proxy._bulk_replace(self, values) def delete(self, obj): if self.owning_class is None: self._calc_owner(obj, None) if self.scalar: target = getattr(obj, self.target_collection) if target is not None: delattr(target, self.value_attr) delattr(obj, self.target_collection) def _new(self, lazy_collection): creator = ( self.parent.creator if self.parent.creator else self.target_class ) collection_class = util.duck_type_collection(lazy_collection()) if self.parent.proxy_factory: return ( collection_class, self.parent.proxy_factory( lazy_collection, creator, self.value_attr, self ), ) if self.parent.getset_factory: getter, setter = self.parent.getset_factory(collection_class, self) else: getter, setter = self.parent._default_getset(collection_class) if collection_class is list: return ( collection_class, _AssociationList( lazy_collection, creator, getter, setter, self ), ) elif collection_class is dict: return ( collection_class, _AssociationDict( lazy_collection, creator, getter, setter, self ), ) elif collection_class is set: return ( collection_class, _AssociationSet( lazy_collection, creator, getter, setter, self ), ) else: raise exc.ArgumentError( "could not guess which interface to use for " 'collection_class "%s" backing "%s"; specify 
a ' "proxy_factory and proxy_bulk_set manually" % (self.collection_class.__name__, self.target_collection) ) def _set(self, proxy, values): if self.parent.proxy_bulk_set: self.parent.proxy_bulk_set(proxy, values) elif self.collection_class is list: proxy.extend(values) elif self.collection_class is dict: proxy.update(values) elif self.collection_class is set: proxy.update(values) else: raise exc.ArgumentError( "no proxy_bulk_set supplied for custom " "collection_class implementation" ) def _inflate(self, proxy): creator = ( self.parent.creator and self.parent.creator or self.target_class ) if self.parent.getset_factory: getter, setter = self.parent.getset_factory( self.collection_class, self ) else: getter, setter = self.parent._default_getset(self.collection_class) proxy.creator = creator proxy.getter = getter proxy.setter = setter def _criterion_exists(self, criterion=None, **kwargs): is_has = kwargs.pop("is_has", None) target_assoc = self._unwrap_target_assoc_proxy if target_assoc is not None: inner = target_assoc._criterion_exists( criterion=criterion, **kwargs ) return self._comparator._criterion_exists(inner) if self._target_is_object: prop = getattr(self.target_class, self.value_attr) value_expr = prop._criterion_exists(criterion, **kwargs) else: if kwargs: raise exc.ArgumentError( "Can't apply keyword arguments to column-targeted " "association proxy; use ==" ) elif is_has and criterion is not None: raise exc.ArgumentError( "Non-empty has() not allowed for " "column-targeted association proxy; use ==" ) value_expr = criterion return self._comparator._criterion_exists(value_expr) def any(self, criterion=None, **kwargs): """Produce a proxied 'any' expression using EXISTS. This expression will be a composed product using the :meth:`.RelationshipProperty.Comparator.any` and/or :meth:`.RelationshipProperty.Comparator.has` operators of the underlying proxied attributes. """ if self._unwrap_target_assoc_proxy is None and ( self.scalar and (not self._target_is_object or self._value_is_scalar) ): raise exc.InvalidRequestError( "'any()' not implemented for scalar " "attributes. Use has()." ) return self._criterion_exists( criterion=criterion, is_has=False, **kwargs ) def has(self, criterion=None, **kwargs): """Produce a proxied 'has' expression using EXISTS. This expression will be a composed product using the :meth:`.RelationshipProperty.Comparator.any` and/or :meth:`.RelationshipProperty.Comparator.has` operators of the underlying proxied attributes. """ if self._unwrap_target_assoc_proxy is None and ( not self.scalar or (self._target_is_object and not self._value_is_scalar) ): raise exc.InvalidRequestError( "'has()' not implemented for collections. " "Use any()." ) return self._criterion_exists( criterion=criterion, is_has=True, **kwargs ) def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self.parent) class AmbiguousAssociationProxyInstance(AssociationProxyInstance): """an :class:`.AssociationProxyInstance` where we cannot determine the type of target object. 
""" _is_canonical = False def _ambiguous(self): raise AttributeError( "Association proxy %s.%s refers to an attribute '%s' that is not " "directly mapped on class %s; therefore this operation cannot " "proceed since we don't know what type of object is referred " "towards" % ( self.owning_class.__name__, self.target_collection, self.value_attr, self.target_class, ) ) def get(self, obj): if obj is None: return self else: return super(AmbiguousAssociationProxyInstance, self).get(obj) def __eq__(self, obj): self._ambiguous() def __ne__(self, obj): self._ambiguous() def any(self, criterion=None, **kwargs): self._ambiguous() def has(self, criterion=None, **kwargs): self._ambiguous() @util.memoized_property def _lookup_cache(self): # mapping of <subclass>->AssociationProxyInstance. # e.g. proxy is A-> A.b -> B -> B.b_attr, but B.b_attr doesn't exist; # only B1(B) and B2(B) have "b_attr", keys in here would be B1, B2 return {} def _non_canonical_get_for_object(self, parent_instance): if parent_instance is not None: actual_obj = getattr(parent_instance, self.target_collection) if actual_obj is not None: try: insp = inspect(actual_obj) except exc.NoInspectionAvailable: pass else: mapper = insp.mapper instance_class = mapper.class_ if instance_class not in self._lookup_cache: self._populate_cache(instance_class, mapper) try: return self._lookup_cache[instance_class] except KeyError: pass # no object or ambiguous object given, so return "self", which # is a proxy with generally only instance-level functionality return self def _populate_cache(self, instance_class, mapper): prop = orm.class_mapper(self.owning_class).get_property( self.target_collection ) if mapper.isa(prop.mapper): target_class = instance_class try: target_assoc = self._cls_unwrap_target_assoc_proxy( target_class, self.value_attr ) except AttributeError: pass else: self._lookup_cache[instance_class] = self._construct_for_assoc( target_assoc, self.parent, self.owning_class, target_class, self.value_attr, ) class ObjectAssociationProxyInstance(AssociationProxyInstance): """an :class:`.AssociationProxyInstance` that has an object as a target. """ _target_is_object = True _is_canonical = True def contains(self, obj): """Produce a proxied 'contains' expression using EXISTS. This expression will be a composed product using the :meth:`.RelationshipProperty.Comparator.any` , :meth:`.RelationshipProperty.Comparator.has`, and/or :meth:`.RelationshipProperty.Comparator.contains` operators of the underlying proxied attributes. """ target_assoc = self._unwrap_target_assoc_proxy if target_assoc is not None: return self._comparator._criterion_exists( target_assoc.contains(obj) if not target_assoc.scalar else target_assoc == obj ) elif ( self._target_is_object and self.scalar and not self._value_is_scalar ): return self._comparator.has( getattr(self.target_class, self.value_attr).contains(obj) ) elif self._target_is_object and self.scalar and self._value_is_scalar: raise exc.InvalidRequestError( "contains() doesn't apply to a scalar object endpoint; use ==" ) else: return self._comparator._criterion_exists(**{self.value_attr: obj}) def __eq__(self, obj): # note the has() here will fail for collections; eq_() # is only allowed with a scalar. if obj is None: return or_( self._comparator.has(**{self.value_attr: obj}), self._comparator == None, ) else: return self._comparator.has(**{self.value_attr: obj}) def __ne__(self, obj): # note the has() here will fail for collections; eq_() # is only allowed with a scalar. 
return self._comparator.has( getattr(self.target_class, self.value_attr) != obj ) class ColumnAssociationProxyInstance( ColumnOperators, AssociationProxyInstance ): """an :class:`.AssociationProxyInstance` that has a database column as a target. """ _target_is_object = False _is_canonical = True def __eq__(self, other): # special case "is None" to check for no related row as well expr = self._criterion_exists( self.remote_attr.operate(operator.eq, other) ) if other is None: return or_(expr, self._comparator == None) else: return expr def operate(self, op, *other, **kwargs): return self._criterion_exists( self.remote_attr.operate(op, *other, **kwargs) ) class _lazy_collection(object): def __init__(self, obj, target): self.parent = obj self.target = target def __call__(self): return getattr(self.parent, self.target) def __getstate__(self): return {"obj": self.parent, "target": self.target} def __setstate__(self, state): self.parent = state["obj"] self.target = state["target"] class _AssociationCollection(object): def __init__(self, lazy_collection, creator, getter, setter, parent): """Constructs an _AssociationCollection. This will always be a subclass of either _AssociationList, _AssociationSet, or _AssociationDict. lazy_collection A callable returning a list-based collection of entities (usually an object attribute managed by a SQLAlchemy relationship()) creator A function that creates new target entities. Given one parameter: value. This assertion is assumed:: obj = creator(somevalue) assert getter(obj) == somevalue getter A function. Given an associated object, return the 'value'. setter A function. Given an associated object and a value, store that value on the object. """ self.lazy_collection = lazy_collection self.creator = creator self.getter = getter self.setter = setter self.parent = parent col = property(lambda self: self.lazy_collection()) def __len__(self): return len(self.col) def __bool__(self): return bool(self.col) __nonzero__ = __bool__ def __getstate__(self): return {"parent": self.parent, "lazy_collection": self.lazy_collection} def __setstate__(self, state): self.parent = state["parent"] self.lazy_collection = state["lazy_collection"] self.parent._inflate(self) def _bulk_replace(self, assoc_proxy, values): self.clear() assoc_proxy._set(self, values) class _AssociationList(_AssociationCollection): """Generic, converting, list-to-list proxy.""" def _create(self, value): return self.creator(value) def _get(self, object_): return self.getter(object_) def _set(self, object_, value): return self.setter(object_, value) def __getitem__(self, index): if not isinstance(index, slice): return self._get(self.col[index]) else: return [self._get(member) for member in self.col[index]] def __setitem__(self, index, value): if not isinstance(index, slice): self._set(self.col[index], value) else: if index.stop is None: stop = len(self) elif index.stop < 0: stop = len(self) + index.stop else: stop = index.stop step = index.step or 1 start = index.start or 0 rng = list(range(index.start or 0, stop, step)) if step == 1: for i in rng: del self[start] i = start for item in value: self.insert(i, item) i += 1 else: if len(value) != len(rng): raise ValueError( "attempt to assign sequence of size %s to " "extended slice of size %s" % (len(value), len(rng)) ) for i, item in zip(rng, value): self._set(self.col[i], item) def __delitem__(self, index): del self.col[index] def __contains__(self, value): for member in self.col: # testlib.pragma exempt:__eq__ if self._get(member) == value: return True 
return False def __getslice__(self, start, end): return [self._get(member) for member in self.col[start:end]] def __setslice__(self, start, end, values): members = [self._create(v) for v in values] self.col[start:end] = members def __delslice__(self, start, end): del self.col[start:end] def __iter__(self): """Iterate over proxied values. For the actual domain objects, iterate over .col instead or just use the underlying collection directly from its property on the parent. """ for member in self.col: yield self._get(member) return def append(self, value): col = self.col item = self._create(value) col.append(item) def count(self, value): return sum( [ 1 for _ in util.itertools_filter( lambda v: v == value, iter(self) ) ] ) def extend(self, values): for v in values: self.append(v) def insert(self, index, value): self.col[index:index] = [self._create(value)] def pop(self, index=-1): return self.getter(self.col.pop(index)) def remove(self, value): for i, val in enumerate(self): if val == value: del self.col[i] return raise ValueError("value not in list") def reverse(self): """Not supported, use reversed(mylist)""" raise NotImplementedError def sort(self): """Not supported, use sorted(mylist)""" raise NotImplementedError def clear(self): del self.col[0 : len(self.col)] def __eq__(self, other): return list(self) == other def __ne__(self, other): return list(self) != other def __lt__(self, other): return list(self) < other def __le__(self, other): return list(self) <= other def __gt__(self, other): return list(self) > other def __ge__(self, other): return list(self) >= other def __cmp__(self, other): return util.cmp(list(self), other) def __add__(self, iterable): try: other = list(iterable) except TypeError: return NotImplemented return list(self) + other def __radd__(self, iterable): try: other = list(iterable) except TypeError: return NotImplemented return other + list(self) def __mul__(self, n): if not isinstance(n, int): return NotImplemented return list(self) * n __rmul__ = __mul__ def __iadd__(self, iterable): self.extend(iterable) return self def __imul__(self, n): # unlike a regular list *=, proxied __imul__ will generate unique # backing objects for each copy. *= on proxied lists is a bit of # a stretch anyhow, and this interpretation of the __imul__ contract # is more plausibly useful than copying the backing objects. 
if not isinstance(n, int): return NotImplemented if n == 0: self.clear() elif n > 1: self.extend(list(self) * (n - 1)) return self def index(self, item, *args): return list(self).index(item, *args) def copy(self): return list(self) def __repr__(self): return repr(list(self)) def __hash__(self): raise TypeError("%s objects are unhashable" % type(self).__name__) for func_name, func in list(locals().items()): if ( util.callable(func) and func.__name__ == func_name and not func.__doc__ and hasattr(list, func_name) ): func.__doc__ = getattr(list, func_name).__doc__ del func_name, func _NotProvided = util.symbol("_NotProvided") class _AssociationDict(_AssociationCollection): """Generic, converting, dict-to-dict proxy.""" def _create(self, key, value): return self.creator(key, value) def _get(self, object_): return self.getter(object_) def _set(self, object_, key, value): return self.setter(object_, key, value) def __getitem__(self, key): return self._get(self.col[key]) def __setitem__(self, key, value): if key in self.col: self._set(self.col[key], key, value) else: self.col[key] = self._create(key, value) def __delitem__(self, key): del self.col[key] def __contains__(self, key): # testlib.pragma exempt:__hash__ return key in self.col def has_key(self, key): # testlib.pragma exempt:__hash__ return key in self.col def __iter__(self): return iter(self.col.keys()) def clear(self): self.col.clear() def __eq__(self, other): return dict(self) == other def __ne__(self, other): return dict(self) != other def __lt__(self, other): return dict(self) < other def __le__(self, other): return dict(self) <= other def __gt__(self, other): return dict(self) > other def __ge__(self, other): return dict(self) >= other def __cmp__(self, other): return util.cmp(dict(self), other) def __repr__(self): return repr(dict(self.items())) def get(self, key, default=None): try: return self[key] except KeyError: return default def setdefault(self, key, default=None): if key not in self.col: self.col[key] = self._create(key, default) return default else: return self[key] def keys(self): return self.col.keys() if util.py2k: def iteritems(self): return ((key, self._get(self.col[key])) for key in self.col) def itervalues(self): return (self._get(self.col[key]) for key in self.col) def iterkeys(self): return self.col.iterkeys() def values(self): return [self._get(member) for member in self.col.values()] def items(self): return [(k, self._get(self.col[k])) for k in self] else: def items(self): return ((key, self._get(self.col[key])) for key in self.col) def values(self): return (self._get(self.col[key]) for key in self.col) def pop(self, key, default=_NotProvided): if default is _NotProvided: member = self.col.pop(key) else: member = self.col.pop(key, default) return self._get(member) def popitem(self): item = self.col.popitem() return (item[0], self._get(item[1])) def update(self, *a, **kw): if len(a) > 1: raise TypeError( "update expected at most 1 arguments, got %i" % len(a) ) elif len(a) == 1: seq_or_map = a[0] # discern dict from sequence - took the advice from # http://www.voidspace.org.uk/python/articles/duck_typing.shtml # still not perfect :( if hasattr(seq_or_map, "keys"): for item in seq_or_map: self[item] = seq_or_map[item] else: try: for k, v in seq_or_map: self[k] = v except ValueError as err: util.raise_( ValueError( "dictionary update sequence " "requires 2-element tuples" ), replace_context=err, ) for key, value in kw: self[key] = value def _bulk_replace(self, assoc_proxy, values): existing = set(self) constants = 
existing.intersection(values or ()) additions = set(values or ()).difference(constants) removals = existing.difference(constants) for key, member in values.items() or (): if key in additions: self[key] = member elif key in constants: self[key] = member for key in removals: del self[key] def copy(self): return dict(self.items()) def __hash__(self): raise TypeError("%s objects are unhashable" % type(self).__name__) for func_name, func in list(locals().items()): if ( util.callable(func) and func.__name__ == func_name and not func.__doc__ and hasattr(dict, func_name) ): func.__doc__ = getattr(dict, func_name).__doc__ del func_name, func class _AssociationSet(_AssociationCollection): """Generic, converting, set-to-set proxy.""" def _create(self, value): return self.creator(value) def _get(self, object_): return self.getter(object_) def __len__(self): return len(self.col) def __bool__(self): if self.col: return True else: return False __nonzero__ = __bool__ def __contains__(self, value): for member in self.col: # testlib.pragma exempt:__eq__ if self._get(member) == value: return True return False def __iter__(self): """Iterate over proxied values. For the actual domain objects, iterate over .col instead or just use the underlying collection directly from its property on the parent. """ for member in self.col: yield self._get(member) return def add(self, value): if value not in self: self.col.add(self._create(value)) # for discard and remove, choosing a more expensive check strategy rather # than call self.creator() def discard(self, value): for member in self.col: if self._get(member) == value: self.col.discard(member) break def remove(self, value): for member in self.col: if self._get(member) == value: self.col.discard(member) return raise KeyError(value) def pop(self): if not self.col: raise KeyError("pop from an empty set") member = self.col.pop() return self._get(member) def update(self, other): for value in other: self.add(value) def _bulk_replace(self, assoc_proxy, values): existing = set(self) constants = existing.intersection(values or ()) additions = set(values or ()).difference(constants) removals = existing.difference(constants) appender = self.add remover = self.remove for member in values or (): if member in additions: appender(member) elif member in constants: appender(member) for member in removals: remover(member) def __ior__(self, other): if not collections._set_binops_check_strict(self, other): return NotImplemented for value in other: self.add(value) return self def _set(self): return set(iter(self)) def union(self, other): return set(self).union(other) __or__ = union def difference(self, other): return set(self).difference(other) __sub__ = difference def difference_update(self, other): for value in other: self.discard(value) def __isub__(self, other): if not collections._set_binops_check_strict(self, other): return NotImplemented for value in other: self.discard(value) return self def intersection(self, other): return set(self).intersection(other) __and__ = intersection def intersection_update(self, other): want, have = self.intersection(other), set(self) remove, add = have - want, want - have for value in remove: self.remove(value) for value in add: self.add(value) def __iand__(self, other): if not collections._set_binops_check_strict(self, other): return NotImplemented want, have = self.intersection(other), set(self) remove, add = have - want, want - have for value in remove: self.remove(value) for value in add: self.add(value) return self def symmetric_difference(self, 
other): return set(self).symmetric_difference(other) __xor__ = symmetric_difference def symmetric_difference_update(self, other): want, have = self.symmetric_difference(other), set(self) remove, add = have - want, want - have for value in remove: self.remove(value) for value in add: self.add(value) def __ixor__(self, other): if not collections._set_binops_check_strict(self, other): return NotImplemented want, have = self.symmetric_difference(other), set(self) remove, add = have - want, want - have for value in remove: self.remove(value) for value in add: self.add(value) return self def issubset(self, other): return set(self).issubset(other) def issuperset(self, other): return set(self).issuperset(other) def clear(self): self.col.clear() def copy(self): return set(self) def __eq__(self, other): return set(self) == other def __ne__(self, other): return set(self) != other def __lt__(self, other): return set(self) < other def __le__(self, other): return set(self) <= other def __gt__(self, other): return set(self) > other def __ge__(self, other): return set(self) >= other def __repr__(self): return repr(set(self)) def __hash__(self): raise TypeError("%s objects are unhashable" % type(self).__name__) for func_name, func in list(locals().items()): if ( util.callable(func) and func.__name__ == func_name and not func.__doc__ and hasattr(set, func_name) ): func.__doc__ = getattr(set, func_name).__doc__ del func_name, func
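# Editor's sketch: the _AssociationList/_AssociationDict/_AssociationSet
# machinery above is what a user-facing association_proxy() hands back. The
# following minimal, self-contained illustration shows the list case against
# an in-memory SQLite engine; the User/Keyword models are hypothetical and are
# not part of the vendored source file above.
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, relationship

Base = declarative_base()

class User(Base):
    __tablename__ = "user"
    id = Column(Integer, primary_key=True)
    name = Column(String(64))
    kw = relationship("Keyword", cascade="all, delete-orphan")
    # "keywords" proxies across the "kw" relationship to Keyword.keyword;
    # reads and writes go through an _AssociationList as defined above.
    keywords = association_proxy("kw", "keyword")

class Keyword(Base):
    __tablename__ = "keyword"
    id = Column(Integer, primary_key=True)
    user_id = Column(ForeignKey("user.id"))
    keyword = Column(String(64))

    def __init__(self, keyword):
        # the default "creator" calls Keyword(value) for each appended value
        self.keyword = keyword

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

session = Session(engine)
u = User(name="jek")
u.keywords.append("cheese-inspector")    # creator builds Keyword("cheese-inspector")
u.keywords.extend(["snack", "dessert"])  # list API delegates to _AssociationList
session.add(u)
session.flush()
# _AssociationList.__eq__ compares the proxied values, not the Keyword rows
assert u.keywords == ["cheese-inspector", "snack", "dessert"]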
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/ext/declarative/clsregistry.py
# ext/declarative/clsregistry.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Routines to handle the string class registry used by declarative. This system allows specification of classes and expressions used in :func:`_orm.relationship` using strings. """ import weakref from ... import exc from ... import inspection from ... import util from ...orm import class_mapper from ...orm import interfaces from ...orm.properties import ColumnProperty from ...orm.properties import RelationshipProperty from ...orm.properties import SynonymProperty from ...schema import _get_table_key # strong references to registries which we place in # the _decl_class_registry, which is usually weak referencing. # the internal registries here link to classes with weakrefs and remove # themselves when all references to contained classes are removed. _registries = set() def add_class(classname, cls): """Add a class to the _decl_class_registry associated with the given declarative class. """ if classname in cls._decl_class_registry: # class already exists. existing = cls._decl_class_registry[classname] if not isinstance(existing, _MultipleClassMarker): existing = cls._decl_class_registry[ classname ] = _MultipleClassMarker([cls, existing]) else: cls._decl_class_registry[classname] = cls try: root_module = cls._decl_class_registry["_sa_module_registry"] except KeyError: cls._decl_class_registry[ "_sa_module_registry" ] = root_module = _ModuleMarker("_sa_module_registry", None) tokens = cls.__module__.split(".") # build up a tree like this: # modulename: myapp.snacks.nuts # # myapp->snack->nuts->(classes) # snack->nuts->(classes) # nuts->(classes) # # this allows partial token paths to be used. while tokens: token = tokens.pop(0) module = root_module.get_module(token) for token in tokens: module = module.get_module(token) module.add_class(classname, cls) class _MultipleClassMarker(object): """refers to multiple classes of the same name within _decl_class_registry. """ __slots__ = "on_remove", "contents", "__weakref__" def __init__(self, classes, on_remove=None): self.on_remove = on_remove self.contents = set( [weakref.ref(item, self._remove_item) for item in classes] ) _registries.add(self) def __iter__(self): return (ref() for ref in self.contents) def attempt_get(self, path, key): if len(self.contents) > 1: raise exc.InvalidRequestError( 'Multiple classes found for path "%s" ' "in the registry of this declarative " "base. Please use a fully module-qualified path." % (".".join(path + [key])) ) else: ref = list(self.contents)[0] cls = ref() if cls is None: raise NameError(key) return cls def _remove_item(self, ref): self.contents.remove(ref) if not self.contents: _registries.discard(self) if self.on_remove: self.on_remove() def add_item(self, item): # protect against class registration race condition against # asynchronous garbage collection calling _remove_item, # [ticket:3208] modules = set( [ cls.__module__ for cls in [ref() for ref in self.contents] if cls is not None ] ) if item.__module__ in modules: util.warn( "This declarative base already contains a class with the " "same class name and module name as %s.%s, and will " "be replaced in the string-lookup table." % (item.__module__, item.__name__) ) self.contents.add(weakref.ref(item, self._remove_item)) class _ModuleMarker(object): """"refers to a module name within _decl_class_registry. 
""" __slots__ = "parent", "name", "contents", "mod_ns", "path", "__weakref__" def __init__(self, name, parent): self.parent = parent self.name = name self.contents = {} self.mod_ns = _ModNS(self) if self.parent: self.path = self.parent.path + [self.name] else: self.path = [] _registries.add(self) def __contains__(self, name): return name in self.contents def __getitem__(self, name): return self.contents[name] def _remove_item(self, name): self.contents.pop(name, None) if not self.contents and self.parent is not None: self.parent._remove_item(self.name) _registries.discard(self) def resolve_attr(self, key): return getattr(self.mod_ns, key) def get_module(self, name): if name not in self.contents: marker = _ModuleMarker(name, self) self.contents[name] = marker else: marker = self.contents[name] return marker def add_class(self, name, cls): if name in self.contents: existing = self.contents[name] existing.add_item(cls) else: existing = self.contents[name] = _MultipleClassMarker( [cls], on_remove=lambda: self._remove_item(name) ) class _ModNS(object): __slots__ = ("__parent",) def __init__(self, parent): self.__parent = parent def __getattr__(self, key): try: value = self.__parent.contents[key] except KeyError: pass else: if value is not None: if isinstance(value, _ModuleMarker): return value.mod_ns else: assert isinstance(value, _MultipleClassMarker) return value.attempt_get(self.__parent.path, key) raise AttributeError( "Module %r has no mapped classes " "registered under the name %r" % (self.__parent.name, key) ) class _GetColumns(object): __slots__ = ("cls",) def __init__(self, cls): self.cls = cls def __getattr__(self, key): mp = class_mapper(self.cls, configure=False) if mp: if key not in mp.all_orm_descriptors: raise exc.InvalidRequestError( "Class %r does not have a mapped column named %r" % (self.cls, key) ) desc = mp.all_orm_descriptors[key] if desc.extension_type is interfaces.NOT_EXTENSION: prop = desc.property if isinstance(prop, SynonymProperty): key = prop.name elif not isinstance(prop, ColumnProperty): raise exc.InvalidRequestError( "Property %r is not an instance of" " ColumnProperty (i.e. does not correspond" " directly to a Column)." 
% key ) return getattr(self.cls, key) inspection._inspects(_GetColumns)( lambda target: inspection.inspect(target.cls) ) class _GetTable(object): __slots__ = "key", "metadata" def __init__(self, key, metadata): self.key = key self.metadata = metadata def __getattr__(self, key): return self.metadata.tables[_get_table_key(key, self.key)] def _determine_container(key, value): if isinstance(value, _MultipleClassMarker): value = value.attempt_get([], key) return _GetColumns(value) class _class_resolver(object): def __init__(self, cls, prop, fallback, arg): self.cls = cls self.prop = prop self.arg = self._declarative_arg = arg self.fallback = fallback self._dict = util.PopulateDict(self._access_cls) self._resolvers = () def _access_cls(self, key): cls = self.cls if key in cls._decl_class_registry: return _determine_container(key, cls._decl_class_registry[key]) elif key in cls.metadata.tables: return cls.metadata.tables[key] elif key in cls.metadata._schemas: return _GetTable(key, cls.metadata) elif ( "_sa_module_registry" in cls._decl_class_registry and key in cls._decl_class_registry["_sa_module_registry"] ): registry = cls._decl_class_registry["_sa_module_registry"] return registry.resolve_attr(key) elif self._resolvers: for resolv in self._resolvers: value = resolv(key) if value is not None: return value return self.fallback[key] def _raise_for_name(self, name, err): util.raise_( exc.InvalidRequestError( "When initializing mapper %s, expression %r failed to " "locate a name (%r). If this is a class name, consider " "adding this relationship() to the %r class after " "both dependent classes have been defined." % (self.prop.parent, self.arg, name, self.cls) ), from_=err, ) def _resolve_name(self): name = self.arg d = self._dict rval = None try: for token in name.split("."): if rval is None: rval = d[token] else: rval = getattr(rval, token) except KeyError as err: self._raise_for_name(name, err) except NameError as n: self._raise_for_name(n.args[0], n) else: if isinstance(rval, _GetColumns): return rval.cls else: return rval def __call__(self): try: x = eval(self.arg, globals(), self._dict) if isinstance(x, _GetColumns): return x.cls else: return x except NameError as n: self._raise_for_name(n.args[0], n) def _resolver(cls, prop): import sqlalchemy from sqlalchemy.orm import foreign, remote fallback = sqlalchemy.__dict__.copy() fallback.update({"foreign": foreign, "remote": remote}) def resolve_arg(arg): return _class_resolver(cls, prop, fallback, arg) def resolve_name(arg): return _class_resolver(cls, prop, fallback, arg)._resolve_name return resolve_name, resolve_arg def _deferred_relationship(cls, prop): if isinstance(prop, RelationshipProperty): resolve_name, resolve_arg = _resolver(cls, prop) for attr in ( "order_by", "primaryjoin", "secondaryjoin", "secondary", "_user_defined_foreign_keys", "remote_side", ): v = getattr(prop, attr) if isinstance(v, util.string_types): setattr(prop, attr, resolve_arg(v)) for attr in ("argument",): v = getattr(prop, attr) if isinstance(v, util.string_types): setattr(prop, attr, resolve_name(v)) if prop.backref and isinstance(prop.backref, tuple): key, kwargs = prop.backref for attr in ( "primaryjoin", "secondaryjoin", "secondary", "foreign_keys", "remote_side", "order_by", ): if attr in kwargs and isinstance( kwargs[attr], util.string_types ): kwargs[attr] = resolve_arg(kwargs[attr]) return prop
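# Editor's sketch: what the registry above buys the end user. String arguments
# to relationship() are resolved lazily through _decl_class_registry by
# _class_resolver/_deferred_relationship, so a class may be named before it is
# defined. The Parent/Child models below are hypothetical and are not part of
# the vendored source file above.
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import configure_mappers, relationship

Base = declarative_base()

class Parent(Base):
    __tablename__ = "parent"
    id = Column(Integer, primary_key=True)
    # "Child" is a plain string; resolution is deferred until mappers
    # configure, so Child may live in another module sharing this Base.
    children = relationship("Child", back_populates="parent")

class Child(Base):
    __tablename__ = "child"
    id = Column(Integer, primary_key=True)
    parent_id = Column(ForeignKey("parent.id"))
    parent = relationship("Parent", back_populates="children")

configure_mappers()  # forces the string names to be resolved now
assert Parent.children.property.mapper.class_ is Child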
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/ext/declarative/__init__.py
# ext/declarative/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

from .api import AbstractConcreteBase
from .api import as_declarative
from .api import comparable_using
from .api import ConcreteBase
from .api import declarative_base
from .api import DeclarativeMeta
from .api import declared_attr
from .api import DeferredReflection
from .api import has_inherited_table
from .api import instrument_declarative
from .api import synonym_for


__all__ = [
    "declarative_base",
    "synonym_for",
    "has_inherited_table",
    "comparable_using",
    "instrument_declarative",
    "declared_attr",
    "as_declarative",
    "ConcreteBase",
    "AbstractConcreteBase",
    "DeclarativeMeta",
    "DeferredReflection",
]
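# Editor's sketch of the re-exported surface: as_declarative() and
# declared_attr from this package in their documented combination. The
# Base/Account classes are hypothetical and are not part of the vendored
# source file above.
from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import as_declarative, declared_attr

@as_declarative()
class Base(object):
    @declared_attr
    def __tablename__(cls):
        # derive the table name from the class name, per the api.py docstrings
        return cls.__name__.lower()
    id = Column(Integer, primary_key=True)

class Account(Base):
    pass

assert Account.__tablename__ == "account"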
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/ext/declarative/api.py
# ext/declarative/api.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Public API functions and helpers for declarative.""" import re import weakref from .base import _add_attribute from .base import _as_declarative from .base import _declarative_constructor from .base import _DeferredMapperConfig from .base import _del_attribute from .clsregistry import _class_resolver from ... import exc from ... import inspection from ... import util from ...orm import attributes from ...orm import comparable_property from ...orm import exc as orm_exc from ...orm import interfaces from ...orm import properties from ...orm import synonym as _orm_synonym from ...orm.base import _inspect_mapped_class from ...orm.base import _mapper_or_none from ...orm.util import polymorphic_union from ...schema import MetaData from ...schema import Table from ...util import hybridmethod from ...util import hybridproperty from ...util import OrderedDict def instrument_declarative(cls, registry, metadata): """Given a class, configure the class declaratively, using the given registry, which can be any dictionary, and MetaData object. """ if "_decl_class_registry" in cls.__dict__: raise exc.InvalidRequestError( "Class %r already has been " "instrumented declaratively" % cls ) cls._decl_class_registry = registry cls.metadata = metadata _as_declarative(cls, cls.__name__, cls.__dict__) def has_inherited_table(cls): """Given a class, return True if any of the classes it inherits from has a mapped table, otherwise return False. This is used in declarative mixins to build attributes that behave differently for the base class vs. a subclass in an inheritance hierarchy. .. seealso:: :ref:`decl_mixin_inheritance` """ for class_ in cls.__mro__[1:]: if getattr(class_, "__table__", None) is not None: return True return False class DeclarativeMeta(type): def __init__(cls, classname, bases, dict_): if "_decl_class_registry" not in cls.__dict__: _as_declarative(cls, classname, cls.__dict__) type.__init__(cls, classname, bases, dict_) def __setattr__(cls, key, value): _add_attribute(cls, key, value) def __delattr__(cls, key): _del_attribute(cls, key) def synonym_for(name, map_column=False): """Decorator that produces an :func:`_orm.synonym` attribute in conjunction with a Python descriptor. The function being decorated is passed to :func:`_orm.synonym` as the :paramref:`.orm.synonym.descriptor` parameter:: class MyClass(Base): __tablename__ = 'my_table' id = Column(Integer, primary_key=True) _job_status = Column("job_status", String(50)) @synonym_for("job_status") @property def job_status(self): return "Status: %s" % self._job_status The :ref:`hybrid properties <mapper_hybrids>` feature of SQLAlchemy is typically preferred instead of synonyms, which is a more legacy feature. .. seealso:: :ref:`synonyms` - Overview of synonyms :func:`_orm.synonym` - the mapper-level function :ref:`mapper_hybrids` - The Hybrid Attribute extension provides an updated approach to augmenting attribute behavior more flexibly than can be achieved with synonyms. """ def decorate(fn): return _orm_synonym(name, map_column=map_column, descriptor=fn) return decorate def comparable_using(comparator_factory): """Decorator, allow a Python @property to be used in query criteria. 
This is a decorator front end to :func:`~sqlalchemy.orm.comparable_property` that passes through the comparator_factory and the function being decorated:: @comparable_using(MyComparatorType) @property def prop(self): return 'special sauce' The regular ``comparable_property()`` is also usable directly in a declarative setting and may be convenient for read/write properties:: prop = comparable_property(MyComparatorType) """ def decorate(fn): return comparable_property(comparator_factory, fn) return decorate class declared_attr(interfaces._MappedAttribute, property): """Mark a class-level method as representing the definition of a mapped property or special declarative member name. @declared_attr turns the attribute into a scalar-like property that can be invoked from the uninstantiated class. Declarative treats attributes specifically marked with @declared_attr as returning a construct that is specific to mapping or declarative table configuration. The name of the attribute is that of what the non-dynamic version of the attribute would be. @declared_attr is more often than not applicable to mixins, to define relationships that are to be applied to different implementors of the class:: class ProvidesUser(object): "A mixin that adds a 'user' relationship to classes." @declared_attr def user(self): return relationship("User") It also can be applied to mapped classes, such as to provide a "polymorphic" scheme for inheritance:: class Employee(Base): id = Column(Integer, primary_key=True) type = Column(String(50), nullable=False) @declared_attr def __tablename__(cls): return cls.__name__.lower() @declared_attr def __mapper_args__(cls): if cls.__name__ == 'Employee': return { "polymorphic_on":cls.type, "polymorphic_identity":"Employee" } else: return {"polymorphic_identity":cls.__name__} """ def __init__(self, fget, cascading=False): super(declared_attr, self).__init__(fget) self.__doc__ = fget.__doc__ self._cascading = cascading def __get__(desc, self, cls): reg = cls.__dict__.get("_sa_declared_attr_reg", None) if reg is None: if ( not re.match(r"^__.+__$", desc.fget.__name__) and attributes.manager_of_class(cls) is None ): util.warn( "Unmanaged access of declarative attribute %s from " "non-mapped class %s" % (desc.fget.__name__, cls.__name__) ) return desc.fget(cls) elif desc in reg: return reg[desc] else: reg[desc] = obj = desc.fget(cls) return obj @hybridmethod def _stateful(cls, **kw): return _stateful_declared_attr(**kw) @hybridproperty def cascading(cls): """Mark a :class:`.declared_attr` as cascading. This is a special-use modifier which indicates that a column or MapperProperty-based declared attribute should be configured distinctly per mapped subclass, within a mapped-inheritance scenario. .. warning:: The :attr:`.declared_attr.cascading` modifier has several limitations: * The flag **only** applies to the use of :class:`.declared_attr` on declarative mixin classes and ``__abstract__`` classes; it currently has no effect when used on a mapped class directly. * The flag **only** applies to normally-named attributes, e.g. not any special underscore attributes such as ``__tablename__``. On these attributes it has **no** effect. * The flag currently **does not allow further overrides** down the class hierarchy; if a subclass tries to override the attribute, a warning is emitted and the overridden attribute is skipped. This is a limitation that it is hoped will be resolved at some point. 
Below, both MyClass as well as MySubClass will have a distinct ``id`` Column object established:: class HasIdMixin(object): @declared_attr.cascading def id(cls): if has_inherited_table(cls): return Column( ForeignKey('myclass.id'), primary_key=True) else: return Column(Integer, primary_key=True) class MyClass(HasIdMixin, Base): __tablename__ = 'myclass' # ... class MySubClass(MyClass): "" # ... The behavior of the above configuration is that ``MySubClass`` will refer to both its own ``id`` column as well as that of ``MyClass`` underneath the attribute named ``some_id``. .. seealso:: :ref:`declarative_inheritance` :ref:`mixin_inheritance_columns` """ return cls._stateful(cascading=True) class _stateful_declared_attr(declared_attr): def __init__(self, **kw): self.kw = kw def _stateful(self, **kw): new_kw = self.kw.copy() new_kw.update(kw) return _stateful_declared_attr(**new_kw) def __call__(self, fn): return declared_attr(fn, **self.kw) def declarative_base( bind=None, metadata=None, mapper=None, cls=object, name="Base", constructor=_declarative_constructor, class_registry=None, metaclass=DeclarativeMeta, ): r"""Construct a base class for declarative class definitions. The new base class will be given a metaclass that produces appropriate :class:`~sqlalchemy.schema.Table` objects and makes the appropriate :func:`~sqlalchemy.orm.mapper` calls based on the information provided declaratively in the class and any subclasses of the class. :param bind: An optional :class:`~sqlalchemy.engine.Connectable`, will be assigned the ``bind`` attribute on the :class:`~sqlalchemy.schema.MetaData` instance. :param metadata: An optional :class:`~sqlalchemy.schema.MetaData` instance. All :class:`~sqlalchemy.schema.Table` objects implicitly declared by subclasses of the base will share this MetaData. A MetaData instance will be created if none is provided. The :class:`~sqlalchemy.schema.MetaData` instance will be available via the `metadata` attribute of the generated declarative base class. :param mapper: An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`. Will be used to map subclasses to their Tables. :param cls: Defaults to :class:`object`. A type to use as the base for the generated declarative base class. May be a class or tuple of classes. :param name: Defaults to ``Base``. The display name for the generated class. Customizing this is not required, but can improve clarity in tracebacks and debugging. :param constructor: Defaults to :func:`~sqlalchemy.ext.declarative.base._declarative_constructor`, an __init__ implementation that assigns \**kwargs for declared fields and relationships to an instance. If ``None`` is supplied, no __init__ will be provided and construction will fall back to cls.__init__ by way of the normal Python semantics. :param class_registry: optional dictionary that will serve as the registry of class names-> mapped classes when string names are used to identify classes inside of :func:`_orm.relationship` and others. Allows two or more declarative base classes to share the same registry of class names for simplified inter-base relationships. :param metaclass: Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__ compatible callable to use as the meta type of the generated declarative base class. .. versionchanged:: 1.1 if :paramref:`.declarative_base.cls` is a single class (rather than a tuple), the constructed base class will inherit its docstring. .. 
seealso:: :func:`.as_declarative` """ lcl_metadata = metadata or MetaData() if bind: lcl_metadata.bind = bind if class_registry is None: class_registry = weakref.WeakValueDictionary() bases = not isinstance(cls, tuple) and (cls,) or cls class_dict = dict( _decl_class_registry=class_registry, metadata=lcl_metadata ) if isinstance(cls, type): class_dict["__doc__"] = cls.__doc__ if constructor: class_dict["__init__"] = constructor if mapper: class_dict["__mapper_cls__"] = mapper return metaclass(name, bases, class_dict) def as_declarative(**kw): """ Class decorator for :func:`.declarative_base`. Provides a syntactical shortcut to the ``cls`` argument sent to :func:`.declarative_base`, allowing the base class to be converted in-place to a "declarative" base:: from sqlalchemy.ext.declarative import as_declarative @as_declarative() class Base(object): @declared_attr def __tablename__(cls): return cls.__name__.lower() id = Column(Integer, primary_key=True) class MyMappedClass(Base): # ... All keyword arguments passed to :func:`.as_declarative` are passed along to :func:`.declarative_base`. .. seealso:: :func:`.declarative_base` """ def decorate(cls): kw["cls"] = cls kw["name"] = cls.__name__ return declarative_base(**kw) return decorate class ConcreteBase(object): """A helper class for 'concrete' declarative mappings. :class:`.ConcreteBase` will use the :func:`.polymorphic_union` function automatically, against all tables mapped as a subclass to this class. The function is called via the ``__declare_last__()`` function, which is essentially a hook for the :meth:`.after_configured` event. :class:`.ConcreteBase` produces a mapped table for the class itself. Compare to :class:`.AbstractConcreteBase`, which does not. Example:: from sqlalchemy.ext.declarative import ConcreteBase class Employee(ConcreteBase, Base): __tablename__ = 'employee' employee_id = Column(Integer, primary_key=True) name = Column(String(50)) __mapper_args__ = { 'polymorphic_identity':'employee', 'concrete':True} class Manager(Employee): __tablename__ = 'manager' employee_id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(40)) __mapper_args__ = { 'polymorphic_identity':'manager', 'concrete':True} .. seealso:: :class:`.AbstractConcreteBase` :ref:`concrete_inheritance` """ @classmethod def _create_polymorphic_union(cls, mappers): return polymorphic_union( OrderedDict( (mp.polymorphic_identity, mp.local_table) for mp in mappers ), "type", "pjoin", ) @classmethod def __declare_first__(cls): m = cls.__mapper__ if m.with_polymorphic: return mappers = list(m.self_and_descendants) pjoin = cls._create_polymorphic_union(mappers) m._set_with_polymorphic(("*", pjoin)) m._set_polymorphic_on(pjoin.c.type) class AbstractConcreteBase(ConcreteBase): """A helper class for 'concrete' declarative mappings. :class:`.AbstractConcreteBase` will use the :func:`.polymorphic_union` function automatically, against all tables mapped as a subclass to this class. The function is called via the ``__declare_last__()`` function, which is essentially a hook for the :meth:`.after_configured` event. :class:`.AbstractConcreteBase` does produce a mapped class for the base class, however it is not persisted to any table; it is instead mapped directly to the "polymorphic" selectable directly and is only used for selecting. Compare to :class:`.ConcreteBase`, which does create a persisted table for the base class. .. 
note:: The :class:`.AbstractConcreteBase` class does not intend to set up the mapping for the base class until all the subclasses have been defined, as it needs to create a mapping against a selectable that will include all subclass tables. In order to achieve this, it waits for the **mapper configuration event** to occur, at which point it scans through all the configured subclasses and sets up a mapping that will query against all subclasses at once. While this event is normally invoked automatically, in the case of :class:`.AbstractConcreteBase`, it may be necessary to invoke it explicitly after **all** subclass mappings are defined, if the first operation is to be a query against this base class. To do so, invoke :func:`.configure_mappers` once all the desired classes have been configured:: from sqlalchemy.orm import configure_mappers configure_mappers() .. seealso:: :func:`_orm.configure_mappers` Example:: from sqlalchemy.ext.declarative import AbstractConcreteBase class Employee(AbstractConcreteBase, Base): pass class Manager(Employee): __tablename__ = 'manager' employee_id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(40)) __mapper_args__ = { 'polymorphic_identity':'manager', 'concrete':True} configure_mappers() The abstract base class is handled by declarative in a special way; at class configuration time, it behaves like a declarative mixin or an ``__abstract__`` base class. Once classes are configured and mappings are produced, it then gets mapped itself, but after all of its descendants. This is a very unique system of mapping not found in any other SQLAlchemy system. Using this approach, we can specify columns and properties that will take place on mapped subclasses, in the way that we normally do as in :ref:`declarative_mixins`:: class Company(Base): __tablename__ = 'company' id = Column(Integer, primary_key=True) class Employee(AbstractConcreteBase, Base): employee_id = Column(Integer, primary_key=True) @declared_attr def company_id(cls): return Column(ForeignKey('company.id')) @declared_attr def company(cls): return relationship("Company") class Manager(Employee): __tablename__ = 'manager' name = Column(String(50)) manager_data = Column(String(40)) __mapper_args__ = { 'polymorphic_identity':'manager', 'concrete':True} configure_mappers() When we make use of our mappings however, both ``Manager`` and ``Employee`` will have an independently usable ``.company`` attribute:: session.query(Employee).filter(Employee.company.has(id=5)) .. versionchanged:: 1.0.0 - The mechanics of :class:`.AbstractConcreteBase` have been reworked to support relationships established directly on the abstract base, without any special configurational steps. .. seealso:: :class:`.ConcreteBase` :ref:`concrete_inheritance` """ __no_table__ = True @classmethod def __declare_first__(cls): cls._sa_decl_prepare_nocascade() @classmethod def _sa_decl_prepare_nocascade(cls): if getattr(cls, "__mapper__", None): return to_map = _DeferredMapperConfig.config_for_cls(cls) # can't rely on 'self_and_descendants' here # since technically an immediate subclass # might not be mapped, but a subclass # may be. 
mappers = [] stack = list(cls.__subclasses__()) while stack: klass = stack.pop() stack.extend(klass.__subclasses__()) mn = _mapper_or_none(klass) if mn is not None: mappers.append(mn) pjoin = cls._create_polymorphic_union(mappers) # For columns that were declared on the class, these # are normally ignored with the "__no_table__" mapping, # unless they have a different attribute key vs. col name # and are in the properties argument. # In that case, ensure we update the properties entry # to the correct column from the pjoin target table. declared_cols = set(to_map.declared_columns) for k, v in list(to_map.properties.items()): if v in declared_cols: to_map.properties[k] = pjoin.c[v.key] to_map.local_table = pjoin m_args = to_map.mapper_args_fn or dict def mapper_args(): args = m_args() args["polymorphic_on"] = pjoin.c.type return args to_map.mapper_args_fn = mapper_args m = to_map.map() for scls in cls.__subclasses__(): sm = _mapper_or_none(scls) if sm and sm.concrete and cls in scls.__bases__: sm._set_concrete_base(m) @classmethod def _sa_raise_deferred_config(cls): raise orm_exc.UnmappedClassError( cls, msg="Class %s is a subclass of AbstractConcreteBase and " "has a mapping pending until all subclasses are defined. " "Call the sqlalchemy.orm.configure_mappers() function after " "all subclasses have been defined to " "complete the mapping of this class." % orm_exc._safe_cls_name(cls), ) class DeferredReflection(object): """A helper class for construction of mappings based on a deferred reflection step. Normally, declarative can be used with reflection by setting a :class:`_schema.Table` object using autoload=True as the ``__table__`` attribute on a declarative class. The caveat is that the :class:`_schema.Table` must be fully reflected, or at the very least have a primary key column, at the point at which a normal declarative mapping is constructed, meaning the :class:`_engine.Engine` must be available at class declaration time. The :class:`.DeferredReflection` mixin moves the construction of mappers to be at a later point, after a specific method is called which first reflects all :class:`_schema.Table` objects created so far. Classes can define it as such:: from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.declarative import DeferredReflection Base = declarative_base() class MyClass(DeferredReflection, Base): __tablename__ = 'mytable' Above, ``MyClass`` is not yet mapped. After a series of classes have been defined in the above fashion, all tables can be reflected and mappings created using :meth:`.prepare`:: engine = create_engine("someengine://...") DeferredReflection.prepare(engine) The :class:`.DeferredReflection` mixin can be applied to individual classes, used as the base for the declarative base itself, or used in a custom abstract class. Using an abstract base allows that only a subset of classes to be prepared for a particular prepare step, which is necessary for applications that use more than one engine. For example, if an application has two engines, you might use two bases, and prepare each separately, e.g.:: class ReflectedOne(DeferredReflection, Base): __abstract__ = True class ReflectedTwo(DeferredReflection, Base): __abstract__ = True class MyClass(ReflectedOne): __tablename__ = 'mytable' class MyOtherClass(ReflectedOne): __tablename__ = 'myothertable' class YetAnotherClass(ReflectedTwo): __tablename__ = 'yetanothertable' # ... etc. 
Above, the class hierarchies for ``ReflectedOne`` and ``ReflectedTwo`` can be configured separately:: ReflectedOne.prepare(engine_one) ReflectedTwo.prepare(engine_two) """ @classmethod def prepare(cls, engine): """Reflect all :class:`_schema.Table` objects for all current :class:`.DeferredReflection` subclasses""" to_map = _DeferredMapperConfig.classes_for_base(cls) for thingy in to_map: cls._sa_decl_prepare(thingy.local_table, engine) thingy.map() mapper = thingy.cls.__mapper__ metadata = mapper.class_.metadata for rel in mapper._props.values(): if ( isinstance(rel, properties.RelationshipProperty) and rel.secondary is not None ): if isinstance(rel.secondary, Table): cls._reflect_table(rel.secondary, engine) elif isinstance(rel.secondary, _class_resolver): rel.secondary._resolvers += ( cls._sa_deferred_table_resolver(engine, metadata), ) @classmethod def _sa_deferred_table_resolver(cls, engine, metadata): def _resolve(key): t1 = Table(key, metadata) cls._reflect_table(t1, engine) return t1 return _resolve @classmethod def _sa_decl_prepare(cls, local_table, engine): # autoload Table, which is already # present in the metadata. This # will fill in db-loaded columns # into the existing Table object. if local_table is not None: cls._reflect_table(local_table, engine) @classmethod def _sa_raise_deferred_config(cls): raise orm_exc.UnmappedClassError( cls, msg="Class %s is a subclass of DeferredReflection. " "Mappings are not produced until the .prepare() " "method is called on the class hierarchy." % orm_exc._safe_cls_name(cls), ) @classmethod def _reflect_table(cls, table, engine): Table( table.name, table.metadata, extend_existing=True, autoload_replace=False, autoload=True, autoload_with=engine, schema=table.schema, ) @inspection._inspects(DeclarativeMeta) def _inspect_decl_meta(cls): mp = _inspect_mapped_class(cls) if mp is None: if _DeferredMapperConfig.has_cls(cls): _DeferredMapperConfig.raise_unmapped_for_cls(cls) raise orm_exc.UnmappedClassError( cls, msg="Class %s has a deferred mapping on it. It is not yet " "usable as a mapped class." % orm_exc._safe_cls_name(cls), ) return mp
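# Editor's sketch: DeferredReflection.prepare() as defined above, end to end
# against a throwaway in-memory SQLite database. The "widget" table and the
# Widget model are hypothetical and are not part of the vendored source file.
from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine
from sqlalchemy.ext.declarative import DeferredReflection, declarative_base

engine = create_engine("sqlite://")

# Create the table outside declarative, standing in for a pre-existing schema.
md = MetaData()
Table(
    "widget",
    md,
    Column("id", Integer, primary_key=True),
    Column("name", String(50)),
)
md.create_all(engine)

Base = declarative_base()

class Widget(DeferredReflection, Base):
    # No columns declared here: mapping is deferred, and the columns
    # (including the primary key) are filled in from the database at prepare().
    __tablename__ = "widget"

DeferredReflection.prepare(engine)  # reflects and maps all pending classes
assert "name" in Widget.__table__.c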
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/ext/declarative/base.py
# ext/declarative/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Internal implementation for declarative."""

import collections
import weakref

from sqlalchemy.orm import instrumentation

from . import clsregistry
from ... import event
from ... import exc
from ... import util
from ...orm import class_mapper
from ...orm import exc as orm_exc
from ...orm import mapper
from ...orm import mapperlib
from ...orm import synonym
from ...orm.attributes import QueryableAttribute
from ...orm.base import _is_mapped_class
from ...orm.base import InspectionAttr
from ...orm.interfaces import MapperProperty
from ...orm.properties import ColumnProperty
from ...orm.properties import CompositeProperty
from ...schema import Column
from ...schema import Table
from ...sql import expression
from ...util import topological

declared_attr = declarative_props = None


def _declared_mapping_info(cls):
    # deferred mapping
    if _DeferredMapperConfig.has_cls(cls):
        return _DeferredMapperConfig.config_for_cls(cls)
    # regular mapping
    elif _is_mapped_class(cls):
        return class_mapper(cls, configure=False)
    else:
        return None


def _resolve_for_abstract_or_classical(cls):
    if cls is object:
        return None

    if _get_immediate_cls_attr(cls, "__abstract__", strict=True):
        for sup in cls.__bases__:
            sup = _resolve_for_abstract_or_classical(sup)
            if sup is not None:
                return sup
        else:
            return None
    else:
        classical = _dive_for_classically_mapped_class(cls)
        if classical is not None:
            return classical
        else:
            return cls


def _dive_for_classically_mapped_class(cls):
    # support issue #4321
    # if we are within a base hierarchy, don't
    # search at all for classical mappings
    if hasattr(cls, "_decl_class_registry"):
        return None

    manager = instrumentation.manager_of_class(cls)
    if manager is not None:
        return cls
    else:
        for sup in cls.__bases__:
            mapper = _dive_for_classically_mapped_class(sup)
            if mapper is not None:
                return sup
        else:
            return None


def _get_immediate_cls_attr(cls, attrname, strict=False):
    """return an attribute of the class that is either present directly
    on the class, e.g. not on a superclass, or is from a superclass but
    this superclass is a non-mapped mixin, that is, not a descendant of
    the declarative base and is also not classically mapped.

    This is used to detect attributes that indicate something about
    a mapped class independently from any mapped classes that it may
    inherit from.

    """
    if not issubclass(cls, object):
        return None

    for base in cls.__mro__:
        _is_declarative_inherits = hasattr(base, "_decl_class_registry")
        _is_classicial_inherits = (
            not _is_declarative_inherits
            and _dive_for_classically_mapped_class(base) is not None
        )

        if attrname in base.__dict__ and (
            base is cls
            or (
                (base in cls.__bases__ if strict else True)
                and not _is_declarative_inherits
                and not _is_classicial_inherits
            )
        ):
            return getattr(base, attrname)
    else:
        return None


def _as_declarative(cls, classname, dict_):
    global declared_attr, declarative_props
    if declared_attr is None:
        from .api import declared_attr

        declarative_props = (declared_attr, util.classproperty)

    if _get_immediate_cls_attr(cls, "__abstract__", strict=True):
        return

    _MapperConfig.setup_mapping(cls, classname, dict_)


def _check_declared_props_nocascade(obj, name, cls):
    if isinstance(obj, declarative_props):
        if getattr(obj, "_cascading", False):
            util.warn(
                "@declared_attr.cascading is not supported on the %s "
                "attribute on class %s.  This attribute invokes for "
                "subclasses in any case." % (name, cls)
            )
        return True
    else:
        return False


class _MapperConfig(object):
    @classmethod
    def setup_mapping(cls, cls_, classname, dict_):
        defer_map = _get_immediate_cls_attr(
            cls_, "_sa_decl_prepare_nocascade", strict=True
        ) or hasattr(cls_, "_sa_decl_prepare")

        if defer_map:
            cfg_cls = _DeferredMapperConfig
        else:
            cfg_cls = _MapperConfig
        cfg_cls(cls_, classname, dict_)

    def __init__(self, cls_, classname, dict_):
        self.cls = cls_

        # dict_ will be a dictproxy, which we can't write to, and we need to!
        self.dict_ = dict(dict_)
        self.classname = classname
        self.persist_selectable = None
        self.properties = util.OrderedDict()
        self.declared_columns = set()
        self.column_copies = {}
        self._setup_declared_events()

        # temporary registry.  While early 1.0 versions
        # set up the ClassManager here, by API contract
        # we can't do that until there's a mapper.
        self.cls._sa_declared_attr_reg = {}

        self._scan_attributes()

        mapperlib._CONFIGURE_MUTEX.acquire()
        try:
            clsregistry.add_class(self.classname, self.cls)

            self._extract_mappable_attributes()

            self._extract_declared_columns()

            self._setup_table()

            self._setup_inheritance()

            self._early_mapping()
        finally:
            mapperlib._CONFIGURE_MUTEX.release()

    def _early_mapping(self):
        self.map()

    def _setup_declared_events(self):
        if _get_immediate_cls_attr(self.cls, "__declare_last__"):

            @event.listens_for(mapper, "after_configured")
            def after_configured():
                self.cls.__declare_last__()

        if _get_immediate_cls_attr(self.cls, "__declare_first__"):

            @event.listens_for(mapper, "before_configured")
            def before_configured():
                self.cls.__declare_first__()

    def _scan_attributes(self):
        cls = self.cls
        dict_ = self.dict_
        column_copies = self.column_copies
        mapper_args_fn = None
        table_args = inherited_table_args = None
        tablename = None

        for base in cls.__mro__:
            class_mapped = (
                base is not cls
                and _declared_mapping_info(base) is not None
                and not _get_immediate_cls_attr(
                    base, "_sa_decl_prepare_nocascade", strict=True
                )
            )

            if not class_mapped and base is not cls:
                self._produce_column_copies(base)

            for name, obj in vars(base).items():
                if name == "__mapper_args__":
                    check_decl = _check_declared_props_nocascade(
                        obj, name, cls
                    )
                    if not mapper_args_fn and (
                        not class_mapped or check_decl
                    ):
                        # don't even invoke __mapper_args__ until
                        # after we've determined everything about the
                        # mapped table.
                        # make a copy of it so a class-level dictionary
                        # is not overwritten when we update column-based
                        # arguments.
                        def mapper_args_fn():
                            return dict(cls.__mapper_args__)

                elif name == "__tablename__":
                    check_decl = _check_declared_props_nocascade(
                        obj, name, cls
                    )
                    if not tablename and (not class_mapped or check_decl):
                        tablename = cls.__tablename__
                elif name == "__table_args__":
                    check_decl = _check_declared_props_nocascade(
                        obj, name, cls
                    )
                    if not table_args and (not class_mapped or check_decl):
                        table_args = cls.__table_args__
                        if not isinstance(
                            table_args, (tuple, dict, type(None))
                        ):
                            raise exc.ArgumentError(
                                "__table_args__ value must be a tuple, "
                                "dict, or None"
                            )
                        if base is not cls:
                            inherited_table_args = True
                elif class_mapped:
                    if isinstance(obj, declarative_props):
                        util.warn(
                            "Regular (i.e. not __special__) "
                            "attribute '%s.%s' uses @declared_attr, "
                            "but owning class %s is mapped - "
                            "not applying to subclass %s."
                            % (base.__name__, name, base, cls)
                        )
                    continue
                elif base is not cls:
                    # we're a mixin, abstract base, or something that is
                    # acting like that for now.
                    if isinstance(obj, Column):
                        # already copied columns to the mapped class.
                        continue
                    elif isinstance(obj, MapperProperty):
                        raise exc.InvalidRequestError(
                            "Mapper properties (i.e. deferred,"
                            "column_property(), relationship(), etc.) must "
                            "be declared as @declared_attr callables "
                            "on declarative mixin classes."
                        )
                    elif isinstance(obj, declarative_props):
                        oldclassprop = isinstance(obj, util.classproperty)
                        if not oldclassprop and obj._cascading:
                            if name in dict_:
                                # unfortunately, while we can use the
                                # user-defined attribute here to allow a
                                # clean override, if there's another
                                # subclass below then it still tries to use
                                # this.  not sure if there is enough
                                # information here to add this as a feature
                                # later on.
                                util.warn(
                                    "Attribute '%s' on class %s cannot be "
                                    "processed due to "
                                    "@declared_attr.cascading; "
                                    "skipping" % (name, cls)
                                )
                            dict_[name] = column_copies[
                                obj
                            ] = ret = obj.__get__(obj, cls)
                            setattr(cls, name, ret)
                        else:
                            if oldclassprop:
                                util.warn_deprecated(
                                    "Use of sqlalchemy.util.classproperty "
                                    "on declarative classes is deprecated."
                                )
                            # access attribute using normal class access
                            ret = getattr(cls, name)

                            # correct for proxies created from
                            # hybrid_property or similar.  note there is no
                            # known case that produces nested proxies, so we
                            # are only looking one level deep right now.
                            if (
                                isinstance(ret, InspectionAttr)
                                and ret._is_internal_proxy
                                and not isinstance(
                                    ret.original_property, MapperProperty
                                )
                            ):
                                ret = ret.descriptor

                            dict_[name] = column_copies[obj] = ret
                        if (
                            isinstance(ret, (Column, MapperProperty))
                            and ret.doc is None
                        ):
                            ret.doc = obj.__doc__
                    # here, the attribute is some other kind of property that
                    # we assume is not part of the declarative mapping.
                    # however, check for some more common mistakes
                    else:
                        self._warn_for_decl_attributes(base, name, obj)

        if inherited_table_args and not tablename:
            table_args = None

        self.table_args = table_args
        self.tablename = tablename
        self.mapper_args_fn = mapper_args_fn

    def _warn_for_decl_attributes(self, cls, key, c):
        if isinstance(c, expression.ColumnClause):
            util.warn(
                "Attribute '%s' on class %s appears to be a non-schema "
                "'sqlalchemy.sql.column()' "
                "object; this won't be part of the declarative mapping"
                % (key, cls)
            )

    def _produce_column_copies(self, base):
        cls = self.cls
        dict_ = self.dict_
        column_copies = self.column_copies
        # copy mixin columns to the mapped class
        for name, obj in vars(base).items():
            if isinstance(obj, Column):
                if getattr(cls, name) is not obj:
                    # if column has been overridden
                    # (like by the InstrumentedAttribute of the
                    # superclass), skip
                    continue
                elif obj.foreign_keys:
                    raise exc.InvalidRequestError(
                        "Columns with foreign keys to other columns "
                        "must be declared as @declared_attr callables "
                        "on declarative mixin classes. "
                    )
                elif name not in dict_ and not (
                    "__table__" in dict_
                    and (obj.name or name) in dict_["__table__"].c
                ):
                    column_copies[obj] = copy_ = obj.copy()
                    copy_._creation_order = obj._creation_order
                    setattr(cls, name, copy_)
                    dict_[name] = copy_

    def _extract_mappable_attributes(self):
        cls = self.cls
        dict_ = self.dict_
        our_stuff = self.properties

        late_mapped = _get_immediate_cls_attr(
            cls, "_sa_decl_prepare_nocascade", strict=True
        )

        for k in list(dict_):

            if k in ("__table__", "__tablename__", "__mapper_args__"):
                continue

            value = dict_[k]
            if isinstance(value, declarative_props):
                if isinstance(value, declared_attr) and value._cascading:
                    util.warn(
                        "Use of @declared_attr.cascading only applies to "
                        "Declarative 'mixin' and 'abstract' classes.  "
                        "Currently, this flag is ignored on mapped class "
                        "%s" % self.cls
                    )

                value = getattr(cls, k)

            elif (
                isinstance(value, QueryableAttribute)
                and value.class_ is not cls
                and value.key != k
            ):
                # detect a QueryableAttribute that's already mapped being
                # assigned elsewhere in userland, turn into a synonym()
                value = synonym(value.key)
                setattr(cls, k, value)

            if (
                isinstance(value, tuple)
                and len(value) == 1
                and isinstance(value[0], (Column, MapperProperty))
            ):
                util.warn(
                    "Ignoring declarative-like tuple value of attribute "
                    "'%s': possibly a copy-and-paste error with a comma "
                    "accidentally placed at the end of the line?" % k
                )
                continue
            elif not isinstance(value, (Column, MapperProperty)):
                # using @declared_attr for some object that
                # isn't Column/MapperProperty; remove from the dict_
                # and place the evaluated value onto the class.
                if not k.startswith("__"):
                    dict_.pop(k)
                    self._warn_for_decl_attributes(cls, k, value)
                    if not late_mapped:
                        setattr(cls, k, value)
                continue
            # we expect to see the name 'metadata' in some valid cases;
            # however at this point we see it's assigned to something trying
            # to be mapped, so raise for that.
            elif k == "metadata":
                raise exc.InvalidRequestError(
                    "Attribute name 'metadata' is reserved "
                    "for the MetaData instance when using a "
                    "declarative base class."
                )
            prop = clsregistry._deferred_relationship(cls, value)
            our_stuff[k] = prop

    def _extract_declared_columns(self):
        our_stuff = self.properties

        # set up attributes in the order they were created
        our_stuff.sort(key=lambda key: our_stuff[key]._creation_order)

        # extract columns from the class dict
        declared_columns = self.declared_columns
        name_to_prop_key = collections.defaultdict(set)
        for key, c in list(our_stuff.items()):
            if isinstance(c, (ColumnProperty, CompositeProperty)):
                for col in c.columns:
                    if isinstance(col, Column) and col.table is None:
                        _undefer_column_name(key, col)
                        if not isinstance(c, CompositeProperty):
                            name_to_prop_key[col.name].add(key)
                        declared_columns.add(col)
            elif isinstance(c, Column):
                _undefer_column_name(key, c)
                name_to_prop_key[c.name].add(key)
                declared_columns.add(c)
                # if the column is the same name as the key,
                # remove it from the explicit properties dict.
                # the normal rules for assigning column-based properties
                # will take over, including precedence of columns
                # in multi-column ColumnProperties.
                if key == c.key:
                    del our_stuff[key]

        for name, keys in name_to_prop_key.items():
            if len(keys) > 1:
                util.warn(
                    "On class %r, Column object %r named "
                    "directly multiple times, "
                    "only one will be used: %s. "
                    "Consider using orm.synonym instead"
                    % (self.classname, name, (", ".join(sorted(keys))))
                )

    def _setup_table(self):
        cls = self.cls
        tablename = self.tablename
        table_args = self.table_args
        dict_ = self.dict_
        declared_columns = self.declared_columns

        declared_columns = self.declared_columns = sorted(
            declared_columns, key=lambda c: c._creation_order
        )
        table = None

        if hasattr(cls, "__table_cls__"):
            table_cls = util.unbound_method_to_callable(cls.__table_cls__)
        else:
            table_cls = Table

        if "__table__" not in dict_:
            if tablename is not None:

                args, table_kw = (), {}
                if table_args:
                    if isinstance(table_args, dict):
                        table_kw = table_args
                    elif isinstance(table_args, tuple):
                        if isinstance(table_args[-1], dict):
                            args, table_kw = (
                                table_args[0:-1],
                                table_args[-1],
                            )
                        else:
                            args = table_args

                autoload = dict_.get("__autoload__")
                if autoload:
                    table_kw["autoload"] = True

                cls.__table__ = table = table_cls(
                    tablename,
                    cls.metadata,
                    *(tuple(declared_columns) + tuple(args)),
                    **table_kw
                )
        else:
            table = cls.__table__
            if declared_columns:
                for c in declared_columns:
                    if not table.c.contains_column(c):
                        raise exc.ArgumentError(
                            "Can't add additional column %r when "
                            "specifying __table__" % c.key
                        )
        self.local_table = table

    def _setup_inheritance(self):
        table = self.local_table
        cls = self.cls
        table_args = self.table_args
        declared_columns = self.declared_columns

        # since we search for classical mappings now, search for
        # multiple mapped bases as well and raise an error.
        inherits = []
        for c in cls.__bases__:
            c = _resolve_for_abstract_or_classical(c)
            if c is None:
                continue
            if _declared_mapping_info(
                c
            ) is not None and not _get_immediate_cls_attr(
                c, "_sa_decl_prepare_nocascade", strict=True
            ):
                inherits.append(c)

        if inherits:
            if len(inherits) > 1:
                raise exc.InvalidRequestError(
                    "Class %s has multiple mapped bases: %r"
                    % (cls, inherits)
                )
            self.inherits = inherits[0]
        else:
            self.inherits = None

        if (
            table is None
            and self.inherits is None
            and not _get_immediate_cls_attr(cls, "__no_table__")
        ):

            raise exc.InvalidRequestError(
                "Class %r does not have a __table__ or __tablename__ "
                "specified and does not inherit from an existing "
                "table-mapped class." % cls
            )
        elif self.inherits:
            inherited_mapper = _declared_mapping_info(self.inherits)
            inherited_table = inherited_mapper.local_table
            inherited_persist_selectable = (
                inherited_mapper.persist_selectable
            )

            if table is None:
                # single table inheritance.
                # ensure no table args
                if table_args:
                    raise exc.ArgumentError(
                        "Can't place __table_args__ on an inherited class "
                        "with no table."
                    )
                # add any columns declared here to the inherited table.
                for c in declared_columns:
                    if c.name in inherited_table.c:
                        if inherited_table.c[c.name] is c:
                            continue
                        raise exc.ArgumentError(
                            "Column '%s' on class %s conflicts with "
                            "existing column '%s'"
                            % (c, cls, inherited_table.c[c.name])
                        )
                    if c.primary_key:
                        raise exc.ArgumentError(
                            "Can't place primary key columns on an "
                            "inherited class with no table."
                        )
                    inherited_table.append_column(c)
                    if (
                        inherited_persist_selectable is not None
                        and inherited_persist_selectable
                        is not inherited_table
                    ):
                        inherited_persist_selectable._refresh_for_new_column(
                            c
                        )

    def _prepare_mapper_arguments(self):
        properties = self.properties

        if self.mapper_args_fn:
            mapper_args = self.mapper_args_fn()
        else:
            mapper_args = {}

        # make sure that column copies are used rather
        # than the original columns from any mixins
        for k in ("version_id_col", "polymorphic_on"):
            if k in mapper_args:
                v = mapper_args[k]
                mapper_args[k] = self.column_copies.get(v, v)

        assert (
            "inherits" not in mapper_args
        ), "Can't specify 'inherits' explicitly with declarative mappings"

        if self.inherits:
            mapper_args["inherits"] = self.inherits

        if self.inherits and not mapper_args.get("concrete", False):
            # single or joined inheritance
            # exclude any cols on the inherited table which are
            # not mapped on the parent class, to avoid
            # mapping columns specific to sibling/nephew classes
            inherited_mapper = _declared_mapping_info(self.inherits)
            inherited_table = inherited_mapper.local_table

            if "exclude_properties" not in mapper_args:
                mapper_args["exclude_properties"] = exclude_properties = set(
                    [
                        c.key
                        for c in inherited_table.c
                        if c not in inherited_mapper._columntoproperty
                    ]
                ).union(inherited_mapper.exclude_properties or ())
                exclude_properties.difference_update(
                    [c.key for c in self.declared_columns]
                )

            # look through columns in the current mapper that
            # are keyed to a propname different than the colname
            # (if names were the same, we'd have popped it out above,
            # in which case the mapper makes this combination).
            # See if the superclass has a similar column property.
            # If so, join them together.
            for k, col in list(properties.items()):
                if not isinstance(col, expression.ColumnElement):
                    continue
                if k in inherited_mapper._props:
                    p = inherited_mapper._props[k]
                    if isinstance(p, ColumnProperty):
                        # note here we place the subclass column
                        # first.  See [ticket:1892] for background.
                        properties[k] = [col] + p.columns
        result_mapper_args = mapper_args.copy()
        result_mapper_args["properties"] = properties
        self.mapper_args = result_mapper_args

    def map(self):
        self._prepare_mapper_arguments()
        if hasattr(self.cls, "__mapper_cls__"):
            mapper_cls = util.unbound_method_to_callable(
                self.cls.__mapper_cls__
            )
        else:
            mapper_cls = mapper

        self.cls.__mapper__ = mp_ = mapper_cls(
            self.cls, self.local_table, **self.mapper_args
        )
        del self.cls._sa_declared_attr_reg
        return mp_


class _DeferredMapperConfig(_MapperConfig):
    _configs = util.OrderedDict()

    def _early_mapping(self):
        pass

    @property
    def cls(self):
        return self._cls()

    @cls.setter
    def cls(self, class_):
        self._cls = weakref.ref(class_, self._remove_config_cls)
        self._configs[self._cls] = self

    @classmethod
    def _remove_config_cls(cls, ref):
        cls._configs.pop(ref, None)

    @classmethod
    def has_cls(cls, class_):
        # 2.6 fails on weakref if class_ is an old style class
        return (
            isinstance(class_, type)
            and weakref.ref(class_) in cls._configs
        )

    @classmethod
    def raise_unmapped_for_cls(cls, class_):
        if hasattr(class_, "_sa_raise_deferred_config"):
            class_._sa_raise_deferred_config()

        raise orm_exc.UnmappedClassError(
            class_,
            msg="Class %s has a deferred mapping on it.  It is not yet "
            "usable as a mapped class." % orm_exc._safe_cls_name(class_),
        )

    @classmethod
    def config_for_cls(cls, class_):
        return cls._configs[weakref.ref(class_)]

    @classmethod
    def classes_for_base(cls, base_cls, sort=True):
        classes_for_base = [
            m
            for m, cls_ in [(m, m.cls) for m in cls._configs.values()]
            if cls_ is not None and issubclass(cls_, base_cls)
        ]

        if not sort:
            return classes_for_base

        all_m_by_cls = dict((m.cls, m) for m in classes_for_base)

        tuples = []
        for m_cls in all_m_by_cls:
            tuples.extend(
                (all_m_by_cls[base_cls], all_m_by_cls[m_cls])
                for base_cls in m_cls.__bases__
                if base_cls in all_m_by_cls
            )
        return list(topological.sort(tuples, classes_for_base))

    def map(self):
        self._configs.pop(self._cls, None)
        return super(_DeferredMapperConfig, self).map()


def _add_attribute(cls, key, value):
    """add an attribute to an existing declarative class.

    This runs through the logic to determine MapperProperty,
    adds it to the Mapper, adds a column to the mapped Table, etc.

    """

    if "__mapper__" in cls.__dict__:
        if isinstance(value, Column):
            _undefer_column_name(key, value)
            cls.__table__.append_column(value)
            cls.__mapper__.add_property(key, value)
        elif isinstance(value, ColumnProperty):
            for col in value.columns:
                if isinstance(col, Column) and col.table is None:
                    _undefer_column_name(key, col)
                    cls.__table__.append_column(col)
            cls.__mapper__.add_property(key, value)
        elif isinstance(value, MapperProperty):
            cls.__mapper__.add_property(
                key, clsregistry._deferred_relationship(cls, value)
            )
        elif isinstance(value, QueryableAttribute) and value.key != key:
            # detect a QueryableAttribute that's already mapped being
            # assigned elsewhere in userland, turn into a synonym()
            value = synonym(value.key)
            cls.__mapper__.add_property(
                key, clsregistry._deferred_relationship(cls, value)
            )
        else:
            type.__setattr__(cls, key, value)
            cls.__mapper__._expire_memoizations()
    else:
        type.__setattr__(cls, key, value)


def _del_attribute(cls, key):

    if (
        "__mapper__" in cls.__dict__
        and key in cls.__dict__
        and not cls.__mapper__._dispose_called
    ):
        value = cls.__dict__[key]
        if isinstance(
            value,
            (Column, ColumnProperty, MapperProperty, QueryableAttribute),
        ):
            raise NotImplementedError(
                "Can't un-map individual mapped attributes on a mapped "
                "class."
            )
        else:
            type.__delattr__(cls, key)
            cls.__mapper__._expire_memoizations()
    else:
        type.__delattr__(cls, key)


def _declarative_constructor(self, **kwargs):
    """A simple constructor that allows initialization from kwargs.

    Sets attributes on the constructed instance using the names and
    values in ``kwargs``.

    Only keys that are present as
    attributes of the instance's class are allowed. These could be,
    for example, any mapped columns or relationships.
    """
    cls_ = type(self)
    for k in kwargs:
        if not hasattr(cls_, k):
            raise TypeError(
                "%r is an invalid keyword argument for %s"
                % (k, cls_.__name__)
            )
        setattr(self, k, kwargs[k])


_declarative_constructor.__name__ = "__init__"


def _undefer_column_name(key, column):
    if column.key is None:
        column.key = key
    if column.name is None:
        column.name = key
0
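A minimal sketch of the mixin behavior implemented by _scan_attributes() and _produce_column_copies() above: a plain Column on a non-mapped mixin is copied onto each mapped subclass, and a @declared_attr is invoked against the mapped class rather than the mixin. The class and table names here are hypothetical, chosen only for illustration.

from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base, declared_attr

Base = declarative_base()


class TimestampMixin(object):
    # a plain Column on a mixin; _produce_column_copies() places a copy
    # (preserving _creation_order) onto every mapped subclass
    created = Column(String(30))

    # __tablename__ supplied generically; _scan_attributes() evaluates the
    # declared_attr against the mapped class, not against the mixin itself
    @declared_attr
    def __tablename__(cls):
        return cls.__name__.lower()


class Widget(TimestampMixin, Base):  # hypothetical mapped class
    id = Column(Integer, primary_key=True)


assert Widget.__table__.name == "widget"
assert "created" in Widget.__table__.c
# the mapped class received a copy, not the mixin's original Column
assert Widget.__table__.c.created is not TimestampMixin.created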
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/pickleable.py
# testing/pickleable.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Classes used in pickling tests, need to be at the module level for
unpickling.
"""

from . import fixtures


class User(fixtures.ComparableEntity):
    pass


class Order(fixtures.ComparableEntity):
    pass


class Dingaling(fixtures.ComparableEntity):
    pass


class EmailUser(User):
    pass


class Address(fixtures.ComparableEntity):
    pass


# TODO: these are kind of arbitrary....
class Child1(fixtures.ComparableEntity):
    pass


class Child2(fixtures.ComparableEntity):
    pass


class Parent(fixtures.ComparableEntity):
    pass


class Screen(object):
    def __init__(self, obj, parent=None):
        self.obj = obj
        self.parent = parent


class Foo(object):
    def __init__(self, moredata):
        self.data = "im data"
        self.stuff = "im stuff"
        self.moredata = moredata

    __hash__ = object.__hash__

    def __eq__(self, other):
        return (
            other.data == self.data
            and other.stuff == self.stuff
            and other.moredata == self.moredata
        )


class Bar(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

    __hash__ = object.__hash__

    def __eq__(self, other):
        return (
            other.__class__ is self.__class__
            and other.x == self.x
            and other.y == self.y
        )

    def __str__(self):
        return "Bar(%d, %d)" % (self.x, self.y)


class OldSchool:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __eq__(self, other):
        return (
            other.__class__ is self.__class__
            and other.x == self.x
            and other.y == self.y
        )


class OldSchoolWithoutCompare:
    def __init__(self, x, y):
        self.x = x
        self.y = y


class BarWithoutCompare(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        return "Bar(%d, %d)" % (self.x, self.y)


class NotComparable(object):
    def __init__(self, data):
        self.data = data

    def __hash__(self):
        return id(self)

    def __eq__(self, other):
        return NotImplemented

    def __ne__(self, other):
        return NotImplemented


class BrokenComparable(object):
    def __init__(self, data):
        self.data = data

    def __hash__(self):
        return id(self)

    def __eq__(self, other):
        raise NotImplementedError

    def __ne__(self, other):
        raise NotImplementedError
0
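These classes live at module level because pickle records instances by their qualified import path; a class defined inside a test function cannot be found again at unpickling time. A quick sketch of the round-trip these fixtures support, assuming the sqlalchemy testing package is importable:

import pickle

from sqlalchemy.testing.pickleable import Foo

foo = Foo("extra")
# pickle stores the class as sqlalchemy.testing.pickleable.Foo; a class
# defined locally inside a test would raise a PicklingError here instead
restored = pickle.loads(pickle.dumps(foo))
assert restored == foo  # Foo.__eq__ compares data, stuff and moredata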
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/exclusions.py
# testing/exclusions.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php


import contextlib
import operator
import re
import sys

from . import config
from .. import util
from ..util import decorator
from ..util.compat import inspect_getfullargspec


def skip_if(predicate, reason=None):
    rule = compound()
    pred = _as_predicate(predicate, reason)
    rule.skips.add(pred)
    return rule


def fails_if(predicate, reason=None):
    rule = compound()
    pred = _as_predicate(predicate, reason)
    rule.fails.add(pred)
    return rule


class compound(object):
    def __init__(self):
        self.fails = set()
        self.skips = set()
        self.tags = set()

    def __add__(self, other):
        return self.add(other)

    def add(self, *others):
        copy = compound()
        copy.fails.update(self.fails)
        copy.skips.update(self.skips)
        copy.tags.update(self.tags)

        for other in others:
            copy.fails.update(other.fails)
            copy.skips.update(other.skips)
            copy.tags.update(other.tags)
        return copy

    def not_(self):
        copy = compound()
        copy.fails.update(NotPredicate(fail) for fail in self.fails)
        copy.skips.update(NotPredicate(skip) for skip in self.skips)
        copy.tags.update(self.tags)
        return copy

    @property
    def enabled(self):
        return self.enabled_for_config(config._current)

    def enabled_for_config(self, config):
        for predicate in self.skips.union(self.fails):
            if predicate(config):
                return False
        else:
            return True

    def matching_config_reasons(self, config):
        return [
            predicate._as_string(config)
            for predicate in self.skips.union(self.fails)
            if predicate(config)
        ]

    def include_test(self, include_tags, exclude_tags):
        return bool(
            not self.tags.intersection(exclude_tags)
            and (not include_tags or self.tags.intersection(include_tags))
        )

    def _extend(self, other):
        self.skips.update(other.skips)
        self.fails.update(other.fails)
        self.tags.update(other.tags)

    def __call__(self, fn):
        if hasattr(fn, "_sa_exclusion_extend"):
            fn._sa_exclusion_extend._extend(self)
            return fn

        @decorator
        def decorate(fn, *args, **kw):
            return self._do(config._current, fn, *args, **kw)

        decorated = decorate(fn)
        decorated._sa_exclusion_extend = self
        return decorated

    @contextlib.contextmanager
    def fail_if(self):
        all_fails = compound()
        all_fails.fails.update(self.skips.union(self.fails))

        try:
            yield
        except Exception as ex:
            all_fails._expect_failure(config._current, ex)
        else:
            all_fails._expect_success(config._current)

    def _do(self, cfg, fn, *args, **kw):
        for skip in self.skips:
            if skip(cfg):
                msg = "'%s' : %s" % (
                    config.get_current_test_name(),
                    skip._as_string(cfg),
                )
                config.skip_test(msg)

        try:
            return_value = fn(*args, **kw)
        except Exception as ex:
            self._expect_failure(cfg, ex, name=fn.__name__)
        else:
            self._expect_success(cfg, name=fn.__name__)
            return return_value

    def _expect_failure(self, config, ex, name="block"):
        for fail in self.fails:
            if fail(config):
                if util.py2k:
                    str_ex = unicode(ex).encode(  # noqa: F821
                        "utf-8", errors="ignore"
                    )
                else:
                    str_ex = str(ex)
                print(
                    (
                        "%s failed as expected (%s): %s "
                        % (name, fail._as_string(config), str_ex)
                    )
                )
                break
        else:
            util.raise_(ex, with_traceback=sys.exc_info()[2])

    def _expect_success(self, config, name="block"):
        if not self.fails:
            return

        for fail in self.fails:
            if fail(config):
                raise AssertionError(
                    "Unexpected success for '%s' (%s)"
                    % (
                        name,
                        " and ".join(
                            fail._as_string(config) for fail in self.fails
                        ),
                    )
                )


def requires_tag(tagname):
    return tags([tagname])


def tags(tagnames):
    comp = compound()
    comp.tags.update(tagnames)
    return comp


def only_if(predicate, reason=None):
    predicate = _as_predicate(predicate)
    return skip_if(NotPredicate(predicate), reason)


def succeeds_if(predicate, reason=None):
    predicate = _as_predicate(predicate)
    return fails_if(NotPredicate(predicate), reason)


class Predicate(object):
    @classmethod
    def as_predicate(cls, predicate, description=None):
        if isinstance(predicate, compound):
            return cls.as_predicate(
                predicate.enabled_for_config, description
            )
        elif isinstance(predicate, Predicate):
            if description and predicate.description is None:
                predicate.description = description
            return predicate
        elif isinstance(predicate, (list, set)):
            return OrPredicate(
                [cls.as_predicate(pred) for pred in predicate], description
            )
        elif isinstance(predicate, tuple):
            return SpecPredicate(*predicate)
        elif isinstance(predicate, util.string_types):
            tokens = re.match(
                r"([\+\w]+)\s*(?:(>=|==|!=|<=|<|>)\s*([\d\.]+))?", predicate
            )
            if not tokens:
                raise ValueError(
                    "Couldn't locate DB name in predicate: %r" % predicate
                )
            db = tokens.group(1)
            op = tokens.group(2)
            spec = (
                tuple(int(d) for d in tokens.group(3).split("."))
                if tokens.group(3)
                else None
            )

            return SpecPredicate(db, op, spec, description=description)
        elif util.callable(predicate):
            return LambdaPredicate(predicate, description)
        else:
            assert False, "unknown predicate type: %s" % predicate

    def _format_description(self, config, negate=False):
        bool_ = self(config)
        if negate:
            bool_ = not negate
        return self.description % {
            "driver": config.db.url.get_driver_name()
            if config
            else "<no driver>",
            "database": config.db.url.get_backend_name()
            if config
            else "<no database>",
            "doesnt_support": "doesn't support"
            if bool_
            else "does support",
            "does_support": "does support"
            if bool_
            else "doesn't support",
        }

    def _as_string(self, config=None, negate=False):
        raise NotImplementedError()


class BooleanPredicate(Predicate):
    def __init__(self, value, description=None):
        self.value = value
        self.description = description or "boolean %s" % value

    def __call__(self, config):
        return self.value

    def _as_string(self, config, negate=False):
        return self._format_description(config, negate=negate)


class SpecPredicate(Predicate):
    def __init__(self, db, op=None, spec=None, description=None):
        self.db = db
        self.op = op
        self.spec = spec
        self.description = description

    _ops = {
        "<": operator.lt,
        ">": operator.gt,
        "==": operator.eq,
        "!=": operator.ne,
        "<=": operator.le,
        ">=": operator.ge,
        "in": operator.contains,
        "between": lambda val, pair: val >= pair[0] and val <= pair[1],
    }

    def __call__(self, config):
        engine = config.db

        if "+" in self.db:
            dialect, driver = self.db.split("+")
        else:
            dialect, driver = self.db, None

        if dialect and engine.name != dialect:
            return False
        if driver is not None and engine.driver != driver:
            return False

        if self.op is not None:
            assert driver is None, "DBAPI version specs not supported yet"

            version = _server_version(engine)
            oper = (
                hasattr(self.op, "__call__")
                and self.op
                or self._ops[self.op]
            )
            return oper(version, self.spec)
        else:
            return True

    def _as_string(self, config, negate=False):
        if self.description is not None:
            return self._format_description(config)
        elif self.op is None:
            if negate:
                return "not %s" % self.db
            else:
                return "%s" % self.db
        else:
            if negate:
                return "not %s %s %s" % (self.db, self.op, self.spec)
            else:
                return "%s %s %s" % (self.db, self.op, self.spec)


class LambdaPredicate(Predicate):
    def __init__(self, lambda_, description=None, args=None, kw=None):
        spec = inspect_getfullargspec(lambda_)
        if not spec[0]:
            self.lambda_ = lambda db: lambda_()
        else:
            self.lambda_ = lambda_
        self.args = args or ()
        self.kw = kw or {}
        if description:
            self.description = description
        elif lambda_.__doc__:
            self.description = lambda_.__doc__
        else:
            self.description = "custom function"

    def __call__(self, config):
        return self.lambda_(config)

    def _as_string(self, config, negate=False):
        return self._format_description(config)


class NotPredicate(Predicate):
    def __init__(self, predicate, description=None):
        self.predicate = predicate
        self.description = description

    def __call__(self, config):
        return not self.predicate(config)

    def _as_string(self, config, negate=False):
        if self.description:
            return self._format_description(config, not negate)
        else:
            return self.predicate._as_string(config, not negate)


class OrPredicate(Predicate):
    def __init__(self, predicates, description=None):
        self.predicates = predicates
        self.description = description

    def __call__(self, config):
        for pred in self.predicates:
            if pred(config):
                return True
        return False

    def _eval_str(self, config, negate=False):
        if negate:
            conjunction = " and "
        else:
            conjunction = " or "
        return conjunction.join(
            p._as_string(config, negate=negate) for p in self.predicates
        )

    def _negation_str(self, config):
        if self.description is not None:
            return "Not " + self._format_description(config)
        else:
            return self._eval_str(config, negate=True)

    def _as_string(self, config, negate=False):
        if negate:
            return self._negation_str(config)
        else:
            if self.description is not None:
                return self._format_description(config)
            else:
                return self._eval_str(config)


_as_predicate = Predicate.as_predicate


def _is_excluded(db, op, spec):
    return SpecPredicate(db, op, spec)(config._current)


def _server_version(engine):
    """Return a server_version_info tuple."""

    # force metadata to be retrieved
    conn = engine.connect()
    version = getattr(engine.dialect, "server_version_info", None)
    if version is None:
        version = ()
    conn.close()
    return version


def db_spec(*dbs):
    return OrPredicate([Predicate.as_predicate(db) for db in dbs])


def open():  # noqa
    return skip_if(BooleanPredicate(False, "mark as execute"))


def closed():
    return skip_if(BooleanPredicate(True, "marked as skip"))


def fails(reason=None):
    return fails_if(BooleanPredicate(True, reason or "expected to fail"))


@decorator
def future(fn, *arg):
    return fails_if(LambdaPredicate(fn), "Future feature")


def fails_on(db, reason=None):
    return fails_if(db, reason)


def fails_on_everything_except(*dbs):
    return succeeds_if(
        OrPredicate([Predicate.as_predicate(db) for db in dbs])
    )


def skip(db, reason=None):
    return skip_if(db, reason)


def only_on(dbs, reason=None):
    return only_if(
        OrPredicate(
            [Predicate.as_predicate(db, reason) for db in util.to_list(dbs)]
        )
    )


def exclude(db, op, spec, reason=None):
    return skip_if(SpecPredicate(db, op, spec), reason)


def against(config, *queries):
    assert queries, "no queries sent!"
    return OrPredicate(
        [Predicate.as_predicate(query) for query in queries]
    )(config)
0
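The string form accepted by Predicate.as_predicate() above parses a dialect (optionally dialect+driver) name plus an optional version comparison into a SpecPredicate. A hedged sketch of how these rules are typically combined; the backend names and reasons are examples only:

from sqlalchemy.testing import exclusions

# combine rules: skip outright on SQLite, expect failure on old PostgreSQL;
# compound.__add__ merges the skips/fails sets of both rules
rule = exclusions.skip_if("sqlite", "no support here") + exclusions.fails_if(
    "postgresql < 8.4", "feature added in 8.4"
)

# the string form is parsed into db name, operator and version tuple
pred = exclusions.Predicate.as_predicate("postgresql+psycopg2 >= 9.4")
assert (pred.db, pred.op, pred.spec) == ("postgresql+psycopg2", ">=", (9, 4))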
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/config.py
# testing/config.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

import collections

requirements = None
db = None
db_url = None
db_opts = None
file_config = None
test_schema = None
test_schema_2 = None
_current = None

_fixture_functions = None  # installed by plugin_base


def combinations(*comb, **kw):
    r"""Deliver multiple versions of a test based on positional combinations.

    This is a facade over pytest.mark.parametrize.


    :param \*comb: argument combinations.  These are tuples that will be
     passed positionally to the decorated function.

    :param argnames: optional list of argument names.  These are the names
     of the arguments in the test function that correspond to the entries
     in each argument tuple.  pytest.mark.parametrize requires this, however
     the combinations function will derive it automatically if not present
     by using ``inspect.getfullargspec(fn).args[1:]``.  Note this assumes the
     first argument is "self" which is discarded.

    :param id\_: optional id template.  This is a string template that
     describes how the "id" for each parameter set should be defined, if
     any.  The number of characters in the template should match the number
     of entries in each argument tuple.  Each character describes how the
     corresponding entry in the argument tuple should be handled, as far as
     whether or not it is included in the arguments passed to the function,
     as well as if it is included in the tokens used to create the id of the
     parameter set.

     If omitted, the argument combinations are passed to parametrize as is.
     If passed, each argument combination is turned into a pytest.param()
     object, mapping the elements of the argument tuple to produce an id
     based on a character value in the same position within the string
     template using the following scheme::

        i - the given argument is a string that is part of the id only,
            don't pass it as an argument

        n - the given argument should be passed and it should be added to
            the id by calling the .__name__ attribute

        r - the given argument should be passed and it should be added to
            the id by calling repr()

        s - the given argument should be passed and it should be added to
            the id by calling str()

        a - (argument) the given argument should be passed and it should
            not be used to generated the id

     e.g.::

        @testing.combinations(
            (operator.eq, "eq"),
            (operator.ne, "ne"),
            (operator.gt, "gt"),
            (operator.lt, "lt"),
            id_="na"
        )
        def test_operator(self, opfunc, name):
            pass

    The above combination will call ``.__name__`` on the first member of
    each tuple and use that as the "id" to pytest.param().


    """
    return _fixture_functions.combinations(*comb, **kw)


def fixture(*arg, **kw):
    return _fixture_functions.fixture(*arg, **kw)


def get_current_test_name():
    return _fixture_functions.get_current_test_name()


class Config(object):
    def __init__(self, db, db_opts, options, file_config):
        self._set_name(db)
        self.db = db
        self.db_opts = db_opts
        self.options = options
        self.file_config = file_config
        self.test_schema = "test_schema"
        self.test_schema_2 = "test_schema_2"

    _stack = collections.deque()
    _configs = set()

    def _set_name(self, db):
        if db.dialect.server_version_info:
            svi = ".".join(
                str(tok) for tok in db.dialect.server_version_info
            )
            self.name = "%s+%s_[%s]" % (db.name, db.driver, svi)
        else:
            self.name = "%s+%s" % (db.name, db.driver)

    @classmethod
    def register(cls, db, db_opts, options, file_config):
        """add a config as one of the global configs.

        If there are no configs set up yet, this config also
        gets set as the "_current".
        """
        cfg = Config(db, db_opts, options, file_config)
        cls._configs.add(cfg)
        return cfg

    @classmethod
    def set_as_current(cls, config, namespace):
        global db, _current, db_url, test_schema, test_schema_2, db_opts
        _current = config
        db_url = config.db.url
        db_opts = config.db_opts
        test_schema = config.test_schema
        test_schema_2 = config.test_schema_2
        namespace.db = db = config.db

    @classmethod
    def push_engine(cls, db, namespace):
        assert _current, "Can't push without a default Config set up"
        cls.push(
            Config(
                db, _current.db_opts, _current.options, _current.file_config
            ),
            namespace,
        )

    @classmethod
    def push(cls, config, namespace):
        cls._stack.append(_current)
        cls.set_as_current(config, namespace)

    @classmethod
    def reset(cls, namespace):
        if cls._stack:
            cls.set_as_current(cls._stack[0], namespace)
            cls._stack.clear()

    @classmethod
    def all_configs(cls):
        return cls._configs

    @classmethod
    def all_dbs(cls):
        for cfg in cls.all_configs():
            yield cfg.db

    def skip_test(self, msg):
        skip_test(msg)


def skip_test(msg):
    raise _fixture_functions.skip_test_exception(msg)
0
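A sketch of how the Config registry above is typically driven. The URL is a placeholder, and normally plugin_base performs these steps; set_as_current() rebinds the module-level db/db_url/db_opts globals that tests import, and also sets a "db" attribute on whatever namespace object is passed in:

import types

from sqlalchemy import create_engine
from sqlalchemy.testing import config as testing_config

engine = create_engine("sqlite://")  # placeholder URL for illustration
cfg = testing_config.Config.register(engine, {}, None, None)

# namespace is whatever object should receive the "db" attribute;
# a SimpleNamespace stands in for the testing package here
ns = types.SimpleNamespace()
testing_config.Config.set_as_current(cfg, ns)

assert testing_config.db is engine
assert ns.db is engine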
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/engines.py
# testing/engines.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

from __future__ import absolute_import

import re
import warnings
import weakref

from . import config
from . import uses_deprecated
from .util import decorator
from .. import event
from .. import pool


class ConnectionKiller(object):
    def __init__(self):
        self.proxy_refs = weakref.WeakKeyDictionary()
        self.testing_engines = weakref.WeakKeyDictionary()
        self.conns = set()

    def add_engine(self, engine):
        self.testing_engines[engine] = True

    def connect(self, dbapi_conn, con_record):
        self.conns.add((dbapi_conn, con_record))

    def checkout(self, dbapi_con, con_record, con_proxy):
        self.proxy_refs[con_proxy] = True

    def invalidate(self, dbapi_con, con_record, exception):
        self.conns.discard((dbapi_con, con_record))

    def _safe(self, fn):
        try:
            fn()
        except Exception as e:
            warnings.warn(
                "testing_reaper couldn't "
                "rollback/close connection: %s" % e
            )

    def rollback_all(self):
        for rec in list(self.proxy_refs):
            if rec is not None and rec.is_valid:
                self._safe(rec.rollback)

    def close_all(self):
        for rec in list(self.proxy_refs):
            if rec is not None and rec.is_valid:
                self._safe(rec._close)

    def _after_test_ctx(self):
        # this can cause a deadlock with pg8000 - pg8000 acquires
        # prepared statement lock inside of rollback() - if async gc
        # is collecting in finalize_fairy, deadlock.
        # not sure if this should be if pypy/jython only.
        # note that firebird/fdb definitely needs this though
        for conn, rec in list(self.conns):
            if rec.connection is None:
                # this is a hint that the connection is closed, which
                # is causing segfaults on mysqlclient due to
                # https://github.com/PyMySQL/mysqlclient-python/issues/270;
                # try to work around here
                continue
            self._safe(conn.rollback)

    def _stop_test_ctx(self):
        if config.options.low_connections:
            self._stop_test_ctx_minimal()
        else:
            self._stop_test_ctx_aggressive()

    @uses_deprecated()
    def _stop_test_ctx_minimal(self):
        self.close_all()

        self.conns = set()

        for rec in list(self.testing_engines):
            if rec is not config.db:
                rec.dispose()

    @uses_deprecated()
    def _stop_test_ctx_aggressive(self):
        self.close_all()
        for conn, rec in list(self.conns):
            self._safe(conn.close)
            rec.connection = None

        self.conns = set()
        for rec in list(self.testing_engines):
            rec.dispose()

    def assert_all_closed(self):
        for rec in self.proxy_refs:
            if rec.is_valid:
                assert False


testing_reaper = ConnectionKiller()


def drop_all_tables(metadata, bind):
    testing_reaper.close_all()
    if hasattr(bind, "close"):
        bind.close()

    if not config.db.dialect.supports_alter:
        from . import assertions

        with assertions.expect_warnings(
            "Can't sort tables", assert_=False
        ):
            metadata.drop_all(bind)
    else:
        metadata.drop_all(bind)


@decorator
def assert_conns_closed(fn, *args, **kw):
    try:
        fn(*args, **kw)
    finally:
        testing_reaper.assert_all_closed()


@decorator
def rollback_open_connections(fn, *args, **kw):
    """Decorator that rolls back all open connections after fn execution."""

    try:
        fn(*args, **kw)
    finally:
        testing_reaper.rollback_all()


@decorator
def close_first(fn, *args, **kw):
    """Decorator that closes all connections before fn execution."""

    testing_reaper.close_all()
    fn(*args, **kw)


@decorator
def close_open_connections(fn, *args, **kw):
    """Decorator that closes all connections after fn execution."""
    try:
        fn(*args, **kw)
    finally:
        testing_reaper.close_all()


def all_dialects(exclude=None):
    import sqlalchemy.databases as d

    for name in d.__all__:
        # TEMPORARY
        if exclude and name in exclude:
            continue
        mod = getattr(d, name, None)
        if not mod:
            mod = getattr(
                __import__("sqlalchemy.databases.%s" % name).databases, name
            )
        yield mod.dialect()


class ReconnectFixture(object):
    def __init__(self, dbapi):
        self.dbapi = dbapi
        self.connections = []
        self.is_stopped = False

    def __getattr__(self, key):
        return getattr(self.dbapi, key)

    def connect(self, *args, **kwargs):

        conn = self.dbapi.connect(*args, **kwargs)
        if self.is_stopped:
            self._safe(conn.close)
            curs = conn.cursor()  # should fail on Oracle etc.
            # should fail for everything that didn't fail
            # above, connection is closed
            curs.execute("select 1")
            assert False, "simulated connect failure didn't work"
        else:
            self.connections.append(conn)
            return conn

    def _safe(self, fn):
        try:
            fn()
        except Exception as e:
            warnings.warn(
                "ReconnectFixture couldn't close connection: %s" % e
            )

    def shutdown(self, stop=False):
        # TODO: this doesn't cover all cases
        # as nicely as we'd like, namely MySQLdb.
        # would need to implement R. Brewer's
        # proxy server idea to get better
        # coverage.
        self.is_stopped = stop
        for c in list(self.connections):
            self._safe(c.close)
        self.connections = []

    def restart(self):
        self.is_stopped = False


def reconnecting_engine(url=None, options=None):
    url = url or config.db.url
    dbapi = config.db.dialect.dbapi
    if not options:
        options = {}
    options["module"] = ReconnectFixture(dbapi)
    engine = testing_engine(url, options)
    _dispose = engine.dispose

    def dispose():
        engine.dialect.dbapi.shutdown()
        engine.dialect.dbapi.is_stopped = False
        _dispose()

    engine.test_shutdown = engine.dialect.dbapi.shutdown
    engine.test_restart = engine.dialect.dbapi.restart
    engine.dispose = dispose
    return engine


def testing_engine(url=None, options=None):
    """Produce an engine configured by --options with optional overrides."""

    from sqlalchemy import create_engine
    from sqlalchemy.engine.url import make_url

    if not options:
        use_reaper = True
    else:
        use_reaper = options.pop("use_reaper", True)

    url = url or config.db.url

    url = make_url(url)
    if options is None:
        if config.db is None or url.drivername == config.db.url.drivername:
            options = config.db_opts
        else:
            options = {}
    elif (
        config.db is not None
        and url.drivername == config.db.url.drivername
    ):
        default_opt = config.db_opts.copy()
        default_opt.update(options)

    engine = create_engine(url, **options)
    engine._has_events = True  # enable event blocks, helps with profiling

    if isinstance(engine.pool, pool.QueuePool):
        engine.pool._timeout = 0
        engine.pool._max_overflow = 0
    if use_reaper:
        event.listen(engine.pool, "connect", testing_reaper.connect)
        event.listen(engine.pool, "checkout", testing_reaper.checkout)
        event.listen(engine.pool, "invalidate", testing_reaper.invalidate)
        testing_reaper.add_engine(engine)

    return engine


def mock_engine(dialect_name=None):
    """Provides a mocking engine based on the current testing.db.

    This is normally used to test DDL generation flow as emitted
    by an Engine.

    It should not be used in other cases, as assert_compile() and
    assert_sql_execution() are much better choices with fewer
    moving parts.

    """

    from sqlalchemy import create_engine

    if not dialect_name:
        dialect_name = config.db.name

    buffer = []

    def executor(sql, *a, **kw):
        buffer.append(sql)

    def assert_sql(stmts):
        recv = [re.sub(r"[\n\t]", "", str(s)) for s in buffer]
        assert recv == stmts, recv

    def print_sql():
        d = engine.dialect
        return "\n".join(str(s.compile(dialect=d)) for s in engine.mock)

    engine = create_engine(
        dialect_name + "://", strategy="mock", executor=executor
    )
    assert not hasattr(engine, "mock")
    engine.mock = buffer
    engine.assert_sql = assert_sql
    engine.print_sql = print_sql
    return engine


class DBAPIProxyCursor(object):
    """Proxy a DBAPI cursor.

    Tests can provide subclasses of this to intercept
    DBAPI-level cursor operations.

    """

    def __init__(self, engine, conn, *args, **kwargs):
        self.engine = engine
        self.connection = conn
        self.cursor = conn.cursor(*args, **kwargs)

    def execute(self, stmt, parameters=None, **kw):
        if parameters:
            return self.cursor.execute(stmt, parameters, **kw)
        else:
            return self.cursor.execute(stmt, **kw)

    def executemany(self, stmt, params, **kw):
        return self.cursor.executemany(stmt, params, **kw)

    def __getattr__(self, key):
        return getattr(self.cursor, key)


class DBAPIProxyConnection(object):
    """Proxy a DBAPI connection.

    Tests can provide subclasses of this to intercept
    DBAPI-level connection operations.

    """

    def __init__(self, engine, cursor_cls):
        self.conn = self._sqla_unwrap = engine.pool._creator()
        self.engine = engine
        self.cursor_cls = cursor_cls

    def cursor(self, *args, **kwargs):
        return self.cursor_cls(self.engine, self.conn, *args, **kwargs)

    def close(self):
        self.conn.close()

    def __getattr__(self, key):
        return getattr(self.conn, key)


def proxying_engine(
    conn_cls=DBAPIProxyConnection, cursor_cls=DBAPIProxyCursor
):
    """Produce an engine that provides proxy hooks for
    common methods.

    """

    def mock_conn():
        return conn_cls(config.db, cursor_cls)

    return testing_engine(options={"creator": mock_conn})
0
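A sketch of the DBAPI proxy hooks defined above: subclass DBAPIProxyCursor to observe statements at the DBAPI level, then build an engine with proxying_engine(). This assumes a configured testing database is already in place, since config.db supplies both the URL and the pool creator:

from sqlalchemy.testing import engines

seen = []


class LoggingCursor(engines.DBAPIProxyCursor):
    """Hypothetical cursor subclass recording every statement string."""

    def execute(self, stmt, parameters=None, **kw):
        seen.append(stmt)  # record the raw DBAPI statement
        return super(LoggingCursor, self).execute(stmt, parameters, **kw)


engine = engines.proxying_engine(cursor_cls=LoggingCursor)
with engine.connect() as conn:
    conn.execute("select 1")

assert any("select 1" in s for s in seen)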
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/util.py
# testing/util.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

import decimal
import gc
import random
import sys
import time
import types

from ..util import decorator
from ..util import defaultdict
from ..util import inspect_getfullargspec
from ..util import jython
from ..util import py2k
from ..util import pypy


if jython:

    def jython_gc_collect(*args):
        """aggressive gc.collect for tests."""
        gc.collect()
        time.sleep(0.1)
        gc.collect()
        gc.collect()
        return 0

    # "lazy" gc, for VM's that don't GC on refcount == 0
    gc_collect = lazy_gc = jython_gc_collect

elif pypy:

    def pypy_gc_collect(*args):
        gc.collect()
        gc.collect()

    gc_collect = lazy_gc = pypy_gc_collect

else:
    # assume CPython - straight gc.collect, lazy_gc() is a pass
    gc_collect = gc.collect

    def lazy_gc():
        pass


def picklers():
    picklers = set()
    if py2k:
        try:
            import cPickle

            picklers.add(cPickle)
        except ImportError:
            pass

    import pickle

    picklers.add(pickle)

    # yes, this thing needs this much testing
    for pickle_ in picklers:
        for protocol in range(-2, pickle.HIGHEST_PROTOCOL):
            yield pickle_.loads, lambda d: pickle_.dumps(d, protocol)


def round_decimal(value, prec):
    if isinstance(value, float):
        return round(value, prec)

    # can also use shift() here but that is 2.6 only
    return (value * decimal.Decimal("1" + "0" * prec)).to_integral(
        decimal.ROUND_FLOOR
    ) / pow(10, prec)


class RandomSet(set):
    def __iter__(self):
        l = list(set.__iter__(self))
        random.shuffle(l)
        return iter(l)

    def pop(self):
        index = random.randint(0, len(self) - 1)
        item = list(set.__iter__(self))[index]
        self.remove(item)
        return item

    def union(self, other):
        return RandomSet(set.union(self, other))

    def difference(self, other):
        return RandomSet(set.difference(self, other))

    def intersection(self, other):
        return RandomSet(set.intersection(self, other))

    def copy(self):
        return RandomSet(self)


def conforms_partial_ordering(tuples, sorted_elements):
    """True if the given sorting conforms to the given partial
    ordering.

    """

    deps = defaultdict(set)
    for parent, child in tuples:
        deps[parent].add(child)

    for i, node in enumerate(sorted_elements):
        for n in sorted_elements[i:]:
            if node in deps[n]:
                return False
    else:
        return True


def all_partial_orderings(tuples, elements):
    edges = defaultdict(set)
    for parent, child in tuples:
        edges[child].add(parent)

    def _all_orderings(elements):

        if len(elements) == 1:
            yield list(elements)
        else:
            for elem in elements:
                subset = set(elements).difference([elem])
                if not subset.intersection(edges[elem]):
                    for sub_ordering in _all_orderings(subset):
                        yield [elem] + sub_ordering

    return iter(_all_orderings(elements))


def function_named(fn, name):
    """Return a function with a given __name__.

    Will assign to __name__ and return the original
    function if possible on the Python implementation,
    otherwise a new function will be constructed.

    This function should be phased out as much as possible
    in favor of @decorator.  Tests that "generate" many named tests
    should be modernized.

    """
    try:
        fn.__name__ = name
    except TypeError:
        fn = types.FunctionType(
            fn.__code__,
            fn.__globals__,
            name,
            fn.__defaults__,
            fn.__closure__,
        )
    return fn


def run_as_contextmanager(ctx, fn, *arg, **kw):
    """Run the given function under the given contextmanager,
    simulating the behavior of 'with' to support older
    Python versions.

    This is not necessary anymore as we have placed 2.6
    as minimum Python version, however some tests are still using
    this structure.

    """

    obj = ctx.__enter__()
    try:
        result = fn(obj, *arg, **kw)
        ctx.__exit__(None, None, None)
        return result
    except:  # noqa
        exc_info = sys.exc_info()
        raise_ = ctx.__exit__(*exc_info)
        if not raise_:
            raise
        else:
            return raise_


def rowset(results):
    """Converts the results of sql execution into a plain set of
    column tuples.

    Useful for asserting the results of an unordered query.
    """

    return {tuple(row) for row in results}


def fail(msg):
    assert False, msg


@decorator
def provide_metadata(fn, *args, **kw):
    """Provide bound MetaData for a single test, dropping afterwards."""

    from . import config
    from . import engines
    from sqlalchemy import schema

    metadata = schema.MetaData(config.db)
    self = args[0]
    prev_meta = getattr(self, "metadata", None)
    self.metadata = metadata
    try:
        return fn(*args, **kw)
    finally:
        engines.drop_all_tables(metadata, config.db)
        self.metadata = prev_meta


def flag_combinations(*combinations):
    """A facade around @testing.combinations() oriented towards boolean
    keyword-based arguments.

    Basically generates a nice looking identifier based on the keywords
    and also sets up the argument names.

    E.g.::

        @testing.flag_combinations(
            dict(lazy=False, passive=False),
            dict(lazy=True, passive=False),
            dict(lazy=False, passive=True),
            dict(lazy=False, passive=True, raiseload=True),
        )


    would result in::

        @testing.combinations(
            ('', False, False, False),
            ('lazy', True, False, False),
            ('lazy_passive', True, True, False),
            ('lazy_passive', True, True, True),
            id_='iaaa',
            argnames='lazy,passive,raiseload'
        )

    """
    from . import config

    keys = set()

    for d in combinations:
        keys.update(d)

    keys = sorted(keys)

    return config.combinations(
        *[
            ("_".join(k for k in keys if d.get(k, False)),)
            + tuple(d.get(k, False) for k in keys)
            for d in combinations
        ],
        id_="i" + ("a" * len(keys)),
        argnames=",".join(keys)
    )


def resolve_lambda(__fn, **kw):
    """Given a no-arg lambda and a namespace, return a new lambda that
    has all the values filled in.

    This is used so that we can have module-level fixtures that
    refer to instance-level variables using lambdas.

    """

    pos_args = inspect_getfullargspec(__fn)[0]
    pass_pos_args = {arg: kw.pop(arg) for arg in pos_args}
    glb = dict(__fn.__globals__)
    glb.update(kw)
    new_fn = types.FunctionType(__fn.__code__, glb)
    return new_fn(**pass_pos_args)


def metadata_fixture(ddl="function"):
    """Provide MetaData for a pytest fixture."""

    from . import config

    def decorate(fn):
        def run_ddl(self):
            from sqlalchemy import schema

            metadata = self.metadata = schema.MetaData()
            try:
                result = fn(self, metadata)
                metadata.create_all(config.db)
                # TODO:
                # somehow get a per-function dml erase fixture here
                yield result
            finally:
                metadata.drop_all(config.db)

        return config.fixture(scope=ddl)(run_ddl)

    return decorate


def force_drop_names(*names):
    """Force the given table names to be dropped after test complete,
    isolating for foreign key cycles

    """
    from . import config
    from sqlalchemy import inspect

    @decorator
    def go(fn, *args, **kw):

        try:
            return fn(*args, **kw)
        finally:
            drop_all_tables(
                config.db, inspect(config.db), include_names=names
            )

    return go


class adict(dict):
    """Dict keys available as attributes.  Shadows."""

    def __getattribute__(self, key):
        try:
            return self[key]
        except KeyError:
            return dict.__getattribute__(self, key)

    def __call__(self, *keys):
        return tuple([self[key] for key in keys])

    get_all = __call__


def drop_all_tables(engine, inspector, schema=None, include_names=None):
    from sqlalchemy import (
        Column,
        Table,
        Integer,
        MetaData,
        ForeignKeyConstraint,
    )
    from sqlalchemy.schema import DropTable, DropConstraint

    if include_names is not None:
        include_names = set(include_names)

    with engine.connect() as conn:
        for tname, fkcs in reversed(
            inspector.get_sorted_table_and_fkc_names(schema=schema)
        ):
            if tname:
                if include_names is not None and tname not in include_names:
                    continue
                conn.execute(
                    DropTable(Table(tname, MetaData(), schema=schema))
                )
            elif fkcs:
                if not engine.dialect.supports_alter:
                    continue
                for tname, fkc in fkcs:
                    if (
                        include_names is not None
                        and tname not in include_names
                    ):
                        continue
                    tb = Table(
                        tname,
                        MetaData(),
                        Column("x", Integer),
                        Column("y", Integer),
                        schema=schema,
                    )
                    conn.execute(
                        DropConstraint(
                            ForeignKeyConstraint(
                                [tb.c.x], [tb.c.y], name=fkc
                            )
                        )
                    )


def teardown_events(event_cls):
    @decorator
    def decorate(fn, *arg, **kw):
        try:
            return fn(*arg, **kw)
        finally:
            event_cls._clear()

    return decorate
0
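The topological helpers above can be exercised directly. A small sketch checking a sort against its (parent, child) dependency tuples; the element names are arbitrary:

from sqlalchemy.testing.util import (
    all_partial_orderings,
    conforms_partial_ordering,
)

# "a" must precede "b", and "b" must precede "c"
tuples = [("a", "b"), ("b", "c")]

assert conforms_partial_ordering(tuples, ["a", "b", "c"])
assert not conforms_partial_ordering(tuples, ["b", "a", "c"])

# only one total ordering satisfies both constraints here
assert list(all_partial_orderings(tuples, {"a", "b", "c"})) == [
    ["a", "b", "c"]
]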
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/warnings.py
# testing/warnings.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

from __future__ import absolute_import

import warnings

from . import assertions
from .. import exc as sa_exc


def setup_filters():
    """Set global warning behavior for the test suite."""

    warnings.filterwarnings(
        "ignore", category=sa_exc.SAPendingDeprecationWarning
    )
    warnings.filterwarnings("error", category=sa_exc.SADeprecationWarning)
    warnings.filterwarnings("error", category=sa_exc.SAWarning)

    warnings.filterwarnings(
        "ignore",
        category=sa_exc.SAWarning,
        message=r"Oracle compatibility version .* is known to have a "
        "maximum identifier",
    )

    # some selected deprecations...
    warnings.filterwarnings("error", category=DeprecationWarning)
    warnings.filterwarnings(
        "ignore", category=DeprecationWarning, message=".*StopIteration"
    )
    warnings.filterwarnings(
        "ignore",
        category=DeprecationWarning,
        message=".*inspect.getargspec",
    )

    try:
        import pytest
    except ImportError:
        pass
    else:
        warnings.filterwarnings(
            "once", category=pytest.PytestDeprecationWarning
        )


def assert_warnings(fn, warning_msgs, regex=False):
    """Assert that each of the given warnings are emitted by fn.

    Deprecated.  Please use assertions.expect_warnings().

    """

    with assertions._expect_warnings(
        sa_exc.SAWarning, warning_msgs, regex=regex
    ):
        return fn()
0
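The effect of setup_filters() above is that SAWarning and SADeprecationWarning escalate to exceptions for the test suite, while SAPendingDeprecationWarning stays silent. A quick sketch of that behavior (this mutates the process-wide warning filters, so it is only appropriate in a throwaway interpreter):

import warnings

from sqlalchemy import exc as sa_exc
from sqlalchemy.testing.warnings import setup_filters

setup_filters()

try:
    warnings.warn(sa_exc.SAWarning("something suspicious"))
except sa_exc.SAWarning:
    pass  # escalated to an exception by the "error" filter
else:
    raise AssertionError("SAWarning should have raised")

# pending deprecations remain ignored by the first filter installed
warnings.warn(sa_exc.SAPendingDeprecationWarning("quiet"))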
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/__init__.py
# testing/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php


from . import config  # noqa
from . import mock  # noqa
from .assertions import assert_raises  # noqa
from .assertions import assert_raises_context_ok  # noqa
from .assertions import assert_raises_message  # noqa
from .assertions import assert_raises_message_context_ok  # noqa
from .assertions import assert_raises_return  # noqa
from .assertions import AssertsCompiledSQL  # noqa
from .assertions import AssertsExecutionResults  # noqa
from .assertions import ComparesTables  # noqa
from .assertions import emits_warning  # noqa
from .assertions import emits_warning_on  # noqa
from .assertions import eq_  # noqa
from .assertions import eq_ignore_whitespace  # noqa
from .assertions import eq_regex  # noqa
from .assertions import expect_deprecated  # noqa
from .assertions import expect_warnings  # noqa
from .assertions import in_  # noqa
from .assertions import is_  # noqa
from .assertions import is_false  # noqa
from .assertions import is_instance_of  # noqa
from .assertions import is_not_  # noqa
from .assertions import is_true  # noqa
from .assertions import le_  # noqa
from .assertions import ne_  # noqa
from .assertions import not_in_  # noqa
from .assertions import startswith_  # noqa
from .assertions import uses_deprecated  # noqa
from .config import combinations  # noqa
from .config import db  # noqa
from .config import fixture  # noqa
from .config import requirements as requires  # noqa
from .exclusions import _is_excluded  # noqa
from .exclusions import _server_version  # noqa
from .exclusions import against as _against  # noqa
from .exclusions import db_spec  # noqa
from .exclusions import exclude  # noqa
from .exclusions import fails  # noqa
from .exclusions import fails_if  # noqa
from .exclusions import fails_on  # noqa
from .exclusions import fails_on_everything_except  # noqa
from .exclusions import future  # noqa
from .exclusions import only_if  # noqa
from .exclusions import only_on  # noqa
from .exclusions import skip  # noqa
from .exclusions import skip_if  # noqa
from .util import adict  # noqa
from .util import fail  # noqa
from .util import flag_combinations  # noqa
from .util import force_drop_names  # noqa
from .util import metadata_fixture  # noqa
from .util import provide_metadata  # noqa
from .util import resolve_lambda  # noqa
from .util import rowset  # noqa
from .util import run_as_contextmanager  # noqa
from .util import teardown_events  # noqa
from .warnings import assert_warnings  # noqa


def against(*queries):
    return _against(config._current, *queries)


crashes = skip
0
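against() above simply binds the current config to exclusions.against(); a hedged sketch of the usual call sites, assuming a test config is active and using example dialect names:

from sqlalchemy import testing


def dialect_specific_path():
    # each argument is parsed via Predicate.as_predicate(); the call
    # returns True if the active backend matches any of them
    if testing.against("postgresql"):
        return "use RETURNING"
    elif testing.against("mysql", "sqlite"):
        return "use lastrowid"
    return "generic path"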
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/assertions.py
# testing/assertions.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

from __future__ import absolute_import

import contextlib
import re
import sys
import warnings

from . import assertsql
from . import config
from . import mock
from . import util as testutil
from .exclusions import db_spec
from .util import fail
from .. import exc as sa_exc
from .. import pool
from .. import schema
from .. import types as sqltypes
from .. import util
from ..engine import default
from ..engine import url
from ..util import compat
from ..util import decorator


def expect_warnings(*messages, **kw):
    """Context manager which expects one or more warnings.

    With no arguments, squelches all SAWarnings emitted via
    sqlalchemy.util.warn and sqlalchemy.util.warn_limited.  Otherwise
    pass string expressions that will match selected warnings via regex;
    all non-matching warnings are sent through.

    The expect version **asserts** that the warnings were in fact seen.

    Note that the test suite sets SAWarning warnings to raise exceptions.

    """
    return _expect_warnings(sa_exc.SAWarning, messages, **kw)


@contextlib.contextmanager
def expect_warnings_on(db, *messages, **kw):
    """Context manager which expects one or more warnings on specific
    dialects.

    The expect version **asserts** that the warnings were in fact seen.

    """
    spec = db_spec(db)

    if isinstance(db, util.string_types) and not spec(config._current):
        yield
    else:
        with expect_warnings(*messages, **kw):
            yield


def emits_warning(*messages):
    """Decorator form of expect_warnings().

    Note that emits_warning does **not** assert that the warnings
    were in fact seen.

    """

    @decorator
    def decorate(fn, *args, **kw):
        with expect_warnings(assert_=False, *messages):
            return fn(*args, **kw)

    return decorate


def expect_deprecated(*messages, **kw):
    return _expect_warnings(sa_exc.SADeprecationWarning, messages, **kw)


def emits_warning_on(db, *messages):
    """Mark a test as emitting a warning on a specific dialect.

    With no arguments, squelches all SAWarning failures.  Or pass one or
    more strings; these will be matched to the root of the warning
    description by warnings.filterwarnings().

    Note that emits_warning_on does **not** assert that the warnings
    were in fact seen.

    """

    @decorator
    def decorate(fn, *args, **kw):
        with expect_warnings_on(db, assert_=False, *messages):
            return fn(*args, **kw)

    return decorate


def uses_deprecated(*messages):
    """Mark a test as immune from fatal deprecation warnings.

    With no arguments, squelches all SADeprecationWarning failures.
    Or pass one or more strings; these will be matched to the root
    of the warning description by warnings.filterwarnings().

    As a special case, you may pass a function name prefixed with //
    and it will be re-written as needed to match the standard warning
    verbiage emitted by the sqlalchemy.util.deprecated decorator.

    Note that uses_deprecated does **not** assert that the warnings
    were in fact seen.

    """

    @decorator
    def decorate(fn, *args, **kw):
        with expect_deprecated(*messages, assert_=False):
            return fn(*args, **kw)

    return decorate


@contextlib.contextmanager
def _expect_warnings(
    exc_cls, messages, regex=True, assert_=True, py2konly=False
):

    if regex:
        filters = [re.compile(msg, re.I | re.S) for msg in messages]
    else:
        filters = messages

    seen = set(filters)

    real_warn = warnings.warn

    def our_warn(msg, *arg, **kw):
        if isinstance(msg, exc_cls):
            exception = msg
            msg = str(exception)
        elif arg:
            exception = arg[0]
        else:
            exception = None

        if not exception or not issubclass(exception, exc_cls):
            return real_warn(msg, *arg, **kw)

        if not filters:
            return

        for filter_ in filters:
            if (regex and filter_.match(msg)) or (
                not regex and filter_ == msg
            ):
                seen.discard(filter_)
                break
        else:
            real_warn(msg, *arg, **kw)

    with mock.patch("warnings.warn", our_warn):
        yield

    if assert_ and (not py2konly or not compat.py3k):
        assert not seen, "Warnings were not seen: %s" % ", ".join(
            "%r" % (s.pattern if regex else s) for s in seen
        )


def global_cleanup_assertions():
    """Check things that have to be finalized at the end of a test suite.

    Hardcoded at the moment, a modular system can be built here
    to support things like PG prepared transactions, tables all
    dropped, etc.

    """
    _assert_no_stray_pool_connections()


_STRAY_CONNECTION_FAILURES = 0


def _assert_no_stray_pool_connections():
    global _STRAY_CONNECTION_FAILURES

    # lazy gc on cPython means "do nothing."  pool connections
    # shouldn't be in cycles, should go away.
    testutil.lazy_gc()

    # however, once in awhile, on an EC2 machine usually,
    # there's a ref in there.  usually just one.
    if pool._refs:

        # OK, let's be somewhat forgiving.
        _STRAY_CONNECTION_FAILURES += 1

        print(
            "Encountered a stray connection in test cleanup: %s"
            % str(pool._refs)
        )
        # then do a real GC sweep.  We shouldn't even be here
        # so a single sweep should really be doing it, otherwise
        # there's probably a real unreachable cycle somewhere.
        testutil.gc_collect()

    # if we've already had two of these occurrences, or
    # after a hard gc sweep we still have pool._refs?!
    # now we have to raise.
    if pool._refs:
        err = str(pool._refs)

        # but clean out the pool refs collection directly,
        # reset the counter,
        # so the error doesn't at least keep happening.
        pool._refs.clear()
        _STRAY_CONNECTION_FAILURES = 0
        warnings.warn(
            "Stray connection refused to leave "
            "after gc.collect(): %s" % err
        )
    elif _STRAY_CONNECTION_FAILURES > 10:
        assert False, "Encountered more than 10 stray connections"
        _STRAY_CONNECTION_FAILURES = 0


def eq_regex(a, b, msg=None):
    assert re.match(b, a), msg or "%r !~ %r" % (a, b)


def eq_(a, b, msg=None):
    """Assert a == b, with repr messaging on failure."""
    assert a == b, msg or "%r != %r" % (a, b)


def ne_(a, b, msg=None):
    """Assert a != b, with repr messaging on failure."""
    assert a != b, msg or "%r == %r" % (a, b)


def le_(a, b, msg=None):
    """Assert a <= b, with repr messaging on failure."""
    assert a <= b, msg or "%r > %r" % (a, b)


def is_instance_of(a, b, msg=None):
    assert isinstance(a, b), msg or "%r is not an instance of %r" % (a, b)


def is_true(a, msg=None):
    is_(a, True, msg=msg)


def is_false(a, msg=None):
    is_(a, False, msg=msg)


def is_(a, b, msg=None):
    """Assert a is b, with repr messaging on failure."""
    assert a is b, msg or "%r is not %r" % (a, b)


def is_not_(a, b, msg=None):
    """Assert a is not b, with repr messaging on failure."""
    assert a is not b, msg or "%r is %r" % (a, b)


def in_(a, b, msg=None):
    """Assert a in b, with repr messaging on failure."""
    assert a in b, msg or "%r not in %r" % (a, b)


def not_in_(a, b, msg=None):
    """Assert a not in b, with repr messaging on failure."""
    assert a not in b, msg or "%r is in %r" % (a, b)


def startswith_(a, fragment, msg=None):
    """Assert a.startswith(fragment), with repr messaging on failure."""
    assert a.startswith(fragment), msg or "%r does not start with %r" % (
        a,
        fragment,
    )


def eq_ignore_whitespace(a, b, msg=None):
    a = re.sub(r"^\s+?|\n", "", a)
    a = re.sub(r" {2,}", " ", a)
    b = re.sub(r"^\s+?|\n", "", b)
    b = re.sub(r" {2,}", " ", b)

    assert a == b, msg or "%r != %r" % (a, b)


def _assert_proper_exception_context(exception):
    """assert that any exception we're catching does not have a __context__
    without a __cause__, and that __suppress_context__ is never set.

    Python 3 will report nested exceptions as "during the handling of
    error X, error Y occurred".  That's not what we want to do.  We want
    these exceptions in a cause chain.

    """

    if not util.py3k:
        return

    if (
        exception.__context__ is not exception.__cause__
        and not exception.__suppress_context__
    ):
        assert False, (
            "Exception %r was correctly raised but did not set a cause, "
            "within context %r as its cause."
            % (exception, exception.__context__)
        )


def assert_raises(except_cls, callable_, *args, **kw):
    _assert_raises(except_cls, callable_, args, kw, check_context=True)


def assert_raises_context_ok(except_cls, callable_, *args, **kw):
    _assert_raises(except_cls, callable_, args, kw)


def assert_raises_return(except_cls, callable_, *args, **kw):
    return _assert_raises(except_cls, callable_, args, kw, check_context=True)


def assert_raises_message(except_cls, msg, callable_, *args, **kwargs):
    _assert_raises(
        except_cls, callable_, args, kwargs, msg=msg, check_context=True
    )


def assert_raises_message_context_ok(
    except_cls, msg, callable_, *args, **kwargs
):
    _assert_raises(except_cls, callable_, args, kwargs, msg=msg)


def _assert_raises(
    except_cls, callable_, args, kwargs, msg=None, check_context=False
):
    ret_err = None
    if check_context:
        are_we_already_in_a_traceback = sys.exc_info()[0]
    try:
        callable_(*args, **kwargs)
        success = False
    except except_cls as err:
        ret_err = err
        success = True
        if msg is not None:
            assert re.search(
                msg, util.text_type(err), re.UNICODE
            ), "%r !~ %s" % (msg, err)
        if check_context and not are_we_already_in_a_traceback:
            _assert_proper_exception_context(err)
        print(util.text_type(err).encode("utf-8"))

    # assert outside the block so it works for AssertionError too !
    assert success, "Callable did not raise an exception"

    return ret_err


class AssertsCompiledSQL(object):
    def assert_compile(
        self,
        clause,
        result,
        params=None,
        checkparams=None,
        dialect=None,
        checkpositional=None,
        check_prefetch=None,
        use_default_dialect=False,
        allow_dialect_select=False,
        literal_binds=False,
        schema_translate_map=None,
    ):
        if use_default_dialect:
            dialect = default.DefaultDialect()
        elif allow_dialect_select:
            dialect = None
        else:
            if dialect is None:
                dialect = getattr(self, "__dialect__", None)

            if dialect is None:
                dialect = config.db.dialect
            elif dialect == "default":
                dialect = default.DefaultDialect()
            elif dialect == "default_enhanced":
                dialect = default.StrCompileDialect()
            elif isinstance(dialect, util.string_types):
                dialect = url.URL(dialect).get_dialect()()

        kw = {}
        compile_kwargs = {}

        if schema_translate_map:
            kw["schema_translate_map"] = schema_translate_map

        if params is not None:
            kw["column_keys"] = list(params)

        if literal_binds:
            compile_kwargs["literal_binds"] = True

        from sqlalchemy import orm

        if isinstance(clause, orm.Query):
            context = clause._compile_context()
            context.statement.use_labels = True
            clause = context.statement
        elif isinstance(clause, orm.persistence.BulkUD):
            with mock.patch.object(clause, "_execute_stmt") as stmt_mock:
                clause.exec_()
            clause = stmt_mock.mock_calls[0][1][0]

        if compile_kwargs:
            kw["compile_kwargs"] = compile_kwargs

        c = clause.compile(dialect=dialect, **kw)

        param_str = repr(getattr(c, "params", {}))

        if util.py3k:
            param_str = param_str.encode("utf-8").decode("ascii", "ignore")
            print(
                ("\nSQL String:\n" + util.text_type(c) + param_str).encode(
                    "utf-8"
                )
            )
        else:
            print(
                "\nSQL String:\n"
                + util.text_type(c).encode("utf-8")
                + param_str
            )

        cc = re.sub(r"[\n\t]", "", util.text_type(c))

        eq_(cc, result, "%r != %r on dialect %r" % (cc, result, dialect))

        if checkparams is not None:
            eq_(c.construct_params(params), checkparams)
        if checkpositional is not None:
            p = c.construct_params(params)
            eq_(tuple([p[x] for x in c.positiontup]), checkpositional)
        if check_prefetch is not None:
            eq_(c.prefetch, check_prefetch)


class ComparesTables(object):
    def assert_tables_equal(self, table, reflected_table, strict_types=False):
        assert len(table.c) == len(reflected_table.c)
        for c, reflected_c in zip(table.c, reflected_table.c):
            eq_(c.name, reflected_c.name)
            assert reflected_c is reflected_table.c[c.name]
            eq_(c.primary_key, reflected_c.primary_key)
            eq_(c.nullable, reflected_c.nullable)

            if strict_types:
                msg = "Type '%s' doesn't correspond to type '%s'"
                assert isinstance(reflected_c.type, type(c.type)), msg % (
                    reflected_c.type,
                    c.type,
                )
            else:
                self.assert_types_base(reflected_c, c)

            if isinstance(c.type, sqltypes.String):
                eq_(c.type.length, reflected_c.type.length)

            eq_(
                {f.column.name for f in c.foreign_keys},
                {f.column.name for f in reflected_c.foreign_keys},
            )
            if c.server_default:
                assert isinstance(
                    reflected_c.server_default, schema.FetchedValue
                )

        assert len(table.primary_key) == len(reflected_table.primary_key)
        for c in table.primary_key:
            assert reflected_table.primary_key.columns[c.name] is not None

    def assert_types_base(self, c1, c2):
        assert c1.type._compare_type_affinity(c2.type), (
            "On column %r, type '%s' doesn't correspond to type '%s'"
            % (c1.name, c1.type, c2.type)
        )


class AssertsExecutionResults(object):
    def assert_result(self, result, class_, *objects):
        result = list(result)
        print(repr(result))
        self.assert_list(result, class_, objects)

    def assert_list(self, result, class_, list_):
        self.assert_(
            len(result) == len(list_),
            "result list is not the same size as test list, "
            + "for class " + class_.__name__,
        )
        for i in range(0, len(list_)):
            self.assert_row(class_, result[i], list_[i])

    def assert_row(self, class_, rowobj, desc):
        self.assert_(
            rowobj.__class__ is class_, "item class is not " + repr(class_)
        )
        for key, value in desc.items():
            if isinstance(value, tuple):
                if isinstance(value[1], list):
                    self.assert_list(getattr(rowobj, key), value[0], value[1])
                else:
                    self.assert_row(value[0], getattr(rowobj, key), value[1])
            else:
                self.assert_(
                    getattr(rowobj, key) == value,
                    "attribute %s value %s does not match %s"
                    % (key, getattr(rowobj, key), value),
                )

    def assert_unordered_result(self, result, cls, *expected):
        """As assert_result, but the order of objects is not considered.

        The algorithm is very expensive but not a big deal for the small
        numbers of rows that the test suite manipulates.

        """

        class immutabledict(dict):
            def __hash__(self):
                return id(self)

        found = util.IdentitySet(result)
        expected = {immutabledict(e) for e in expected}

        for wrong in util.itertools_filterfalse(
            lambda o: isinstance(o, cls), found
        ):
            fail(
                'Unexpected type "%s", expected "%s"'
                % (type(wrong).__name__, cls.__name__)
            )

        if len(found) != len(expected):
            fail(
                'Unexpected object count "%s", expected "%s"'
                % (len(found), len(expected))
            )

        NOVALUE = object()

        def _compare_item(obj, spec):
            for key, value in spec.items():
                if isinstance(value, tuple):
                    try:
                        self.assert_unordered_result(
                            getattr(obj, key), value[0], *value[1]
                        )
                    except AssertionError:
                        return False
                else:
                    if getattr(obj, key, NOVALUE) != value:
                        return False
            return True

        for expected_item in expected:
            for found_item in found:
                if _compare_item(found_item, expected_item):
                    found.remove(found_item)
                    break
            else:
                fail(
                    "Expected %s instance with attributes %s not found."
                    % (cls.__name__, repr(expected_item))
                )
        return True

    def sql_execution_asserter(self, db=None):
        if db is None:
            from . import db as db

        return assertsql.assert_engine(db)

    def assert_sql_execution(self, db, callable_, *rules):
        with self.sql_execution_asserter(db) as asserter:
            result = callable_()
        asserter.assert_(*rules)
        return result

    def assert_sql(self, db, callable_, rules):
        newrules = []
        for rule in rules:
            if isinstance(rule, dict):
                newrule = assertsql.AllOf(
                    *[assertsql.CompiledSQL(k, v) for k, v in rule.items()]
                )
            else:
                newrule = assertsql.CompiledSQL(*rule)
            newrules.append(newrule)

        return self.assert_sql_execution(db, callable_, *newrules)

    def assert_sql_count(self, db, callable_, count):
        self.assert_sql_execution(
            db, callable_, assertsql.CountStatements(count)
        )

    def assert_multiple_sql_count(self, dbs, callable_, counts):
        recs = [
            (self.sql_execution_asserter(db), db, count)
            for (db, count) in zip(dbs, counts)
        ]
        asserters = []
        for ctx, db, count in recs:
            asserters.append(ctx.__enter__())
        try:
            return callable_()
        finally:
            for asserter, (ctx, db, count) in zip(asserters, recs):
                ctx.__exit__(None, None, None)
                asserter.assert_(assertsql.CountStatements(count))

    @contextlib.contextmanager
    def assert_execution(self, db, *rules):
        with self.sql_execution_asserter(db) as asserter:
            yield
        asserter.assert_(*rules)

    def assert_statement_count(self, db, count):
        return self.assert_execution(db, assertsql.CountStatements(count))
0
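A hedged sketch of how the warning and exception helpers above are typically used; the test functions and the warning text are invented for illustration.

import warnings

from sqlalchemy import exc as sa_exc
from sqlalchemy.testing.assertions import (
    assert_raises_message,
    expect_warnings,
)


def test_warning_is_seen():
    # expect_warnings asserts on exit that a matching SAWarning was
    # actually emitted inside the block
    with expect_warnings("something deprecated-ish"):
        warnings.warn(sa_exc.SAWarning("something deprecated-ish happened"))


def test_error_message():
    # assert_raises_message matches the exception text via re.search
    assert_raises_message(ValueError, "invalid literal", int, "not-a-number")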
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/replay_fixture.py
from collections import deque
import contextlib
import types

from . import config
from . import fixtures
from . import profiling
from .. import create_engine
from .. import MetaData
from .. import util
from ..orm import Session


class ReplayFixtureTest(fixtures.TestBase):
    @contextlib.contextmanager
    def _dummy_ctx(self, *arg, **kw):
        yield

    def test_invocation(self):

        dbapi_session = ReplayableSession()
        creator = config.db.pool._creator

        def recorder():
            return dbapi_session.recorder(creator())

        engine = create_engine(
            config.db.url, creator=recorder, use_native_hstore=False
        )
        self.metadata = MetaData(engine)
        self.engine = engine
        self.session = Session(engine)

        self.setup_engine()
        try:
            self._run_steps(ctx=self._dummy_ctx)
        finally:
            self.teardown_engine()
            engine.dispose()

        def player():
            return dbapi_session.player()

        engine = create_engine(
            config.db.url, creator=player, use_native_hstore=False
        )
        self.metadata = MetaData(engine)
        self.engine = engine
        self.session = Session(engine)

        self.setup_engine()
        try:
            self._run_steps(ctx=profiling.count_functions)
        finally:
            self.session.close()
            engine.dispose()

    def setup_engine(self):
        pass

    def teardown_engine(self):
        pass

    def _run_steps(self, ctx):
        raise NotImplementedError()


class ReplayableSession(object):
    """A simple record/playback tool.

    This is *not* a mock testing class.  It only records a session for
    later playback and makes no assertions on call consistency
    whatsoever.  It's unlikely to be suitable for anything other than
    DB-API recording.

    """

    Callable = object()
    NoAttribute = object()

    if util.py2k:
        Natives = set(
            [getattr(types, t) for t in dir(types) if not t.startswith("_")]
        ).difference(
            [
                getattr(types, t)
                for t in (
                    "FunctionType",
                    "BuiltinFunctionType",
                    "MethodType",
                    "BuiltinMethodType",
                    "LambdaType",
                    "UnboundMethodType",
                )
            ]
        )
    else:
        Natives = (
            set(
                [
                    getattr(types, t)
                    for t in dir(types)
                    if not t.startswith("_")
                ]
            )
            .union(
                [
                    type(t) if not isinstance(t, type) else t
                    for t in __builtins__.values()
                ]
            )
            .difference(
                [
                    getattr(types, t)
                    for t in (
                        "FunctionType",
                        "BuiltinFunctionType",
                        "MethodType",
                        "BuiltinMethodType",
                        "LambdaType",
                    )
                ]
            )
        )

    def __init__(self):
        self.buffer = deque()

    def recorder(self, base):
        return self.Recorder(self.buffer, base)

    def player(self):
        return self.Player(self.buffer)

    class Recorder(object):
        def __init__(self, buffer, subject):
            self._buffer = buffer
            self._subject = subject

        def __call__(self, *args, **kw):
            subject, buffer = [
                object.__getattribute__(self, x)
                for x in ("_subject", "_buffer")
            ]

            result = subject(*args, **kw)
            if type(result) not in ReplayableSession.Natives:
                buffer.append(ReplayableSession.Callable)
                return type(self)(buffer, result)
            else:
                buffer.append(result)
                return result

        @property
        def _sqla_unwrap(self):
            return self._subject

        def __getattribute__(self, key):
            try:
                return object.__getattribute__(self, key)
            except AttributeError:
                pass

            subject, buffer = [
                object.__getattribute__(self, x)
                for x in ("_subject", "_buffer")
            ]
            try:
                result = type(subject).__getattribute__(subject, key)
            except AttributeError:
                buffer.append(ReplayableSession.NoAttribute)
                raise
            else:
                if type(result) not in ReplayableSession.Natives:
                    buffer.append(ReplayableSession.Callable)
                    return type(self)(buffer, result)
                else:
                    buffer.append(result)
                    return result

    class Player(object):
        def __init__(self, buffer):
            self._buffer = buffer

        def __call__(self, *args, **kw):
            buffer = object.__getattribute__(self, "_buffer")
            result = buffer.popleft()
            if result is ReplayableSession.Callable:
                return self
            else:
                return result

        @property
        def _sqla_unwrap(self):
            return None

        def __getattribute__(self, key):
            try:
                return object.__getattribute__(self, key)
            except AttributeError:
                pass
            buffer = object.__getattribute__(self, "_buffer")
            result = buffer.popleft()
            if result is ReplayableSession.Callable:
                return self
            elif result is ReplayableSession.NoAttribute:
                raise AttributeError(key)
            else:
                return result
0
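A hedged sketch of the record/playback idea in ReplayableSession, using a plain object in place of a real DBAPI connection; the FakeConnection class is invented for illustration.

from sqlalchemy.testing.replay_fixture import ReplayableSession


class FakeConnection(object):
    def ping(self):
        return "pong"


session = ReplayableSession()

recorder = session.recorder(FakeConnection())
assert recorder.ping() == "pong"   # real call; the result is recorded

player = session.player()
assert player.ping() == "pong"     # replayed from the buffer, no real call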
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/assertsql.py
# testing/assertsql.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

import collections
import contextlib
import re

from .. import event
from .. import util
from ..engine import url
from ..engine.default import DefaultDialect
from ..engine.util import _distill_params
from ..schema import _DDLCompiles


class AssertRule(object):

    is_consumed = False
    errormessage = None
    consume_statement = True

    def process_statement(self, execute_observed):
        pass

    def no_more_statements(self):
        assert False, (
            "All statements are complete, but pending "
            "assertion rules remain"
        )


class SQLMatchRule(AssertRule):
    pass


class CursorSQL(SQLMatchRule):
    consume_statement = False

    def __init__(self, statement, params=None):
        self.statement = statement
        self.params = params

    def process_statement(self, execute_observed):
        stmt = execute_observed.statements[0]
        if self.statement != stmt.statement or (
            self.params is not None and self.params != stmt.parameters
        ):
            self.errormessage = (
                "Testing for exact SQL %s parameters %s received %s %s"
                % (
                    self.statement,
                    self.params,
                    stmt.statement,
                    stmt.parameters,
                )
            )
        else:
            execute_observed.statements.pop(0)
            self.is_consumed = True
            if not execute_observed.statements:
                self.consume_statement = True


class CompiledSQL(SQLMatchRule):
    def __init__(self, statement, params=None, dialect="default"):
        self.statement = statement
        self.params = params
        self.dialect = dialect

    def _compare_sql(self, execute_observed, received_statement):
        stmt = re.sub(r"[\n\t]", "", self.statement)
        return received_statement == stmt

    def _compile_dialect(self, execute_observed):
        if self.dialect == "default":
            return DefaultDialect()
        else:
            # ugh
            if self.dialect == "postgresql":
                params = {"implicit_returning": True}
            else:
                params = {}

            return url.URL(self.dialect).get_dialect()(**params)

    def _received_statement(self, execute_observed):
        """reconstruct the statement and params in terms of a target
        dialect, which for CompiledSQL is just DefaultDialect."""

        context = execute_observed.context
        compare_dialect = self._compile_dialect(execute_observed)
        if isinstance(context.compiled.statement, _DDLCompiles):
            compiled = context.compiled.statement.compile(
                dialect=compare_dialect,
                schema_translate_map=context.execution_options.get(
                    "schema_translate_map"
                ),
            )
        else:
            compiled = context.compiled.statement.compile(
                dialect=compare_dialect,
                column_keys=context.compiled.column_keys,
                inline=context.compiled.inline,
                schema_translate_map=context.execution_options.get(
                    "schema_translate_map"
                ),
            )
        _received_statement = re.sub(r"[\n\t]", "", util.text_type(compiled))
        parameters = execute_observed.parameters

        if not parameters:
            _received_parameters = [compiled.construct_params()]
        else:
            _received_parameters = [
                compiled.construct_params(m) for m in parameters
            ]

        return _received_statement, _received_parameters

    def process_statement(self, execute_observed):
        context = execute_observed.context

        _received_statement, _received_parameters = self._received_statement(
            execute_observed
        )
        params = self._all_params(context)

        equivalent = self._compare_sql(execute_observed, _received_statement)

        if equivalent:
            if params is not None:
                all_params = list(params)
                all_received = list(_received_parameters)
                while all_params and all_received:
                    param = dict(all_params.pop(0))

                    for idx, received in enumerate(list(all_received)):
                        # do a positive compare only
                        for param_key in param:
                            # a key in param did not match current
                            # 'received'
                            if (
                                param_key not in received
                                or received[param_key] != param[param_key]
                            ):
                                break
                        else:
                            # all keys in param matched 'received';
                            # onto next param
                            del all_received[idx]
                            break
                    else:
                        # param did not match any entry
                        # in all_received
                        equivalent = False
                        break
                if all_params or all_received:
                    equivalent = False

        if equivalent:
            self.is_consumed = True
            self.errormessage = None
        else:
            self.errormessage = self._failure_message(params) % {
                "received_statement": _received_statement,
                "received_parameters": _received_parameters,
            }

    def _all_params(self, context):
        if self.params:
            if util.callable(self.params):
                params = self.params(context)
            else:
                params = self.params
            if not isinstance(params, list):
                params = [params]
            return params
        else:
            return None

    def _failure_message(self, expected_params):
        return (
            "Testing for compiled statement %r partial params %s, "
            "received %%(received_statement)r with params "
            "%%(received_parameters)r"
            % (
                self.statement.replace("%", "%%"),
                repr(expected_params).replace("%", "%%"),
            )
        )


class RegexSQL(CompiledSQL):
    def __init__(self, regex, params=None):
        SQLMatchRule.__init__(self)
        self.regex = re.compile(regex)
        self.orig_regex = regex
        self.params = params
        self.dialect = "default"

    def _failure_message(self, expected_params):
        return (
            "Testing for compiled statement ~%r partial params %s, "
            "received %%(received_statement)r with params "
            "%%(received_parameters)r"
            % (
                self.orig_regex.replace("%", "%%"),
                repr(expected_params).replace("%", "%%"),
            )
        )

    def _compare_sql(self, execute_observed, received_statement):
        return bool(self.regex.match(received_statement))


class DialectSQL(CompiledSQL):
    def _compile_dialect(self, execute_observed):
        return execute_observed.context.dialect

    def _compare_no_space(self, real_stmt, received_stmt):
        stmt = re.sub(r"[\n\t]", "", real_stmt)
        return received_stmt == stmt

    def _received_statement(self, execute_observed):
        received_stmt, received_params = super(
            DialectSQL, self
        )._received_statement(execute_observed)

        # TODO: why do we need this part?
        for real_stmt in execute_observed.statements:
            if self._compare_no_space(real_stmt.statement, received_stmt):
                break
        else:
            raise AssertionError(
                "Can't locate compiled statement %r in list of "
                "statements actually invoked" % received_stmt
            )

        return received_stmt, execute_observed.context.compiled_parameters

    def _compare_sql(self, execute_observed, received_statement):
        stmt = re.sub(r"[\n\t]", "", self.statement)

        # convert our comparison statement to have the
        # paramstyle of the received
        paramstyle = execute_observed.context.dialect.paramstyle
        if paramstyle == "pyformat":
            stmt = re.sub(r":([\w_]+)", r"%(\1)s", stmt)
        else:
            # positional params
            repl = None
            if paramstyle == "qmark":
                repl = "?"
            elif paramstyle == "format":
                repl = r"%s"
            elif paramstyle == "numeric":
                repl = None
            stmt = re.sub(r":([\w_]+)", repl, stmt)

        return received_statement == stmt


class CountStatements(AssertRule):
    def __init__(self, count):
        self.count = count
        self._statement_count = 0

    def process_statement(self, execute_observed):
        self._statement_count += 1

    def no_more_statements(self):
        if self.count != self._statement_count:
            assert False, "desired statement count %d does not match %d" % (
                self.count,
                self._statement_count,
            )


class AllOf(AssertRule):
    def __init__(self, *rules):
        self.rules = set(rules)

    def process_statement(self, execute_observed):
        for rule in list(self.rules):
            rule.errormessage = None
            rule.process_statement(execute_observed)
            if rule.is_consumed:
                self.rules.discard(rule)
                if not self.rules:
                    self.is_consumed = True
                break
            elif not rule.errormessage:
                # rule is not done yet
                self.errormessage = None
                break
        else:
            self.errormessage = list(self.rules)[0].errormessage


class EachOf(AssertRule):
    def __init__(self, *rules):
        self.rules = list(rules)

    def process_statement(self, execute_observed):
        while self.rules:
            rule = self.rules[0]
            rule.process_statement(execute_observed)
            if rule.is_consumed:
                self.rules.pop(0)
            elif rule.errormessage:
                self.errormessage = rule.errormessage
            if rule.consume_statement:
                break

        if not self.rules:
            self.is_consumed = True

    def no_more_statements(self):
        if self.rules and not self.rules[0].is_consumed:
            self.rules[0].no_more_statements()
        elif self.rules:
            super(EachOf, self).no_more_statements()


class Or(AllOf):
    def process_statement(self, execute_observed):
        for rule in self.rules:
            rule.process_statement(execute_observed)
            if rule.is_consumed:
                self.is_consumed = True
                break
        else:
            self.errormessage = list(self.rules)[0].errormessage


class SQLExecuteObserved(object):
    def __init__(self, context, clauseelement, multiparams, params):
        self.context = context
        self.clauseelement = clauseelement
        self.parameters = _distill_params(multiparams, params)
        self.statements = []


class SQLCursorExecuteObserved(
    collections.namedtuple(
        "SQLCursorExecuteObserved",
        ["statement", "parameters", "context", "executemany"],
    )
):
    pass


class SQLAsserter(object):
    def __init__(self):
        self.accumulated = []

    def _close(self):
        self._final = self.accumulated
        del self.accumulated

    def assert_(self, *rules):
        rule = EachOf(*rules)

        observed = list(self._final)
        while observed:
            statement = observed.pop(0)
            rule.process_statement(statement)
            if rule.is_consumed:
                break
            elif rule.errormessage:
                assert False, rule.errormessage
        if observed:
            assert False, "Additional SQL statements remain"
        elif not rule.is_consumed:
            rule.no_more_statements()


@contextlib.contextmanager
def assert_engine(engine):
    asserter = SQLAsserter()

    orig = []

    @event.listens_for(engine, "before_execute")
    def connection_execute(conn, clauseelement, multiparams, params):
        # grab the original statement + params before any cursor
        # execution
        orig[:] = clauseelement, multiparams, params

    @event.listens_for(engine, "after_cursor_execute")
    def cursor_execute(
        conn, cursor, statement, parameters, context, executemany
    ):
        if not context:
            return
        # then grab real cursor statements and associate them all
        # around a single context
        if (
            asserter.accumulated
            and asserter.accumulated[-1].context is context
        ):
            obs = asserter.accumulated[-1]
        else:
            obs = SQLExecuteObserved(context, orig[0], orig[1], orig[2])
            asserter.accumulated.append(obs)
        obs.statements.append(
            SQLCursorExecuteObserved(
                statement, parameters, context, executemany
            )
        )

    try:
        yield asserter
    finally:
        event.remove(engine, "after_cursor_execute", cursor_execute)
        event.remove(engine, "before_execute", connection_execute)
        asserter._close()
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/provision.py
import collections import logging from . import config from . import engines from ..engine import url as sa_url from ..util import compat log = logging.getLogger(__name__) FOLLOWER_IDENT = None class register(object): def __init__(self): self.fns = {} @classmethod def init(cls, fn): return register().for_db("*")(fn) def for_db(self, dbname): def decorate(fn): self.fns[dbname] = fn return self return decorate def __call__(self, cfg, *arg): if isinstance(cfg, compat.string_types): url = sa_url.make_url(cfg) elif isinstance(cfg, sa_url.URL): url = cfg else: url = cfg.db.url backend = url.get_backend_name() if backend in self.fns: return self.fns[backend](cfg, *arg) else: return self.fns["*"](cfg, *arg) def create_follower_db(follower_ident): for cfg in _configs_for_db_operation(): log.info("CREATE database %s, URI %r", follower_ident, cfg.db.url) create_db(cfg, cfg.db, follower_ident) def setup_config(db_url, options, file_config, follower_ident): # load the dialect, which should also have it set up its provision # hooks dialect = sa_url.make_url(db_url).get_dialect() dialect.load_provisioning() if follower_ident: db_url = follower_url_from_main(db_url, follower_ident) db_opts = {} update_db_opts(db_url, db_opts) eng = engines.testing_engine(db_url, db_opts) post_configure_engine(db_url, eng, follower_ident) eng.connect().close() cfg = config.Config.register(eng, db_opts, options, file_config) if follower_ident: configure_follower(cfg, follower_ident) return cfg def drop_follower_db(follower_ident): for cfg in _configs_for_db_operation(): log.info("DROP database %s, URI %r", follower_ident, cfg.db.url) drop_db(cfg, cfg.db, follower_ident) def _configs_for_db_operation(): hosts = set() for cfg in config.Config.all_configs(): cfg.db.dispose() for cfg in config.Config.all_configs(): url = cfg.db.url backend = url.get_backend_name() host_conf = (backend, url.username, url.host, url.database) if host_conf not in hosts: yield cfg hosts.add(host_conf) for cfg in config.Config.all_configs(): cfg.db.dispose() @register.init def create_db(cfg, eng, ident): """Dynamically create a database for testing. Used when a test run will employ multiple processes, e.g., when run via `tox` or `pytest -n4`. """ raise NotImplementedError("no DB creation routine for cfg: %s" % eng.url) @register.init def drop_db(cfg, eng, ident): """Drop a database that we dynamically created for testing.""" raise NotImplementedError("no DB drop routine for cfg: %s" % eng.url) @register.init def update_db_opts(db_url, db_opts): """Set database options (db_opts) for a test database that we created. """ pass @register.init def post_configure_engine(url, engine, follower_ident): """Perform extra steps after configuring an engine for testing. (For the internal dialects, currently only used by sqlite.) """ pass @register.init def follower_url_from_main(url, ident): """Create a connection URL for a dynamically-created test database. :param url: the connection URL specified when the test run was invoked :param ident: the pytest-xdist "worker identifier" to be used as the database name """ url = sa_url.make_url(url) url.database = ident return url @register.init def configure_follower(cfg, ident): """Create dialect-specific config settings for a follower database.""" pass @register.init def run_reap_dbs(url, ident): """Remove databases that were created during the test process, after the process has ended. 
This is an optional step that is invoked for certain backends that do not reliably release locks on the database as long as a process is still in use. For the internal dialects, this is currently only necessary for mssql and oracle. """ pass def reap_dbs(idents_file): log.info("Reaping databases...") urls = collections.defaultdict(set) idents = collections.defaultdict(set) dialects = {} with open(idents_file) as file_: for line in file_: line = line.strip() db_name, db_url = line.split(" ") url_obj = sa_url.make_url(db_url) if db_name not in dialects: dialects[db_name] = url_obj.get_dialect() dialects[db_name].load_provisioning() url_key = (url_obj.get_backend_name(), url_obj.host) urls[url_key].add(db_url) idents[url_key].add(db_name) for url_key in urls: url = list(urls[url_key])[0] ident = idents[url_key] run_reap_dbs(url, ident) @register.init def temp_table_keyword_args(cfg, eng): """Specify keyword arguments for creating a temporary Table. Dialect-specific implementations of this method will return the kwargs that are passed to the Table method when creating a temporary table for testing, e.g., in the define_temp_tables method of the ComponentReflectionTest class in suite/test_reflection.py """ raise NotImplementedError( "no temp table keyword args routine for cfg: %s" % eng.url )
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/requirements.py
# testing/requirements.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Global database feature support policy. Provides decorators to mark tests requiring specific feature support from the target database. External dialect test suites should subclass SuiteRequirements to provide specific inclusion/exclusions. """ import sys from . import exclusions from .. import util class Requirements(object): pass class SuiteRequirements(Requirements): @property def create_table(self): """target platform can emit basic CreateTable DDL.""" return exclusions.open() @property def drop_table(self): """target platform can emit basic DropTable DDL.""" return exclusions.open() @property def foreign_keys(self): """Target database must support foreign keys.""" return exclusions.open() @property def on_update_cascade(self): """"target database must support ON UPDATE..CASCADE behavior in foreign keys.""" return exclusions.open() @property def non_updating_cascade(self): """target database must *not* support ON UPDATE..CASCADE behavior in foreign keys.""" return exclusions.closed() @property def deferrable_fks(self): return exclusions.closed() @property def on_update_or_deferrable_fks(self): # TODO: exclusions should be composable, # somehow only_if([x, y]) isn't working here, negation/conjunctions # getting confused. return exclusions.only_if( lambda: self.on_update_cascade.enabled or self.deferrable_fks.enabled ) @property def self_referential_foreign_keys(self): """Target database must support self-referential foreign keys.""" return exclusions.open() @property def foreign_key_ddl(self): """Target database must support the DDL phrases for FOREIGN KEY.""" return exclusions.open() @property def named_constraints(self): """target database must support names for constraints.""" return exclusions.open() @property def subqueries(self): """Target database must support subqueries.""" return exclusions.open() @property def offset(self): """target database can render OFFSET, or an equivalent, in a SELECT. """ return exclusions.open() @property def bound_limit_offset(self): """target database can render LIMIT and/or OFFSET using a bound parameter """ return exclusions.open() @property def sql_expression_limit_offset(self): """target database can render LIMIT and/or OFFSET with a complete SQL expression, such as one that uses the addition operator. parameter """ return exclusions.open() @property def parens_in_union_contained_select_w_limit_offset(self): """Target database must support parenthesized SELECT in UNION when LIMIT/OFFSET is specifically present. E.g. (SELECT ...) UNION (SELECT ..) This is known to fail on SQLite. """ return exclusions.open() @property def parens_in_union_contained_select_wo_limit_offset(self): """Target database must support parenthesized SELECT in UNION when OFFSET/LIMIT is specifically not present. E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..) This is known to fail on SQLite. It also fails on Oracle because without LIMIT/OFFSET, there is currently no step that creates an additional subquery. 
""" return exclusions.open() @property def boolean_col_expressions(self): """Target database must support boolean expressions as columns""" return exclusions.closed() @property def nullable_booleans(self): """Target database allows boolean columns to store NULL.""" return exclusions.open() @property def nullsordering(self): """Target backends that support nulls ordering.""" return exclusions.closed() @property def standalone_binds(self): """target database/driver supports bound parameters as column expressions without being in the context of a typed column. """ return exclusions.closed() @property def standalone_null_binds_whereclause(self): """target database/driver supports bound parameters with NULL in the WHERE clause, in situations where it has to be typed. """ return exclusions.open() @property def intersect(self): """Target database must support INTERSECT or equivalent.""" return exclusions.closed() @property def except_(self): """Target database must support EXCEPT or equivalent (i.e. MINUS).""" return exclusions.closed() @property def window_functions(self): """Target database must support window functions.""" return exclusions.closed() @property def ctes(self): """Target database supports CTEs""" return exclusions.closed() @property def ctes_with_update_delete(self): """target database supports CTES that ride on top of a normal UPDATE or DELETE statement which refers to the CTE in a correlated subquery. """ return exclusions.closed() @property def ctes_on_dml(self): """target database supports CTES which consist of INSERT, UPDATE or DELETE *within* the CTE, e.g. WITH x AS (UPDATE....)""" return exclusions.closed() @property def autoincrement_insert(self): """target platform generates new surrogate integer primary key values when insert() is executed, excluding the pk column.""" return exclusions.open() @property def fetch_rows_post_commit(self): """target platform will allow cursor.fetchone() to proceed after a COMMIT. Typically this refers to an INSERT statement with RETURNING which is invoked within "autocommit". If the row can be returned after the autocommit, then this rule can be open. """ return exclusions.open() @property def group_by_complex_expression(self): """target platform supports SQL expressions in GROUP BY e.g. SELECT x + y AS somelabel FROM table GROUP BY x + y """ return exclusions.open() @property def sane_rowcount(self): return exclusions.skip_if( lambda config: not config.db.dialect.supports_sane_rowcount, "driver doesn't support 'sane' rowcount", ) @property def sane_multi_rowcount(self): return exclusions.fails_if( lambda config: not config.db.dialect.supports_sane_multi_rowcount, "driver %(driver)s %(doesnt_support)s 'sane' multi row count", ) @property def sane_rowcount_w_returning(self): return exclusions.fails_if( lambda config: not ( config.db.dialect.supports_sane_rowcount_returning ), "driver doesn't support 'sane' rowcount when returning is on", ) @property def empty_inserts(self): """target platform supports INSERT with no values, i.e. 
INSERT DEFAULT VALUES or equivalent.""" return exclusions.only_if( lambda config: config.db.dialect.supports_empty_insert or config.db.dialect.supports_default_values, "empty inserts not supported", ) @property def insert_from_select(self): """target platform supports INSERT from a SELECT.""" return exclusions.open() @property def returning(self): """target platform supports RETURNING.""" return exclusions.only_if( lambda config: config.db.dialect.implicit_returning, "%(database)s %(does_support)s 'returning'", ) @property def tuple_in(self): """Target platform supports the syntax "(x, y) IN ((x1, y1), (x2, y2), ...)" """ return exclusions.closed() @property def duplicate_names_in_cursor_description(self): """target platform supports a SELECT statement that has the same name repeated more than once in the columns list.""" return exclusions.open() @property def denormalized_names(self): """Target database must have 'denormalized', i.e. UPPERCASE as case insensitive names.""" return exclusions.skip_if( lambda config: not config.db.dialect.requires_name_normalize, "Backend does not require denormalized names.", ) @property def multivalues_inserts(self): """target database must support multiple VALUES clauses in an INSERT statement.""" return exclusions.skip_if( lambda config: not config.db.dialect.supports_multivalues_insert, "Backend does not support multirow inserts.", ) @property def implements_get_lastrowid(self): """"target dialect implements the executioncontext.get_lastrowid() method without reliance on RETURNING. """ return exclusions.open() @property def emulated_lastrowid(self): """"target dialect retrieves cursor.lastrowid, or fetches from a database-side function after an insert() construct executes, within the get_lastrowid() method. Only dialects that "pre-execute", or need RETURNING to get last inserted id, would return closed/fail/skip for this. """ return exclusions.closed() @property def dbapi_lastrowid(self): """"target platform includes a 'lastrowid' accessor on the DBAPI cursor object. """ return exclusions.closed() @property def views(self): """Target database must support VIEWs.""" return exclusions.closed() @property def schemas(self): """Target database must support external schemas, and have one named 'test_schema'.""" return exclusions.closed() @property def cross_schema_fk_reflection(self): """target system must support reflection of inter-schema foreign keys """ return exclusions.closed() @property def implicit_default_schema(self): """target system has a strong concept of 'default' schema that can be referred to implicitly. basically, PostgreSQL. """ return exclusions.closed() @property def server_side_cursors(self): """Target dialect must support server side cursors.""" return exclusions.only_if( [lambda config: config.db.dialect.supports_server_side_cursors], "no server side cursors support", ) @property def sequences(self): """Target database must support SEQUENCEs.""" return exclusions.only_if( [lambda config: config.db.dialect.supports_sequences], "no sequence support", ) @property def sequences_optional(self): """Target database supports sequences, but also optionally as a means of generating new PK values.""" return exclusions.only_if( [ lambda config: config.db.dialect.supports_sequences and config.db.dialect.sequences_optional ], "no sequence support, or sequences not optional", ) @property def supports_lastrowid(self): """target database / driver supports cursor.lastrowid as a means of retrieving the last inserted primary key value. 
note that if the target DB supports sequences also, this is still assumed to work. This is a new use case brought on by MariaDB 10.3. """ return exclusions.only_if( [lambda config: config.db.dialect.postfetch_lastrowid] ) @property def no_lastrowid_support(self): """the opposite of supports_lastrowid""" return exclusions.NotPredicate(self.supports_lastrowid) @property def reflects_pk_names(self): return exclusions.closed() @property def table_reflection(self): return exclusions.open() @property def comment_reflection(self): return exclusions.closed() @property def view_column_reflection(self): """target database must support retrieval of the columns in a view, similarly to how a table is inspected. This does not include the full CREATE VIEW definition. """ return self.views @property def view_reflection(self): """target database must support inspection of the full CREATE VIEW definition. """ return self.views @property def schema_reflection(self): return self.schemas @property def primary_key_constraint_reflection(self): return exclusions.open() @property def foreign_key_constraint_reflection(self): return exclusions.open() @property def foreign_key_constraint_option_reflection_ondelete(self): return exclusions.closed() @property def foreign_key_constraint_option_reflection_onupdate(self): return exclusions.closed() @property def temp_table_reflection(self): return exclusions.open() @property def temp_table_names(self): """target dialect supports listing of temporary table names""" return exclusions.closed() @property def temporary_tables(self): """target database supports temporary tables""" return exclusions.open() @property def temporary_views(self): """target database supports temporary views""" return exclusions.closed() @property def index_reflection(self): return exclusions.open() @property def indexes_with_ascdesc(self): """target database supports CREATE INDEX with per-column ASC/DESC.""" return exclusions.open() @property def indexes_with_expressions(self): """target database supports CREATE INDEX against SQL expressions.""" return exclusions.closed() @property def unique_constraint_reflection(self): """target dialect supports reflection of unique constraints""" return exclusions.open() @property def check_constraint_reflection(self): """target dialect supports reflection of check constraints""" return exclusions.closed() @property def duplicate_key_raises_integrity_error(self): """target dialect raises IntegrityError when reporting an INSERT with a primary key violation. (hint: it should) """ return exclusions.open() @property def unbounded_varchar(self): """Target database must support VARCHAR with no length""" return exclusions.open() @property def unicode_data(self): """Target database/dialect must support Python unicode objects with non-ASCII characters represented, delivered as bound parameters as well as in result rows. """ return exclusions.open() @property def unicode_ddl(self): """Target driver must support some degree of non-ascii symbol names. """ return exclusions.closed() @property def datetime_literals(self): """target dialect supports rendering of a date, time, or datetime as a literal string, e.g. via the TypeEngine.literal_processor() method. 
""" return exclusions.closed() @property def datetime(self): """target dialect supports representation of Python datetime.datetime() objects.""" return exclusions.open() @property def datetime_microseconds(self): """target dialect supports representation of Python datetime.datetime() with microsecond objects.""" return exclusions.open() @property def timestamp_microseconds(self): """target dialect supports representation of Python datetime.datetime() with microsecond objects but only if TIMESTAMP is used.""" return exclusions.closed() @property def datetime_historic(self): """target dialect supports representation of Python datetime.datetime() objects with historic (pre 1970) values.""" return exclusions.closed() @property def date(self): """target dialect supports representation of Python datetime.date() objects.""" return exclusions.open() @property def date_coerces_from_datetime(self): """target dialect accepts a datetime object as the target of a date column.""" return exclusions.open() @property def date_historic(self): """target dialect supports representation of Python datetime.datetime() objects with historic (pre 1970) values.""" return exclusions.closed() @property def time(self): """target dialect supports representation of Python datetime.time() objects.""" return exclusions.open() @property def time_microseconds(self): """target dialect supports representation of Python datetime.time() with microsecond objects.""" return exclusions.open() @property def binary_comparisons(self): """target database/driver can allow BLOB/BINARY fields to be compared against a bound parameter value. """ return exclusions.open() @property def binary_literals(self): """target backend supports simple binary literals, e.g. an expression like:: SELECT CAST('foo' AS BINARY) Where ``BINARY`` is the type emitted from :class:`.LargeBinary`, e.g. it could be ``BLOB`` or similar. Basically fails on Oracle. """ return exclusions.open() @property def autocommit(self): """target dialect supports 'AUTOCOMMIT' as an isolation_level""" return exclusions.closed() @property def isolation_level(self): """target dialect supports general isolation level settings. Note that this requirement, when enabled, also requires that the get_isolation_levels() method be implemented. """ return exclusions.closed() def get_isolation_levels(self, config): """Return a structure of supported isolation levels for the current testing dialect. The structure indicates to the testing suite what the expected "default" isolation should be, as well as the other values that are accepted. The dictionary has two keys, "default" and "supported". The "supported" key refers to a list of all supported levels and it should include AUTOCOMMIT if the dialect supports it. If the :meth:`.DefaultRequirements.isolation_level` requirement is not open, then this method has no return value. 
E.g.:: >>> testing.requirements.get_isolation_levels() { "default": "READ_COMMITED", "supported": [ "SERIALIZABLE", "READ UNCOMMITTED", "READ COMMITTED", "REPEATABLE READ", "AUTOCOMMIT" ] } """ @property def json_type(self): """target platform implements a native JSON type.""" return exclusions.closed() @property def json_array_indexes(self): """"target platform supports numeric array indexes within a JSON structure""" return self.json_type @property def json_index_supplementary_unicode_element(self): return exclusions.open() @property def precision_numerics_general(self): """target backend has general support for moderately high-precision numerics.""" return exclusions.open() @property def precision_numerics_enotation_small(self): """target backend supports Decimal() objects using E notation to represent very small values.""" return exclusions.closed() @property def precision_numerics_enotation_large(self): """target backend supports Decimal() objects using E notation to represent very large values.""" return exclusions.closed() @property def precision_numerics_many_significant_digits(self): """target backend supports values with many digits on both sides, such as 319438950232418390.273596, 87673.594069654243 """ return exclusions.closed() @property def implicit_decimal_binds(self): """target backend will return a selected Decimal as a Decimal, not a string. e.g.:: expr = decimal.Decimal("15.7563") value = e.scalar( select([literal(expr)]) ) assert value == expr See :ticket:`4036` """ return exclusions.open() @property def nested_aggregates(self): """target database can select an aggregate from a subquery that's also using an aggregate """ return exclusions.open() @property def recursive_fk_cascade(self): """target database must support ON DELETE CASCADE on a self-referential foreign key """ return exclusions.open() @property def precision_numerics_retains_significant_digits(self): """A precision numeric type will return empty significant digits, i.e. a value such as 10.000 will come back in Decimal form with the .000 maintained.""" return exclusions.closed() @property def precision_generic_float_type(self): """target backend will return native floating point numbers with at least seven decimal places when using the generic Float type. """ return exclusions.open() @property def floats_to_four_decimals(self): """target backend can return a floating-point number with four significant digits (such as 15.7563) accurately (i.e. without FP inaccuracies, such as 15.75629997253418). """ return exclusions.open() @property def fetch_null_from_numeric(self): """target backend doesn't crash when you try to select a NUMERIC value that has a value of NULL. Added to support Pyodbc bug #351. """ return exclusions.open() @property def text_type(self): """Target database must support an unbounded Text() " "type such as TEXT or CLOB""" return exclusions.open() @property def empty_strings_varchar(self): """target database can persist/return an empty string with a varchar. 
""" return exclusions.open() @property def empty_strings_text(self): """target database can persist/return an empty string with an unbounded text.""" return exclusions.open() @property def expressions_against_unbounded_text(self): """target database supports use of an unbounded textual field in a WHERE clause.""" return exclusions.open() @property def selectone(self): """target driver must support the literal statement 'select 1'""" return exclusions.open() @property def savepoints(self): """Target database must support savepoints.""" return exclusions.closed() @property def two_phase_transactions(self): """Target database must support two-phase transactions.""" return exclusions.closed() @property def update_from(self): """Target must support UPDATE..FROM syntax""" return exclusions.closed() @property def delete_from(self): """Target must support DELETE FROM..FROM or DELETE..USING syntax""" return exclusions.closed() @property def update_where_target_in_subquery(self): """Target must support UPDATE (or DELETE) where the same table is present in a subquery in the WHERE clause. This is an ANSI-standard syntax that apparently MySQL can't handle, such as:: UPDATE documents SET flag=1 WHERE documents.title IN (SELECT max(documents.title) AS title FROM documents GROUP BY documents.user_id ) """ return exclusions.open() @property def mod_operator_as_percent_sign(self): """target database must use a plain percent '%' as the 'modulus' operator.""" return exclusions.closed() @property def percent_schema_names(self): """target backend supports weird identifiers with percent signs in them, e.g. 'some % column'. this is a very weird use case but often has problems because of DBAPIs that use python formatting. It's not a critical use case either. """ return exclusions.closed() @property def order_by_col_from_union(self): """target database supports ordering by a column from a SELECT inside of a UNION E.g. (SELECT id, ...) UNION (SELECT id, ...) ORDER BY id """ return exclusions.open() @property def order_by_label_with_expression(self): """target backend supports ORDER BY a column label within an expression. Basically this:: select data as foo from test order by foo || 'bar' Lots of databases including PostgreSQL don't support this, so this is off by default. """ return exclusions.closed() @property def order_by_collation(self): def check(config): try: self.get_order_by_collation(config) return False except NotImplementedError: return True return exclusions.skip_if(check) def get_order_by_collation(self, config): raise NotImplementedError() @property def unicode_connections(self): """Target driver must support non-ASCII characters being passed at all. """ return exclusions.open() @property def graceful_disconnects(self): """Target driver must raise a DBAPI-level exception, such as InterfaceError, when the underlying connection has been closed and the execute() method is called. """ return exclusions.open() @property def independent_connections(self): """ Target must support simultaneous, independent database connections. """ return exclusions.open() @property def skip_mysql_on_windows(self): """Catchall for a large variety of MySQL on Windows failures""" return exclusions.open() @property def ad_hoc_engines(self): """Test environment must allow ad-hoc engine/connection creation. DBs that scale poorly for many connections, even when closed, i.e. Oracle, may use the "--low-connections" option which flags this requirement as not present. 
""" return exclusions.skip_if( lambda config: config.options.low_connections ) @property def timing_intensive(self): return exclusions.requires_tag("timing_intensive") @property def memory_intensive(self): return exclusions.requires_tag("memory_intensive") @property def threading_with_mock(self): """Mark tests that use threading and mock at the same time - stability issues have been observed with coverage + python 3.3 """ return exclusions.skip_if( lambda config: util.py3k and config.options.has_coverage, "Stability issues with coverage + py3k", ) @property def python2(self): return exclusions.skip_if( lambda: sys.version_info >= (3,), "Python version 2.xx is required.", ) @property def python3(self): return exclusions.skip_if( lambda: sys.version_info < (3,), "Python version 3.xx is required." ) @property def python37(self): return exclusions.skip_if( lambda: sys.version_info < (3, 7), "Python version 3.7 or greater is required.", ) @property def cpython(self): return exclusions.only_if( lambda: util.cpython, "cPython interpreter needed" ) @property def non_broken_pickle(self): from sqlalchemy.util import pickle return exclusions.only_if( lambda: not util.pypy and pickle.__name__ == "cPickle" or sys.version_info >= (3, 2), "Needs cPickle+cPython or newer Python 3 pickle", ) @property def predictable_gc(self): """target platform must remove all cycles unconditionally when gc.collect() is called, as well as clean out unreferenced subclasses. """ return self.cpython @property def no_coverage(self): """Test should be skipped if coverage is enabled. This is to block tests that exercise libraries that seem to be sensitive to coverage, such as PostgreSQL notice logging. """ return exclusions.skip_if( lambda config: config.options.has_coverage, "Issues observed when coverage is enabled", ) def _has_mysql_on_windows(self, config): return False def _has_mysql_fully_case_sensitive(self, config): return False @property def sqlite(self): return exclusions.skip_if(lambda: not self._has_sqlite()) @property def cextensions(self): return exclusions.skip_if( lambda: not self._has_cextensions(), "C extensions not installed" ) def _has_sqlite(self): from sqlalchemy import create_engine try: create_engine("sqlite://") return True except ImportError: return False def _has_cextensions(self): try: from sqlalchemy import cresultproxy, cprocessors # noqa return True except ImportError: return False @property def computed_columns(self): "Supports computed columns" return exclusions.closed() @property def computed_columns_stored(self): "Supports computed columns with `persisted=True`" return exclusions.closed() @property def computed_columns_virtual(self): "Supports computed columns with `persisted=False`" return exclusions.closed() @property def computed_columns_default_persisted(self): """If the default persistence is virtual or stored when `persisted` is omitted""" return exclusions.closed() @property def computed_columns_reflect_persisted(self): """If persistence information is returned by the reflection of computed columns""" return exclusions.closed() @property def supports_is_distinct_from(self): """Supports some form of "x IS [NOT] DISTINCT FROM y" construct. Different dialects will implement their own flavour, e.g., sqlite will emit "x IS NOT y" instead of "x IS DISTINCT FROM y". .. seealso:: :meth:`.ColumnOperators.is_distinct_from` """ return exclusions.skip_if( lambda config: not config.db.dialect.supports_is_distinct_from, "driver doesn't support an IS DISTINCT FROM construct", )
0
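The exclusion properties in the requirements file above are consumed by third-party dialect test suites, which subclass SuiteRequirements and re-open or re-close individual rules for their backend. Below is a minimal sketch of that pattern; the module path and the particular overrides are hypothetical, while the SuiteRequirements/exclusions names and the setup.cfg "requirement_cls" key come from this repository's testing plugin.

# Hypothetical third-party requirements module, e.g. shipped as
# mydialect/requirements.py and wired up in setup.cfg as:
#
#     [sqla_testing]
#     requirement_cls = mydialect.requirements:Requirements
#
from sqlalchemy.testing import exclusions
from sqlalchemy.testing.requirements import SuiteRequirements


class Requirements(SuiteRequirements):
    @property
    def savepoints(self):
        # assumption for this sketch: the backend supports SAVEPOINT
        return exclusions.open()

    @property
    def update_from(self):
        # assumption for this sketch: UPDATE..FROM is not available
        return exclusions.closed()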
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/entities.py
# testing/entities.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

import sqlalchemy as sa
from .. import exc as sa_exc
from ..util import compat

_repr_stack = set()


class BasicEntity(object):
    def __init__(self, **kw):
        for key, value in kw.items():
            setattr(self, key, value)

    def __repr__(self):
        if id(self) in _repr_stack:
            return object.__repr__(self)
        _repr_stack.add(id(self))
        try:
            return "%s(%s)" % (
                (self.__class__.__name__),
                ", ".join(
                    [
                        "%s=%r" % (key, getattr(self, key))
                        for key in sorted(self.__dict__.keys())
                        if not key.startswith("_")
                    ]
                ),
            )
        finally:
            _repr_stack.remove(id(self))


_recursion_stack = set()


class ComparableEntity(BasicEntity):
    def __hash__(self):
        return hash(self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        """Deep, sparse compare.

        Deeply compare two entities, following the non-None
        attributes of the non-persisted object, if possible.

        """
        if other is self:
            return True
        elif not self.__class__ == other.__class__:
            return False

        if id(self) in _recursion_stack:
            return True
        _recursion_stack.add(id(self))

        try:
            # pick the entity that's not SA persisted as the source
            try:
                self_key = sa.orm.attributes.instance_state(self).key
            except sa.orm.exc.NO_STATE:
                self_key = None

            if other is None:
                a = self
                b = other
            elif self_key is not None:
                a = other
                b = self
            else:
                a = self
                b = other

            for attr in list(a.__dict__):
                if attr.startswith("_"):
                    continue
                value = getattr(a, attr)

                try:
                    # handle lazy loader errors
                    battr = getattr(b, attr)
                except (AttributeError, sa_exc.UnboundExecutionError):
                    return False

                if hasattr(value, "__iter__") and not isinstance(
                    value, compat.string_types
                ):
                    if hasattr(value, "__getitem__") and not hasattr(
                        value, "keys"
                    ):
                        if list(value) != list(battr):
                            return False
                    else:
                        if set(value) != set(battr):
                            return False
                else:
                    if value is not None and value != battr:
                        return False
            return True
        finally:
            _recursion_stack.remove(id(self))
0
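To make the "deep, sparse compare" concrete, here is a small illustrative usage; the entity classes and attribute values are invented for the example. Only attributes actually set on the left-hand, non-persisted "sparse" object participate in the comparison, and list-valued attributes are compared element by element.

from sqlalchemy.testing.entities import ComparableEntity


class User(ComparableEntity):
    pass


class Address(ComparableEntity):
    pass


# the "expected" object sets only a subset of attributes...
expected = User(name="ed", addresses=[Address(email="ed@example.com")])

# ...and still compares equal to a more fully populated instance,
# because __eq__ walks expected.__dict__, not loaded.__dict__
loaded = User(
    id=5, name="ed", addresses=[Address(id=2, email="ed@example.com")]
)

assert expected == loaded
assert expected != User(name="jack", addresses=[])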
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/profiling.py
# testing/profiling.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Profiling support for unit and performance tests. These are special purpose profiling methods which operate in a more fine-grained way than nose's profiling plugin. """ import collections import contextlib import os import pstats import sys from . import config from .util import gc_collect from ..util import jython from ..util import pypy from ..util import update_wrapper from ..util import win32 try: import cProfile except ImportError: cProfile = None _current_test = None # ProfileStatsFile instance, set up in plugin_base _profile_stats = None class ProfileStatsFile(object): """"Store per-platform/fn profiling results in a file. We're still targeting Py2.5, 2.4 on 0.7 with no dependencies, so no json lib :( need to roll something silly """ def __init__(self, filename): self.force_write = ( config.options is not None and config.options.force_write_profiles ) self.write = self.force_write or ( config.options is not None and config.options.write_profiles ) self.fname = os.path.abspath(filename) self.short_fname = os.path.split(self.fname)[-1] self.data = collections.defaultdict( lambda: collections.defaultdict(dict) ) self._read() if self.write: # rewrite for the case where features changed, # etc. self._write() @property def platform_key(self): dbapi_key = config.db.name + "_" + config.db.driver if config.db.name == "sqlite" and config.db.dialect._is_url_file_db( config.db.url ): dbapi_key += "_file" # keep it at 2.7, 3.1, 3.2, etc. for now. py_version = ".".join([str(v) for v in sys.version_info[0:2]]) platform_tokens = [py_version] platform_tokens.append(dbapi_key) if jython: platform_tokens.append("jython") if pypy: platform_tokens.append("pypy") if win32: platform_tokens.append("win") platform_tokens.append( "nativeunicode" if config.db.dialect.convert_unicode else "dbapiunicode" ) _has_cext = config.requirements._has_cextensions() platform_tokens.append(_has_cext and "cextensions" or "nocextensions") return "_".join(platform_tokens) def has_stats(self): test_key = _current_test return ( test_key in self.data and self.platform_key in self.data[test_key] ) def result(self, callcount): test_key = _current_test per_fn = self.data[test_key] per_platform = per_fn[self.platform_key] if "counts" not in per_platform: per_platform["counts"] = counts = [] else: counts = per_platform["counts"] if "current_count" not in per_platform: per_platform["current_count"] = current_count = 0 else: current_count = per_platform["current_count"] has_count = len(counts) > current_count if not has_count: counts.append(callcount) if self.write: self._write() result = None else: result = per_platform["lineno"], counts[current_count] per_platform["current_count"] += 1 return result def replace(self, callcount): test_key = _current_test per_fn = self.data[test_key] per_platform = per_fn[self.platform_key] counts = per_platform["counts"] current_count = per_platform["current_count"] if current_count < len(counts): counts[current_count - 1] = callcount else: counts[-1] = callcount if self.write: self._write() def _header(self): return ( "# %s\n" "# This file is written out on a per-environment basis.\n" "# For each test in aaa_profiling, the corresponding " "function and \n" "# environment is located within this file. 
" "If it doesn't exist,\n" "# the test is skipped.\n" "# If a callcount does exist, it is compared " "to what we received. \n" "# assertions are raised if the counts do not match.\n" "# \n" "# To add a new callcount test, apply the function_call_count \n" "# decorator and re-run the tests using the --write-profiles \n" "# option - this file will be rewritten including the new count.\n" "# \n" ) % (self.fname) def _read(self): try: profile_f = open(self.fname) except IOError: return for lineno, line in enumerate(profile_f): line = line.strip() if not line or line.startswith("#"): continue test_key, platform_key, counts = line.split() per_fn = self.data[test_key] per_platform = per_fn[platform_key] c = [int(count) for count in counts.split(",")] per_platform["counts"] = c per_platform["lineno"] = lineno + 1 per_platform["current_count"] = 0 profile_f.close() def _write(self): print(("Writing profile file %s" % self.fname)) profile_f = open(self.fname, "w") profile_f.write(self._header()) for test_key in sorted(self.data): per_fn = self.data[test_key] profile_f.write("\n# TEST: %s\n\n" % test_key) for platform_key in sorted(per_fn): per_platform = per_fn[platform_key] c = ",".join(str(count) for count in per_platform["counts"]) profile_f.write("%s %s %s\n" % (test_key, platform_key, c)) profile_f.close() def function_call_count(variance=0.05, times=1, warmup=0): """Assert a target for a test case's function call count. The main purpose of this assertion is to detect changes in callcounts for various functions - the actual number is not as important. Callcounts are stored in a file keyed to Python version and OS platform information. This file is generated automatically for new tests, and versioned so that unexpected changes in callcounts will be detected. """ def decorate(fn): def wrap(*args, **kw): for warm in range(warmup): fn(*args, **kw) timerange = range(times) with count_functions(variance=variance): for time in timerange: rv = fn(*args, **kw) return rv return update_wrapper(wrap, fn) return decorate @contextlib.contextmanager def count_functions(variance=0.05): if cProfile is None: raise config._skip_test_exception("cProfile is not installed") if not _profile_stats.has_stats() and not _profile_stats.write: config.skip_test( "No profiling stats available on this " "platform for this function. Run tests with " "--write-profiles to add statistics to %s for " "this platform." % _profile_stats.short_fname ) gc_collect() pr = cProfile.Profile() pr.enable() # began = time.time() yield # ended = time.time() pr.disable() # s = compat.StringIO() stats = pstats.Stats(pr, stream=sys.stdout) # timespent = ended - began callcount = stats.total_calls expected = _profile_stats.result(callcount) if expected is None: expected_count = None else: line_no, expected_count = expected print(("Pstats calls: %d Expected %s" % (callcount, expected_count))) stats.sort_stats("cumulative") stats.print_stats() if expected_count: deviance = int(callcount * variance) failed = abs(callcount - expected_count) > deviance if failed or _profile_stats.force_write: if _profile_stats.write: _profile_stats.replace(callcount) else: raise AssertionError( "Adjusted function call count %s not within %s%% " "of expected %s, platform %s. Rerun with " "--write-profiles to " "regenerate this callcount." % ( callcount, (variance * 100), expected_count, _profile_stats.platform_key, ) )
0
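A sketch of how a profiling-suite test typically applies the decorator above; the test class and the statement being profiled are invented. On a run with --write-profiles, the first execution records stats.total_calls into the profile file; subsequent runs assert the count stays within the given variance.

from sqlalchemy import Column, Integer, MetaData, Table, select
from sqlalchemy.testing import fixtures, profiling


class CompileProfileTest(fixtures.TestBase):
    __requires__ = ("cpython",)

    @profiling.function_call_count(variance=0.10, times=3, warmup=1)
    def test_compile_select(self):
        # each timed run executes under cProfile via count_functions()
        t = Table("t", MetaData(), Column("x", Integer))
        select([t.c.x]).compile()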
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/fixtures.py
# testing/fixtures.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import re import sys import sqlalchemy as sa from . import assertions from . import config from . import schema from .engines import drop_all_tables from .entities import BasicEntity from .entities import ComparableEntity from .util import adict from .. import event from .. import util from ..ext.declarative import declarative_base from ..ext.declarative import DeclarativeMeta from ..schema import sort_tables_and_constraints # whether or not we use unittest changes things dramatically, # as far as how pytest collection works. class TestBase(object): # A sequence of database names to always run, regardless of the # constraints below. __whitelist__ = () # A sequence of requirement names matching testing.requires decorators __requires__ = () # A sequence of dialect names to exclude from the test class. __unsupported_on__ = () # If present, test class is only runnable for the *single* specified # dialect. If you need multiple, use __unsupported_on__ and invert. __only_on__ = None # A sequence of no-arg callables. If any are True, the entire testcase is # skipped. __skip_if__ = None def assert_(self, val, msg=None): assert val, msg # apparently a handful of tests are doing this....OK def setup(self): if hasattr(self, "setUp"): self.setUp() def teardown(self): if hasattr(self, "tearDown"): self.tearDown() @config.fixture() def connection(self): conn = config.db.connect() trans = conn.begin() try: yield conn finally: trans.rollback() conn.close() # propose a replacement for @testing.provide_metadata. # the problem with this is that TablesTest below has a ".metadata" # attribute already which is accessed directly as part of the # @testing.provide_metadata pattern. Might need to call this _metadata # for it to be useful. # @config.fixture() # def metadata(self): # """Provide bound MetaData for a single test, dropping afterwards.""" # # from . 
import engines # metadata = schema.MetaData(config.db) # try: # yield metadata # finally: # engines.drop_all_tables(metadata, config.db) class TablesTest(TestBase): # 'once', None run_setup_bind = "once" # 'once', 'each', None run_define_tables = "once" # 'once', 'each', None run_create_tables = "once" # 'once', 'each', None run_inserts = "each" # 'each', None run_deletes = "each" # 'once', None run_dispose_bind = None bind = None metadata = None tables = None other = None @classmethod def setup_class(cls): cls._init_class() cls._setup_once_tables() cls._setup_once_inserts() @classmethod def _init_class(cls): if cls.run_define_tables == "each": if cls.run_create_tables == "once": cls.run_create_tables = "each" assert cls.run_inserts in ("each", None) cls.other = adict() cls.tables = adict() cls.bind = cls.setup_bind() cls.metadata = sa.MetaData() cls.metadata.bind = cls.bind @classmethod def _setup_once_inserts(cls): if cls.run_inserts == "once": cls._load_fixtures() with cls.bind.begin() as conn: cls.insert_data(conn) @classmethod def _setup_once_tables(cls): if cls.run_define_tables == "once": cls.define_tables(cls.metadata) if cls.run_create_tables == "once": cls.metadata.create_all(cls.bind) cls.tables.update(cls.metadata.tables) def _setup_each_tables(self): if self.run_define_tables == "each": self.define_tables(self.metadata) if self.run_create_tables == "each": self.metadata.create_all(self.bind) self.tables.update(self.metadata.tables) elif self.run_create_tables == "each": self.metadata.create_all(self.bind) def _setup_each_inserts(self): if self.run_inserts == "each": self._load_fixtures() with self.bind.begin() as conn: self.insert_data(conn) def _teardown_each_tables(self): if self.run_define_tables == "each": self.tables.clear() if self.run_create_tables == "each": drop_all_tables(self.metadata, self.bind) self.metadata.clear() elif self.run_create_tables == "each": drop_all_tables(self.metadata, self.bind) # no need to run deletes if tables are recreated on setup if self.run_define_tables != "each" and self.run_deletes == "each": with self.bind.connect() as conn: for table in reversed( [ t for (t, fks) in sort_tables_and_constraints( self.metadata.tables.values() ) if t is not None ] ): try: conn.execute(table.delete()) except sa.exc.DBAPIError as ex: util.print_( ("Error emptying table %s: %r" % (table, ex)), file=sys.stderr, ) def setup(self): self._setup_each_tables() self._setup_each_inserts() def teardown(self): self._teardown_each_tables() @classmethod def _teardown_once_metadata_bind(cls): if cls.run_create_tables: drop_all_tables(cls.metadata, cls.bind) if cls.run_dispose_bind == "once": cls.dispose_bind(cls.bind) cls.metadata.bind = None if cls.run_setup_bind is not None: cls.bind = None @classmethod def teardown_class(cls): cls._teardown_once_metadata_bind() @classmethod def setup_bind(cls): return config.db @classmethod def dispose_bind(cls, bind): if hasattr(bind, "dispose"): bind.dispose() elif hasattr(bind, "close"): bind.close() @classmethod def define_tables(cls, metadata): pass @classmethod def fixtures(cls): return {} @classmethod def insert_data(cls, connection): pass def sql_count_(self, count, fn): self.assert_sql_count(self.bind, fn, count) def sql_eq_(self, callable_, statements): self.assert_sql(self.bind, callable_, statements) @classmethod def _load_fixtures(cls): """Insert rows as represented by the fixtures() method.""" headers, rows = {}, {} for table, data in cls.fixtures().items(): if len(data) < 2: continue if isinstance(table, 
util.string_types): table = cls.tables[table] headers[table] = data[0] rows[table] = data[1:] for table, fks in sort_tables_and_constraints( cls.metadata.tables.values() ): if table is None: continue if table not in headers: continue cls.bind.execute( table.insert(), [ dict(zip(headers[table], column_values)) for column_values in rows[table] ], ) class RemovesEvents(object): @util.memoized_property def _event_fns(self): return set() def event_listen(self, target, name, fn, **kw): self._event_fns.add((target, name, fn)) event.listen(target, name, fn, **kw) def teardown(self): for key in self._event_fns: event.remove(*key) super_ = super(RemovesEvents, self) if hasattr(super_, "teardown"): super_.teardown() class _ORMTest(object): @classmethod def teardown_class(cls): sa.orm.session.close_all_sessions() sa.orm.clear_mappers() class ORMTest(_ORMTest, TestBase): pass class MappedTest(_ORMTest, TablesTest, assertions.AssertsExecutionResults): # 'once', 'each', None run_setup_classes = "once" # 'once', 'each', None run_setup_mappers = "each" classes = None @classmethod def setup_class(cls): cls._init_class() if cls.classes is None: cls.classes = adict() cls._setup_once_tables() cls._setup_once_classes() cls._setup_once_mappers() cls._setup_once_inserts() @classmethod def teardown_class(cls): cls._teardown_once_class() cls._teardown_once_metadata_bind() def setup(self): self._setup_each_tables() self._setup_each_classes() self._setup_each_mappers() self._setup_each_inserts() def teardown(self): sa.orm.session.close_all_sessions() self._teardown_each_mappers() self._teardown_each_classes() self._teardown_each_tables() @classmethod def _teardown_once_class(cls): cls.classes.clear() _ORMTest.teardown_class() @classmethod def _setup_once_classes(cls): if cls.run_setup_classes == "once": cls._with_register_classes(cls.setup_classes) @classmethod def _setup_once_mappers(cls): if cls.run_setup_mappers == "once": cls._with_register_classes(cls.setup_mappers) def _setup_each_mappers(self): if self.run_setup_mappers == "each": self._with_register_classes(self.setup_mappers) def _setup_each_classes(self): if self.run_setup_classes == "each": self._with_register_classes(self.setup_classes) @classmethod def _with_register_classes(cls, fn): """Run a setup method, framing the operation with a Base class that will catch new subclasses to be established within the "classes" registry. 
""" cls_registry = cls.classes class FindFixture(type): def __init__(cls, classname, bases, dict_): cls_registry[classname] = cls type.__init__(cls, classname, bases, dict_) class _Base(util.with_metaclass(FindFixture, object)): pass class Basic(BasicEntity, _Base): pass class Comparable(ComparableEntity, _Base): pass cls.Basic = Basic cls.Comparable = Comparable fn() def _teardown_each_mappers(self): # some tests create mappers in the test bodies # and will define setup_mappers as None - # clear mappers in any case if self.run_setup_mappers != "once": sa.orm.clear_mappers() def _teardown_each_classes(self): if self.run_setup_classes != "once": self.classes.clear() @classmethod def setup_classes(cls): pass @classmethod def setup_mappers(cls): pass class DeclarativeMappedTest(MappedTest): run_setup_classes = "once" run_setup_mappers = "once" @classmethod def _setup_once_tables(cls): pass @classmethod def _with_register_classes(cls, fn): cls_registry = cls.classes class FindFixtureDeclarative(DeclarativeMeta): def __init__(cls, classname, bases, dict_): cls_registry[classname] = cls DeclarativeMeta.__init__(cls, classname, bases, dict_) class DeclarativeBasic(object): __table_cls__ = schema.Table _DeclBase = declarative_base( metadata=cls.metadata, metaclass=FindFixtureDeclarative, cls=DeclarativeBasic, ) cls.DeclarativeBasic = _DeclBase # sets up cls.Basic which is helpful for things like composite # classes super(DeclarativeMappedTest, cls)._with_register_classes(fn) if cls.metadata.tables and cls.run_create_tables: cls.metadata.create_all(config.db) class ComputedReflectionFixtureTest(TablesTest): run_inserts = run_deletes = None __backend__ = True __requires__ = ("computed_columns", "table_reflection") regexp = re.compile(r"[\[\]\(\)\s`'\"]*") def normalize(self, text): return self.regexp.sub("", text).lower() @classmethod def define_tables(cls, metadata): from .. import Integer from .. import testing from ..schema import Column from ..schema import Computed from ..schema import Table Table( "computed_default_table", metadata, Column("id", Integer, primary_key=True), Column("normal", Integer), Column("computed_col", Integer, Computed("normal + 42")), Column("with_default", Integer, server_default="42"), ) t = Table( "computed_column_table", metadata, Column("id", Integer, primary_key=True), Column("normal", Integer), Column("computed_no_flag", Integer, Computed("normal + 42")), ) if testing.requires.schemas.enabled: t2 = Table( "computed_column_table", metadata, Column("id", Integer, primary_key=True), Column("normal", Integer), Column("computed_no_flag", Integer, Computed("normal / 42")), schema=config.test_schema, ) if testing.requires.computed_columns_virtual.enabled: t.append_column( Column( "computed_virtual", Integer, Computed("normal + 2", persisted=False), ) ) if testing.requires.schemas.enabled: t2.append_column( Column( "computed_virtual", Integer, Computed("normal / 2", persisted=False), ) ) if testing.requires.computed_columns_stored.enabled: t.append_column( Column( "computed_stored", Integer, Computed("normal - 42", persisted=True), ) ) if testing.requires.schemas.enabled: t2.append_column( Column( "computed_stored", Integer, Computed("normal * 42", persisted=True), ) )
0
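Putting the TablesTest lifecycle hooks above together, a typical subclass looks like the following sketch; the table, fixture rows, and assertion are invented. Note that fixtures() returns, per table, a header tuple followed by value tuples, which is exactly the shape _load_fixtures() unpacks.

import sqlalchemy as sa
from sqlalchemy.testing import eq_, fixtures


class WidgetTest(fixtures.TablesTest):
    run_inserts = "each"

    @classmethod
    def define_tables(cls, metadata):
        sa.Table(
            "widgets",
            metadata,
            sa.Column("id", sa.Integer, primary_key=True),
            sa.Column("name", sa.String(50)),
        )

    @classmethod
    def fixtures(cls):
        # first row is the header tuple, remaining rows are values
        return dict(
            widgets=(
                ("id", "name"),
                (1, "sprocket"),
                (2, "flange"),
            )
        )

    def test_count(self):
        count = self.bind.execute(
            sa.select([sa.func.count()]).select_from(self.tables.widgets)
        ).scalar()
        eq_(count, 2)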
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/schema.py
# testing/schema.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from . import config from . import exclusions from .. import event from .. import schema __all__ = ["Table", "Column"] table_options = {} def Table(*args, **kw): """A schema.Table wrapper/hook for dialect-specific tweaks.""" test_opts = {k: kw.pop(k) for k in list(kw) if k.startswith("test_")} kw.update(table_options) if exclusions.against(config._current, "mysql"): if ( "mysql_engine" not in kw and "mysql_type" not in kw and "autoload_with" not in kw ): if "test_needs_fk" in test_opts or "test_needs_acid" in test_opts: kw["mysql_engine"] = "InnoDB" else: kw["mysql_engine"] = "MyISAM" # Apply some default cascading rules for self-referential foreign keys. # MySQL InnoDB has some issues around selecting self-refs too. if exclusions.against(config._current, "firebird"): table_name = args[0] unpack = config.db.dialect.identifier_preparer.unformat_identifiers # Only going after ForeignKeys in Columns. May need to # expand to ForeignKeyConstraint too. fks = [ fk for col in args if isinstance(col, schema.Column) for fk in col.foreign_keys ] for fk in fks: # root around in raw spec ref = fk._colspec if isinstance(ref, schema.Column): name = ref.table.name else: # take just the table name: on FB there cannot be # a schema, so the first element is always the # table name, possibly followed by the field name name = unpack(ref)[0] if name == table_name: if fk.ondelete is None: fk.ondelete = "CASCADE" if fk.onupdate is None: fk.onupdate = "CASCADE" return schema.Table(*args, **kw) def Column(*args, **kw): """A schema.Column wrapper/hook for dialect-specific tweaks.""" test_opts = {k: kw.pop(k) for k in list(kw) if k.startswith("test_")} if not config.requirements.foreign_key_ddl.enabled_for_config(config): args = [arg for arg in args if not isinstance(arg, schema.ForeignKey)] col = schema.Column(*args, **kw) if test_opts.get("test_needs_autoincrement", False) and kw.get( "primary_key", False ): if col.default is None and col.server_default is None: col.autoincrement = True # allow any test suite to pick up on this col.info["test_needs_autoincrement"] = True # hardcoded rule for firebird, oracle; this should # be moved out if exclusions.against(config._current, "firebird", "oracle"): def add_seq(c, tbl): c._init_items( schema.Sequence( _truncate_name( config.db.dialect, tbl.name + "_" + c.name + "_seq" ), optional=True, ) ) event.listen(col, "after_parent_attach", add_seq, propagate=True) return col def _truncate_name(dialect, name): if len(name) > dialect.max_identifier_length: return ( name[0 : max(dialect.max_identifier_length - 6, 0)] + "_" + hex(hash(name) % 64)[2:] ) else: return name
0
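The suite's own tests create tables through these wrappers rather than the plain schema constructs, passing the test_* hints that the wrappers pop off. An illustrative sketch follows; the table is invented, and since the wrappers consult config._current, this assumes it runs under a configured test harness.

from sqlalchemy import Integer, MetaData, String
from sqlalchemy.testing.schema import Column, Table

metadata = MetaData()

accounts = Table(
    "accounts",
    metadata,
    # flags the pk so backends without native autoincrement
    # (firebird, oracle) get an optional Sequence attached
    Column(
        "id", Integer, primary_key=True, test_needs_autoincrement=True
    ),
    Column("name", String(50)),
    # on MySQL, forces mysql_engine="InnoDB" instead of MyISAM
    test_needs_fk=True,
)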
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/mock.py
# testing/mock.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Import stub for mock library.
"""
from __future__ import absolute_import

from ..util import py33


if py33:
    from unittest.mock import MagicMock
    from unittest.mock import Mock
    from unittest.mock import call
    from unittest.mock import patch
    from unittest.mock import ANY
else:
    try:
        from mock import MagicMock  # noqa
        from mock import Mock  # noqa
        from mock import call  # noqa
        from mock import patch  # noqa
        from mock import ANY  # noqa
    except ImportError:
        raise ImportError(
            "SQLAlchemy's test suite requires the "
            "'mock' library as of 0.8.2."
        )
0
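Test code imports mock names through this stub so the same spelling works on Python 2 (the third-party mock package) and Python 3 (unittest.mock). For example:

from sqlalchemy.testing.mock import Mock, call, patch

canary = Mock()
canary(1, x=2)
canary.assert_called_once_with(1, x=2)
assert canary.mock_calls == [call(1, x=2)]

# patch() behaves the same regardless of which backing library was found
with patch("os.getcwd", return_value="/tmp"):
    import os

    assert os.getcwd() == "/tmp"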
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/plugin/bootstrap.py
""" Bootstrapper for test framework plugins. The entire rationale for this system is to get the modules in plugin/ imported without importing all of the supporting library, so that we can set up things for testing before coverage starts. The rationale for all of plugin/ being *in* the supporting library in the first place is so that the testing and plugin suite is available to other libraries, mainly external SQLAlchemy and Alembic dialects, to make use of the same test environment and standard suites available to SQLAlchemy/Alembic themselves without the need to ship/install a separate package outside of SQLAlchemy. NOTE: copied/adapted from SQLAlchemy master for backwards compatibility; this should be removable when Alembic targets SQLAlchemy 1.0.0. """ import os import sys bootstrap_file = locals()["bootstrap_file"] to_bootstrap = locals()["to_bootstrap"] def load_file_as_module(name): path = os.path.join(os.path.dirname(bootstrap_file), "%s.py" % name) if sys.version_info >= (3, 3): from importlib import machinery mod = machinery.SourceFileLoader(name, path).load_module() else: import imp mod = imp.load_source(name, path) return mod if to_bootstrap == "pytest": sys.modules["sqla_plugin_base"] = load_file_as_module("plugin_base") sys.modules["sqla_pytestplugin"] = load_file_as_module("pytestplugin") else: raise Exception("unknown bootstrap: %s" % to_bootstrap) # noqa
0
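The bootstrapper is not imported normally; a caller exec()s it with bootstrap_file and to_bootstrap pre-bound in the namespace, which is what the locals() lookups above rely on. A sketch of the conftest.py-style invocation, with the path invented for the example:

import os

# path to the bootstrapper inside the library under test (example path)
bootstrap_file = os.path.join(
    os.path.dirname(__file__),
    "lib", "sqlalchemy", "testing", "plugin", "bootstrap.py",
)

with open(bootstrap_file) as f:
    code = compile(f.read(), "bootstrap.py", "exec")
    to_bootstrap = "pytest"
    exec(code, globals(), locals())

# the bootstrapper has now registered these names in sys.modules
from sqla_pytestplugin import *  # noqa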
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/plugin/plugin_base.py
# plugin/plugin_base.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Testing extensions. this module is designed to work as a testing-framework-agnostic library, created so that multiple test frameworks can be supported at once (mostly so that we can migrate to new ones). The current target is pytest. """ from __future__ import absolute_import import abc import re import sys py3k = sys.version_info >= (3, 0) if py3k: import configparser ABC = abc.ABC else: import ConfigParser as configparser import collections as collections_abc # noqa class ABC(object): __metaclass__ = abc.ABCMeta # late imports fixtures = None engines = None exclusions = None warnings = None profiling = None assertions = None requirements = None config = None testing = None util = None file_config = None logging = None include_tags = set() exclude_tags = set() options = None def setup_options(make_option): make_option( "--log-info", action="callback", type="string", callback=_log, help="turn on info logging for <LOG> (multiple OK)", ) make_option( "--log-debug", action="callback", type="string", callback=_log, help="turn on debug logging for <LOG> (multiple OK)", ) make_option( "--db", action="append", type="string", dest="db", help="Use prefab database uri. Multiple OK, " "first one is run by default.", ) make_option( "--dbs", action="callback", zeroarg_callback=_list_dbs, help="List available prefab dbs", ) make_option( "--dburi", action="append", type="string", dest="dburi", help="Database uri. Multiple OK, " "first one is run by default.", ) make_option( "--dropfirst", action="store_true", dest="dropfirst", help="Drop all tables in the target database first", ) make_option( "--backend-only", action="store_true", dest="backend_only", help="Run only tests marked with __backend__ or __sparse_backend__", ) make_option( "--nomemory", action="store_true", dest="nomemory", help="Don't run memory profiling tests", ) make_option( "--postgresql-templatedb", type="string", help="name of template database to use for PostgreSQL " "CREATE DATABASE (defaults to current database)", ) make_option( "--low-connections", action="store_true", dest="low_connections", help="Use a low number of distinct connections - " "i.e. 
for Oracle TNS", ) make_option( "--write-idents", type="string", dest="write_idents", help="write out generated follower idents to <file>, " "when -n<num> is used", ) make_option( "--reversetop", action="store_true", dest="reversetop", default=False, help="Use a random-ordering set implementation in the ORM " "(helps reveal dependency issues)", ) make_option( "--requirements", action="callback", type="string", callback=_requirements_opt, help="requirements class for testing, overrides setup.cfg", ) make_option( "--with-cdecimal", action="store_true", dest="cdecimal", default=False, help="Monkeypatch the cdecimal library into Python 'decimal' " "for all tests", ) make_option( "--include-tag", action="callback", callback=_include_tag, type="string", help="Include tests with tag <tag>", ) make_option( "--exclude-tag", action="callback", callback=_exclude_tag, type="string", help="Exclude tests with tag <tag>", ) make_option( "--write-profiles", action="store_true", dest="write_profiles", default=False, help="Write/update failing profiling data.", ) make_option( "--force-write-profiles", action="store_true", dest="force_write_profiles", default=False, help="Unconditionally write/update profiling data.", ) def configure_follower(follower_ident): """Configure required state for a follower. This invokes in the parent process and typically includes database creation. """ from sqlalchemy.testing import provision provision.FOLLOWER_IDENT = follower_ident def memoize_important_follower_config(dict_): """Store important configuration we will need to send to a follower. This invokes in the parent process after normal config is set up. This is necessary as pytest seems to not be using forking, so we start with nothing in memory, *but* it isn't running our argparse callables, so we have to just copy all of that over. """ dict_["memoized_config"] = { "include_tags": include_tags, "exclude_tags": exclude_tags, } def restore_important_follower_config(dict_): """Restore important configuration needed by a follower. This invokes in the follower process. """ global include_tags, exclude_tags include_tags.update(dict_["memoized_config"]["include_tags"]) exclude_tags.update(dict_["memoized_config"]["exclude_tags"]) def read_config(): global file_config file_config = configparser.ConfigParser() file_config.read(["setup.cfg", "test.cfg"]) def pre_begin(opt): """things to set up early, before coverage might be setup.""" global options options = opt for fn in pre_configure: fn(options, file_config) def set_coverage_flag(value): options.has_coverage = value def post_begin(): """things to set up later, once we know coverage is running.""" # Lazy setup of other options (post coverage) for fn in post_configure: fn(options, file_config) # late imports, has to happen after config. 
global util, fixtures, engines, exclusions, assertions global warnings, profiling, config, testing from sqlalchemy import testing # noqa from sqlalchemy.testing import fixtures, engines, exclusions # noqa from sqlalchemy.testing import assertions, warnings, profiling # noqa from sqlalchemy.testing import config # noqa from sqlalchemy import util # noqa warnings.setup_filters() def _log(opt_str, value, parser): global logging if not logging: import logging logging.basicConfig() if opt_str.endswith("-info"): logging.getLogger(value).setLevel(logging.INFO) elif opt_str.endswith("-debug"): logging.getLogger(value).setLevel(logging.DEBUG) def _list_dbs(*args): print("Available --db options (use --dburi to override)") for macro in sorted(file_config.options("db")): print("%20s\t%s" % (macro, file_config.get("db", macro))) sys.exit(0) def _requirements_opt(opt_str, value, parser): _setup_requirements(value) def _exclude_tag(opt_str, value, parser): exclude_tags.add(value.replace("-", "_")) def _include_tag(opt_str, value, parser): include_tags.add(value.replace("-", "_")) pre_configure = [] post_configure = [] def pre(fn): pre_configure.append(fn) return fn def post(fn): post_configure.append(fn) return fn @pre def _setup_options(opt, file_config): global options options = opt @pre def _set_nomemory(opt, file_config): if opt.nomemory: exclude_tags.add("memory_intensive") @pre def _monkeypatch_cdecimal(options, file_config): if options.cdecimal: import cdecimal sys.modules["decimal"] = cdecimal @post def _init_symbols(options, file_config): from sqlalchemy.testing import config config._fixture_functions = _fixture_fn_class() @post def _engine_uri(options, file_config): from sqlalchemy.testing import config from sqlalchemy import testing from sqlalchemy.testing import provision if options.dburi: db_urls = list(options.dburi) else: db_urls = [] if options.db: for db_token in options.db: for db in re.split(r"[,\s]+", db_token): if db not in file_config.options("db"): raise RuntimeError( "Unknown URI specifier '%s'. " "Specify --dbs for known uris." 
% db ) else: db_urls.append(file_config.get("db", db)) if not db_urls: db_urls.append(file_config.get("db", "default")) config._current = None for db_url in db_urls: if options.write_idents and provision.FOLLOWER_IDENT: # != 'master': with open(options.write_idents, "a") as file_: file_.write(provision.FOLLOWER_IDENT + " " + db_url + "\n") cfg = provision.setup_config( db_url, options, file_config, provision.FOLLOWER_IDENT ) if not config._current: cfg.set_as_current(cfg, testing) @post def _requirements(options, file_config): requirement_cls = file_config.get("sqla_testing", "requirement_cls") _setup_requirements(requirement_cls) def _setup_requirements(argument): from sqlalchemy.testing import config from sqlalchemy import testing if config.requirements is not None: return modname, clsname = argument.split(":") # importlib.import_module() only introduced in 2.7, a little # late mod = __import__(modname) for component in modname.split(".")[1:]: mod = getattr(mod, component) req_cls = getattr(mod, clsname) config.requirements = testing.requires = req_cls() @post def _prep_testing_database(options, file_config): from sqlalchemy.testing import config, util from sqlalchemy.testing.exclusions import against from sqlalchemy import schema, inspect if options.dropfirst: for cfg in config.Config.all_configs(): e = cfg.db inspector = inspect(e) try: view_names = inspector.get_view_names() except NotImplementedError: pass else: for vname in view_names: e.execute( schema._DropView( schema.Table(vname, schema.MetaData()) ) ) if config.requirements.schemas.enabled_for_config(cfg): try: view_names = inspector.get_view_names(schema="test_schema") except NotImplementedError: pass else: for vname in view_names: e.execute( schema._DropView( schema.Table( vname, schema.MetaData(), schema="test_schema", ) ) ) util.drop_all_tables(e, inspector) if config.requirements.schemas.enabled_for_config(cfg): util.drop_all_tables(e, inspector, schema=cfg.test_schema) if against(cfg, "postgresql"): from sqlalchemy.dialects import postgresql for enum in inspector.get_enums("*"): e.execute( postgresql.DropEnumType( postgresql.ENUM( name=enum["name"], schema=enum["schema"] ) ) ) @post def _reverse_topological(options, file_config): if options.reversetop: from sqlalchemy.orm.util import randomize_unitofwork randomize_unitofwork() @post def _post_setup_options(opt, file_config): from sqlalchemy.testing import config config.options = options config.file_config = file_config @post def _setup_profiling(options, file_config): from sqlalchemy.testing import profiling profiling._profile_stats = profiling.ProfileStatsFile( file_config.get("sqla_testing", "profile_file") ) def want_class(name, cls): if not issubclass(cls, fixtures.TestBase): return False elif name.startswith("_"): return False elif ( config.options.backend_only and not getattr(cls, "__backend__", False) and not getattr(cls, "__sparse_backend__", False) ): return False else: return True def want_method(cls, fn): if not fn.__name__.startswith("test_"): return False elif fn.__module__ is None: return False elif include_tags: return ( hasattr(cls, "__tags__") and exclusions.tags(cls.__tags__).include_test( include_tags, exclude_tags ) ) or ( hasattr(fn, "_sa_exclusion_extend") and fn._sa_exclusion_extend.include_test( include_tags, exclude_tags ) ) elif exclude_tags and hasattr(cls, "__tags__"): return exclusions.tags(cls.__tags__).include_test( include_tags, exclude_tags ) elif exclude_tags and hasattr(fn, "_sa_exclusion_extend"): return 
fn._sa_exclusion_extend.include_test(include_tags, exclude_tags) else: return True def generate_sub_tests(cls, module): if getattr(cls, "__backend__", False) or getattr( cls, "__sparse_backend__", False ): sparse = getattr(cls, "__sparse_backend__", False) for cfg in _possible_configs_for_cls(cls, sparse=sparse): orig_name = cls.__name__ # we can have special chars in these names except for the # pytest junit plugin, which is tripped up by the brackets # and periods, so sanitize alpha_name = re.sub(r"[_\[\]\.]+", "_", cfg.name) alpha_name = re.sub(r"_+$", "", alpha_name) name = "%s_%s" % (cls.__name__, alpha_name) subcls = type( name, (cls,), {"_sa_orig_cls_name": orig_name, "__only_on_config__": cfg}, ) setattr(module, name, subcls) yield subcls else: yield cls def start_test_class(cls): _do_skips(cls) _setup_engine(cls) def stop_test_class(cls): # from sqlalchemy import inspect # assert not inspect(testing.db).get_table_names() engines.testing_reaper._stop_test_ctx() try: if not options.low_connections: assertions.global_cleanup_assertions() finally: _restore_engine() def _restore_engine(): config._current.reset(testing) def final_process_cleanup(): engines.testing_reaper._stop_test_ctx_aggressive() assertions.global_cleanup_assertions() _restore_engine() def _setup_engine(cls): if getattr(cls, "__engine_options__", None): eng = engines.testing_engine(options=cls.__engine_options__) config._current.push_engine(eng, testing) def before_test(test, test_module_name, test_class, test_name): # format looks like: # "test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause" name = getattr(test_class, "_sa_orig_cls_name", test_class.__name__) id_ = "%s.%s.%s" % (test_module_name, name, test_name) profiling._current_test = id_ def after_test(test): engines.testing_reaper._after_test_ctx() def _possible_configs_for_cls(cls, reasons=None, sparse=False): all_configs = set(config.Config.all_configs()) if cls.__unsupported_on__: spec = exclusions.db_spec(*cls.__unsupported_on__) for config_obj in list(all_configs): if spec(config_obj): all_configs.remove(config_obj) if getattr(cls, "__only_on__", None): spec = exclusions.db_spec(*util.to_list(cls.__only_on__)) for config_obj in list(all_configs): if not spec(config_obj): all_configs.remove(config_obj) if getattr(cls, "__only_on_config__", None): all_configs.intersection_update([cls.__only_on_config__]) if hasattr(cls, "__requires__"): requirements = config.requirements for config_obj in list(all_configs): for requirement in cls.__requires__: check = getattr(requirements, requirement) skip_reasons = check.matching_config_reasons(config_obj) if skip_reasons: all_configs.remove(config_obj) if reasons is not None: reasons.extend(skip_reasons) break if hasattr(cls, "__prefer_requires__"): non_preferred = set() requirements = config.requirements for config_obj in list(all_configs): for requirement in cls.__prefer_requires__: check = getattr(requirements, requirement) if not check.enabled_for_config(config_obj): non_preferred.add(config_obj) if all_configs.difference(non_preferred): all_configs.difference_update(non_preferred) if sparse: # pick only one config from each base dialect # sorted so we get the same backend each time selecting the highest # server version info. 
per_dialect = {} for cfg in reversed( sorted( all_configs, key=lambda cfg: ( cfg.db.name, cfg.db.dialect.server_version_info, ), ) ): db = cfg.db.name if db not in per_dialect: per_dialect[db] = cfg return per_dialect.values() return all_configs def _do_skips(cls): reasons = [] all_configs = _possible_configs_for_cls(cls, reasons) if getattr(cls, "__skip_if__", False): for c in getattr(cls, "__skip_if__"): if c(): config.skip_test( "'%s' skipped by %s" % (cls.__name__, c.__name__) ) if not all_configs: msg = "'%s' unsupported on any DB implementation %s%s" % ( cls.__name__, ", ".join( "'%s(%s)+%s'" % ( config_obj.db.name, ".".join( str(dig) for dig in exclusions._server_version(config_obj.db) ), config_obj.db.driver, ) for config_obj in config.Config.all_configs() ), ", ".join(reasons), ) config.skip_test(msg) elif hasattr(cls, "__prefer_backends__"): non_preferred = set() spec = exclusions.db_spec(*util.to_list(cls.__prefer_backends__)) for config_obj in all_configs: if not spec(config_obj): non_preferred.add(config_obj) if all_configs.difference(non_preferred): all_configs.difference_update(non_preferred) if config._current not in all_configs: _setup_config(all_configs.pop(), cls) def _setup_config(config_obj, ctx): config._current.push(config_obj, testing) class FixtureFunctions(ABC): @abc.abstractmethod def skip_test_exception(self, *arg, **kw): raise NotImplementedError() @abc.abstractmethod def combinations(self, *args, **kw): raise NotImplementedError() @abc.abstractmethod def param_ident(self, *args, **kw): raise NotImplementedError() @abc.abstractmethod def fixture(self, *arg, **kw): raise NotImplementedError() def get_current_test_name(self): raise NotImplementedError() _fixture_fn_class = None def set_fixture_functions(fixture_fn_class): global _fixture_fn_class _fixture_fn_class = fixture_fn_class
0
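Configuration steps register themselves on the pre_configure/post_configure lists through the @pre and @post decorators, the same way _engine_uri() and _requirements() do above. A harness could append its own step in the same fashion; the hook body here is invented, and the import mirrors the try/except pattern pytestplugin.py itself uses:

try:
    import sqla_plugin_base as plugin_base  # installed by bootstrap.py
except ImportError:
    from sqlalchemy.testing.plugin import plugin_base


@plugin_base.post
def _announce_backend(options, file_config):
    # runs during post_begin(), after the engine URI(s) are set up
    from sqlalchemy.testing import config

    print("testing against: %s" % config.db.url)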
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/plugin/pytestplugin.py
try: # installed by bootstrap.py import sqla_plugin_base as plugin_base except ImportError: # assume we're a package, use traditional import from . import plugin_base import argparse import collections from functools import update_wrapper import inspect import itertools import operator import os import re import sys import pytest try: import typing except ImportError: pass else: if typing.TYPE_CHECKING: from typing import Sequence try: import xdist # noqa has_xdist = True except ImportError: has_xdist = False def pytest_addoption(parser): group = parser.getgroup("sqlalchemy") def make_option(name, **kw): callback_ = kw.pop("callback", None) if callback_: class CallableAction(argparse.Action): def __call__( self, parser, namespace, values, option_string=None ): callback_(option_string, values, parser) kw["action"] = CallableAction zeroarg_callback = kw.pop("zeroarg_callback", None) if zeroarg_callback: class CallableAction(argparse.Action): def __init__( self, option_strings, dest, default=False, required=False, help=None, # noqa ): super(CallableAction, self).__init__( option_strings=option_strings, dest=dest, nargs=0, const=True, default=default, required=required, help=help, ) def __call__( self, parser, namespace, values, option_string=None ): zeroarg_callback(option_string, values, parser) kw["action"] = CallableAction group.addoption(name, **kw) plugin_base.setup_options(make_option) plugin_base.read_config() def pytest_configure(config): pytest.register_assert_rewrite("sqlalchemy.testing.assertions") if hasattr(config, "slaveinput"): plugin_base.restore_important_follower_config(config.slaveinput) plugin_base.configure_follower(config.slaveinput["follower_ident"]) else: if config.option.write_idents and os.path.exists( config.option.write_idents ): os.remove(config.option.write_idents) plugin_base.pre_begin(config.option) plugin_base.set_coverage_flag( bool(getattr(config.option, "cov_source", False)) ) plugin_base.set_fixture_functions(PytestFixtureFunctions) def pytest_sessionstart(session): plugin_base.post_begin() def pytest_sessionfinish(session): plugin_base.final_process_cleanup() if has_xdist: import uuid def pytest_configure_node(node): # the master for each node fills slaveinput dictionary # which pytest-xdist will transfer to the subprocess plugin_base.memoize_important_follower_config(node.slaveinput) node.slaveinput["follower_ident"] = "test_%s" % uuid.uuid4().hex[0:12] from sqlalchemy.testing import provision provision.create_follower_db(node.slaveinput["follower_ident"]) def pytest_testnodedown(node, error): from sqlalchemy.testing import provision provision.drop_follower_db(node.slaveinput["follower_ident"]) def pytest_collection_modifyitems(session, config, items): # look for all those classes that specify __backend__ and # expand them out into per-database test cases. # this is much easier to do within pytest_pycollect_makeitem, however # pytest is iterating through cls.__dict__ as makeitem is # called which causes a "dictionary changed size" error on py3k. # I'd submit a pullreq for them to turn it into a list first, but # it's to suit the rather odd use case here which is that we are adding # new classes to a module on the fly. 
rebuilt_items = collections.defaultdict( lambda: collections.defaultdict(list) ) items[:] = [ item for item in items if isinstance(item.parent, pytest.Instance) and not item.parent.parent.name.startswith("_") ] test_classes = set(item.parent for item in items) for test_class in test_classes: for sub_cls in plugin_base.generate_sub_tests( test_class.cls, test_class.parent.module ): if sub_cls is not test_class.cls: per_cls_dict = rebuilt_items[test_class.cls] # in pytest 5.4.0 # for inst in pytest.Class.from_parent( # test_class.parent.parent, name=sub_cls.__name__ # ).collect(): for inst in pytest.Class( sub_cls.__name__, parent=test_class.parent.parent ).collect(): for t in inst.collect(): per_cls_dict[t.name].append(t) newitems = [] for item in items: if item.parent.cls in rebuilt_items: newitems.extend(rebuilt_items[item.parent.cls][item.name]) else: newitems.append(item) # seems like the functions attached to a test class aren't sorted already? # is that true and why's that? (when using unittest, they're sorted) items[:] = sorted( newitems, key=lambda item: ( item.parent.parent.parent.name, item.parent.parent.name, item.name, ), ) def pytest_pycollect_makeitem(collector, name, obj): if inspect.isclass(obj) and plugin_base.want_class(name, obj): # in pytest 5.4.0 # return [ # pytest.Class.from_parent(collector, # name=parametrize_cls.__name__) # for parametrize_cls in _parametrize_cls(collector.module, obj) # ] return [ pytest.Class(parametrize_cls.__name__, parent=collector) for parametrize_cls in _parametrize_cls(collector.module, obj) ] elif ( inspect.isfunction(obj) and isinstance(collector, pytest.Instance) and plugin_base.want_method(collector.cls, obj) ): # None means, fall back to default logic, which includes # method-level parametrize return None else: # empty list means skip this item return [] _current_class = None def _parametrize_cls(module, cls): """implement a class-based version of pytest parametrize.""" if "_sa_parametrize" not in cls.__dict__: return [cls] _sa_parametrize = cls._sa_parametrize classes = [] for full_param_set in itertools.product( *[params for argname, params in _sa_parametrize] ): cls_variables = {} for argname, param in zip( [_sa_param[0] for _sa_param in _sa_parametrize], full_param_set ): if not argname: raise TypeError("need argnames for class-based combinations") argname_split = re.split(r",\s*", argname) for arg, val in zip(argname_split, param.values): cls_variables[arg] = val parametrized_name = "_".join( # token is a string, but in py2k pytest is giving us a unicode, # so call str() on it. str(re.sub(r"\W", "", token)) for param in full_param_set for token in param.id.split("-") ) name = "%s_%s" % (cls.__name__, parametrized_name) newcls = type.__new__(type, name, (cls,), cls_variables) setattr(module, name, newcls) classes.append(newcls) return classes def pytest_runtest_setup(item): # here we seem to get called only based on what we collected # in pytest_collection_modifyitems. So to do class-based stuff # we have to tear that out. global _current_class if not isinstance(item, pytest.Function): return # ... so we're doing a little dance here to figure it out... if _current_class is None: class_setup(item.parent.parent) _current_class = item.parent.parent # this is needed for the class-level, to ensure that the # teardown runs after the class is completed with its own # class-level teardown... 
def finalize(): global _current_class class_teardown(item.parent.parent) _current_class = None item.parent.parent.addfinalizer(finalize) test_setup(item) def pytest_runtest_teardown(item): # ...but this works better as the hook here rather than # using a finalizer, as the finalizer seems to get in the way # of the test reporting failures correctly (you get a bunch of # pytest assertion stuff instead) test_teardown(item) def test_setup(item): plugin_base.before_test( item, item.parent.module.__name__, item.parent.cls, item.name ) def test_teardown(item): plugin_base.after_test(item) def class_setup(item): plugin_base.start_test_class(item.cls) def class_teardown(item): plugin_base.stop_test_class(item.cls) def getargspec(fn): if sys.version_info.major == 3: return inspect.getfullargspec(fn) else: return inspect.getargspec(fn) def _pytest_fn_decorator(target): """Port of langhelpers.decorator with pytest-specific tricks.""" from sqlalchemy.util.langhelpers import format_argspec_plus from sqlalchemy.util.compat import inspect_getfullargspec def _exec_code_in_env(code, env, fn_name): exec(code, env) return env[fn_name] def decorate(fn, add_positional_parameters=()): spec = inspect_getfullargspec(fn) if add_positional_parameters: spec.args.extend(add_positional_parameters) metadata = dict(target="target", fn="fn", name=fn.__name__) metadata.update(format_argspec_plus(spec, grouped=False)) code = ( """\ def %(name)s(%(args)s): return %(target)s(%(fn)s, %(apply_kw)s) """ % metadata ) decorated = _exec_code_in_env( code, {"target": target, "fn": fn}, fn.__name__ ) if not add_positional_parameters: decorated.__defaults__ = getattr(fn, "im_func", fn).__defaults__ decorated.__wrapped__ = fn return update_wrapper(decorated, fn) else: # this is the pytest hacky part. don't do a full update wrapper # because pytest is really being sneaky about finding the args # for the wrapped function decorated.__module__ = fn.__module__ decorated.__name__ = fn.__name__ return decorated return decorate class PytestFixtureFunctions(plugin_base.FixtureFunctions): def skip_test_exception(self, *arg, **kw): return pytest.skip.Exception(*arg, **kw) _combination_id_fns = { "i": lambda obj: obj, "r": repr, "s": str, "n": operator.attrgetter("__name__"), } def combinations(self, *arg_sets, **kw): """facade for pytest.mark.paramtrize. Automatically derives argument names from the callable which in our case is always a method on a class with positional arguments. ids for parameter sets are derived using an optional template. """ from sqlalchemy.testing import exclusions if sys.version_info.major == 3: if len(arg_sets) == 1 and hasattr(arg_sets[0], "__next__"): arg_sets = list(arg_sets[0]) else: if len(arg_sets) == 1 and hasattr(arg_sets[0], "next"): arg_sets = list(arg_sets[0]) argnames = kw.pop("argnames", None) def _filter_exclusions(args): result = [] gathered_exclusions = [] for a in args: if isinstance(a, exclusions.compound): gathered_exclusions.append(a) else: result.append(a) return result, gathered_exclusions id_ = kw.pop("id_", None) tobuild_pytest_params = [] has_exclusions = False if id_: _combination_id_fns = self._combination_id_fns # because itemgetter is not consistent for one argument vs. 
# multiple, make it multiple in all cases and use a slice # to omit the first argument _arg_getter = operator.itemgetter( 0, *[ idx for idx, char in enumerate(id_) if char in ("n", "r", "s", "a") ] ) fns = [ (operator.itemgetter(idx), _combination_id_fns[char]) for idx, char in enumerate(id_) if char in _combination_id_fns ] for arg in arg_sets: if not isinstance(arg, tuple): arg = (arg,) fn_params, param_exclusions = _filter_exclusions(arg) parameters = _arg_getter(fn_params)[1:] if param_exclusions: has_exclusions = True tobuild_pytest_params.append( ( parameters, param_exclusions, "-".join( comb_fn(getter(arg)) for getter, comb_fn in fns ), ) ) else: for arg in arg_sets: if not isinstance(arg, tuple): arg = (arg,) fn_params, param_exclusions = _filter_exclusions(arg) if param_exclusions: has_exclusions = True tobuild_pytest_params.append( (fn_params, param_exclusions, None) ) pytest_params = [] for parameters, param_exclusions, id_ in tobuild_pytest_params: if has_exclusions: parameters += (param_exclusions,) param = pytest.param(*parameters, id=id_) pytest_params.append(param) def decorate(fn): if inspect.isclass(fn): if has_exclusions: raise NotImplementedError( "exclusions not supported for class level combinations" ) if "_sa_parametrize" not in fn.__dict__: fn._sa_parametrize = [] fn._sa_parametrize.append((argnames, pytest_params)) return fn else: if argnames is None: _argnames = getargspec(fn).args[1:] # type: Sequence(str) else: _argnames = re.split( r", *", argnames ) # type: Sequence(str) if has_exclusions: _argnames += ["_exclusions"] @_pytest_fn_decorator def check_exclusions(fn, *args, **kw): _exclusions = args[-1] if _exclusions: exlu = exclusions.compound().add(*_exclusions) fn = exlu(fn) return fn(*args[0:-1], **kw) def process_metadata(spec): spec.args.append("_exclusions") fn = check_exclusions( fn, add_positional_parameters=("_exclusions",) ) return pytest.mark.parametrize(_argnames, pytest_params)(fn) return decorate def param_ident(self, *parameters): ident = parameters[0] return pytest.param(*parameters[1:], id=ident) def fixture(self, *arg, **kw): return pytest.fixture(*arg, **kw) def get_current_test_name(self): return os.environ.get("PYTEST_CURRENT_TEST")
0
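The combinations() facade above ultimately drives pytest.mark.parametrize. A sketch of the typical call pattern follows, with the test itself invented: with id_="iaaa", the first element of each tuple becomes the test id and the remaining three become positional arguments, whose names are taken from the test function signature.

from sqlalchemy import testing
from sqlalchemy.testing import eq_, fixtures


class ArithmeticTest(fixtures.TestBase):
    @testing.combinations(
        ("small", 1, 2, 3),
        ("big", 10, 20, 30),
        id_="iaaa",
    )
    def test_sum(self, x, y, total):
        eq_(x + y, total)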
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/suite/test_sequence.py
from .. import config from .. import fixtures from ..assertions import eq_ from ..config import requirements from ..schema import Column from ..schema import Table from ... import Integer from ... import MetaData from ... import schema from ... import Sequence from ... import String from ... import testing class SequenceTest(fixtures.TablesTest): __requires__ = ("sequences",) __backend__ = True run_create_tables = "each" @classmethod def define_tables(cls, metadata): Table( "seq_pk", metadata, Column("id", Integer, Sequence("tab_id_seq"), primary_key=True), Column("data", String(50)), ) Table( "seq_opt_pk", metadata, Column( "id", Integer, Sequence("tab_id_seq", optional=True), primary_key=True, ), Column("data", String(50)), ) def test_insert_roundtrip(self): config.db.execute(self.tables.seq_pk.insert(), data="some data") self._assert_round_trip(self.tables.seq_pk, config.db) def test_insert_lastrowid(self): r = config.db.execute(self.tables.seq_pk.insert(), data="some data") eq_(r.inserted_primary_key, [1]) def test_nextval_direct(self): r = config.db.execute(self.tables.seq_pk.c.id.default) eq_(r, 1) @requirements.sequences_optional def test_optional_seq(self): r = config.db.execute( self.tables.seq_opt_pk.insert(), data="some data" ) eq_(r.inserted_primary_key, [1]) def _assert_round_trip(self, table, conn): row = conn.execute(table.select()).first() eq_(row, (1, "some data")) class SequenceCompilerTest(testing.AssertsCompiledSQL, fixtures.TestBase): __requires__ = ("sequences",) __backend__ = True def test_literal_binds_inline_compile(self): table = Table( "x", MetaData(), Column("y", Integer, Sequence("y_seq")), Column("q", Integer), ) stmt = table.insert().values(q=5) seq_nextval = testing.db.dialect.statement_compiler( statement=None, dialect=testing.db.dialect ).visit_sequence(Sequence("y_seq")) self.assert_compile( stmt, "INSERT INTO x (y, q) VALUES (%s, 5)" % (seq_nextval,), literal_binds=True, dialect=testing.db.dialect, ) class HasSequenceTest(fixtures.TestBase): __requires__ = ("sequences",) __backend__ = True def test_has_sequence(self): s1 = Sequence("user_id_seq") testing.db.execute(schema.CreateSequence(s1)) try: eq_( testing.db.dialect.has_sequence(testing.db, "user_id_seq"), True, ) finally: testing.db.execute(schema.DropSequence(s1)) @testing.requires.schemas def test_has_sequence_schema(self): s1 = Sequence("user_id_seq", schema=config.test_schema) testing.db.execute(schema.CreateSequence(s1)) try: eq_( testing.db.dialect.has_sequence( testing.db, "user_id_seq", schema=config.test_schema ), True, ) finally: testing.db.execute(schema.DropSequence(s1)) def test_has_sequence_neg(self): eq_(testing.db.dialect.has_sequence(testing.db, "user_id_seq"), False) @testing.requires.schemas def test_has_sequence_schemas_neg(self): eq_( testing.db.dialect.has_sequence( testing.db, "user_id_seq", schema=config.test_schema ), False, ) @testing.requires.schemas def test_has_sequence_default_not_in_remote(self): s1 = Sequence("user_id_seq") testing.db.execute(schema.CreateSequence(s1)) try: eq_( testing.db.dialect.has_sequence( testing.db, "user_id_seq", schema=config.test_schema ), False, ) finally: testing.db.execute(schema.DropSequence(s1)) @testing.requires.schemas def test_has_sequence_remote_not_in_default(self): s1 = Sequence("user_id_seq", schema=config.test_schema) testing.db.execute(schema.CreateSequence(s1)) try: eq_( testing.db.dialect.has_sequence(testing.db, "user_id_seq"), False, ) finally: testing.db.execute(schema.DropSequence(s1))
0
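The suite above exercises Sequence-backed primary keys against dialects that declare the "sequences" requirement. A minimal standalone sketch of the same pattern, assuming a sequence-capable backend such as PostgreSQL (the DSN below is hypothetical):

from sqlalchemy import (
    Column, Integer, MetaData, Sequence, String, Table, create_engine,
)

# hypothetical DSN; any dialect supporting CREATE SEQUENCE works here
engine = create_engine("postgresql://scott:tiger@localhost/test")
metadata = MetaData()

cake = Table(
    "cake",
    metadata,
    # the sequence supplies primary key values at INSERT time
    Column("id", Integer, Sequence("cake_id_seq"), primary_key=True),
    Column("flavor", String(50)),
)

metadata.create_all(engine)
result = engine.execute(cake.insert(), flavor="lemon")
print(result.inserted_primary_key)  # first value drawn from cake_id_seq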
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/suite/test_results.py
import datetime

from .. import config
from .. import engines
from .. import fixtures
from ..assertions import eq_
from ..config import requirements
from ..schema import Column
from ..schema import Table
from ... import DateTime
from ... import func
from ... import Integer
from ... import select
from ... import sql
from ... import String
from ... import testing
from ... import text


class RowFetchTest(fixtures.TablesTest):
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "plain_pk",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", String(50)),
        )
        Table(
            "has_dates",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("today", DateTime),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.plain_pk.insert(),
            [
                {"id": 1, "data": "d1"},
                {"id": 2, "data": "d2"},
                {"id": 3, "data": "d3"},
            ],
        )

        connection.execute(
            cls.tables.has_dates.insert(),
            [{"id": 1, "today": datetime.datetime(2006, 5, 12, 12, 0, 0)}],
        )

    def test_via_string(self):
        row = config.db.execute(
            self.tables.plain_pk.select().order_by(self.tables.plain_pk.c.id)
        ).first()

        eq_(row["id"], 1)
        eq_(row["data"], "d1")

    def test_via_int(self):
        row = config.db.execute(
            self.tables.plain_pk.select().order_by(self.tables.plain_pk.c.id)
        ).first()

        eq_(row[0], 1)
        eq_(row[1], "d1")

    def test_via_col_object(self):
        row = config.db.execute(
            self.tables.plain_pk.select().order_by(self.tables.plain_pk.c.id)
        ).first()

        eq_(row[self.tables.plain_pk.c.id], 1)
        eq_(row[self.tables.plain_pk.c.data], "d1")

    @requirements.duplicate_names_in_cursor_description
    def test_row_with_dupe_names(self):
        result = config.db.execute(
            select(
                [
                    self.tables.plain_pk.c.data,
                    self.tables.plain_pk.c.data.label("data"),
                ]
            ).order_by(self.tables.plain_pk.c.id)
        )
        row = result.first()
        eq_(result.keys(), ["data", "data"])
        eq_(row, ("d1", "d1"))

    def test_row_w_scalar_select(self):
        """test that a scalar select as a column is returned as such
        and that type conversion works OK.

        (this is half a SQLAlchemy Core test and half to catch database
        backends that may have unusual behavior with scalar selects.)

        """
        datetable = self.tables.has_dates
        s = select([datetable.alias("x").c.today]).as_scalar()
        s2 = select([datetable.c.id, s.label("somelabel")])
        row = config.db.execute(s2).first()

        eq_(row["somelabel"], datetime.datetime(2006, 5, 12, 12, 0, 0))


class PercentSchemaNamesTest(fixtures.TablesTest):
    """tests using percent signs, spaces in table and column names.

    This is a very fringe use case, doesn't work for MySQL or PostgreSQL.
    the requirement, "percent_schema_names", is marked "skip" by default.

    """

    __requires__ = ("percent_schema_names",)

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        cls.tables.percent_table = Table(
            "percent%table",
            metadata,
            Column("percent%", Integer),
            Column("spaces % more spaces", Integer),
        )
        cls.tables.lightweight_percent_table = sql.table(
            "percent%table",
            sql.column("percent%"),
            sql.column("spaces % more spaces"),
        )

    def test_single_roundtrip(self):
        percent_table = self.tables.percent_table
        for params in [
            {"percent%": 5, "spaces % more spaces": 12},
            {"percent%": 7, "spaces % more spaces": 11},
            {"percent%": 9, "spaces % more spaces": 10},
            {"percent%": 11, "spaces % more spaces": 9},
        ]:
            config.db.execute(percent_table.insert(), params)
        self._assert_table()

    def test_executemany_roundtrip(self):
        percent_table = self.tables.percent_table
        config.db.execute(
            percent_table.insert(), {"percent%": 5, "spaces % more spaces": 12}
        )
        config.db.execute(
            percent_table.insert(),
            [
                {"percent%": 7, "spaces % more spaces": 11},
                {"percent%": 9, "spaces % more spaces": 10},
                {"percent%": 11, "spaces % more spaces": 9},
            ],
        )
        self._assert_table()

    def _assert_table(self):
        percent_table = self.tables.percent_table
        lightweight_percent_table = self.tables.lightweight_percent_table

        for table in (
            percent_table,
            percent_table.alias(),
            lightweight_percent_table,
            lightweight_percent_table.alias(),
        ):
            eq_(
                list(
                    config.db.execute(
                        table.select().order_by(table.c["percent%"])
                    )
                ),
                [(5, 12), (7, 11), (9, 10), (11, 9)],
            )

            eq_(
                list(
                    config.db.execute(
                        table.select()
                        .where(table.c["spaces % more spaces"].in_([9, 10]))
                        .order_by(table.c["percent%"])
                    )
                ),
                [(9, 10), (11, 9)],
            )

            row = config.db.execute(
                table.select().order_by(table.c["percent%"])
            ).first()
            eq_(row["percent%"], 5)
            eq_(row["spaces % more spaces"], 12)
            eq_(row[table.c["percent%"]], 5)
            eq_(row[table.c["spaces % more spaces"]], 12)

        config.db.execute(
            percent_table.update().values(
                {percent_table.c["spaces % more spaces"]: 15}
            )
        )

        eq_(
            list(
                config.db.execute(
                    percent_table.select().order_by(
                        percent_table.c["percent%"]
                    )
                )
            ),
            [(5, 15), (7, 15), (9, 15), (11, 15)],
        )


class ServerSideCursorsTest(
    fixtures.TestBase, testing.AssertsExecutionResults
):

    __requires__ = ("server_side_cursors",)

    __backend__ = True

    def _is_server_side(self, cursor):
        if self.engine.dialect.driver == "psycopg2":
            return bool(cursor.name)
        elif self.engine.dialect.driver == "pymysql":
            sscursor = __import__("pymysql.cursors").cursors.SSCursor
            return isinstance(cursor, sscursor)
        elif self.engine.dialect.driver == "mysqldb":
            sscursor = __import__("MySQLdb.cursors").cursors.SSCursor
            return isinstance(cursor, sscursor)
        else:
            return False

    def _fixture(self, server_side_cursors):
        self.engine = engines.testing_engine(
            options={"server_side_cursors": server_side_cursors}
        )
        return self.engine

    def tearDown(self):
        engines.testing_reaper.close_all()
        self.engine.dispose()

    @testing.combinations(
        ("global_string", True, "select 1", True),
        ("global_text", True, text("select 1"), True),
        ("global_expr", True, select([1]), True),
        ("global_off_explicit", False, text("select 1"), False),
        (
            "stmt_option",
            False,
            select([1]).execution_options(stream_results=True),
            True,
        ),
        (
            "stmt_option_disabled",
            True,
            select([1]).execution_options(stream_results=False),
            False,
        ),
        ("for_update_expr", True, select([1]).with_for_update(), True),
        ("for_update_string", True, "SELECT 1 FOR UPDATE", True),
        ("text_no_ss", False, text("select 42"), False),
        (
            "text_ss_option",
            False,
            text("select 42").execution_options(stream_results=True),
            True,
        ),
        id_="iaaa",
        argnames="engine_ss_arg, statement, cursor_ss_status",
    )
    def test_ss_cursor_status(
        self, engine_ss_arg, statement, cursor_ss_status
    ):
        engine = self._fixture(engine_ss_arg)
        with engine.begin() as conn:
            result = conn.execute(statement)
            eq_(self._is_server_side(result.cursor), cursor_ss_status)
            result.close()

    def test_conn_option(self):
        engine = self._fixture(False)

        # should be enabled for this one
        result = (
            engine.connect()
            .execution_options(stream_results=True)
            .execute("select 1")
        )
        assert self._is_server_side(result.cursor)

    def test_stmt_enabled_conn_option_disabled(self):
        engine = self._fixture(False)

        s = select([1]).execution_options(stream_results=True)

        # not this one
        result = (
            engine.connect().execution_options(stream_results=False).execute(s)
        )
        assert not self._is_server_side(result.cursor)

    def test_aliases_and_ss(self):
        engine = self._fixture(False)
        s1 = select([1]).execution_options(stream_results=True).alias()
        with engine.begin() as conn:
            result = conn.execute(s1)
            assert self._is_server_side(result.cursor)
            result.close()

        # s1's options shouldn't affect s2 when s2 is used as a
        # from_obj.
        s2 = select([1], from_obj=s1)
        with engine.begin() as conn:
            result = conn.execute(s2)
            assert not self._is_server_side(result.cursor)
            result.close()

    @testing.provide_metadata
    def test_roundtrip(self):
        md = self.metadata

        self._fixture(True)
        test_table = Table(
            "test_table",
            md,
            Column("id", Integer, primary_key=True),
            Column("data", String(50)),
        )
        test_table.create(checkfirst=True)
        test_table.insert().execute(data="data1")
        test_table.insert().execute(data="data2")
        eq_(
            test_table.select()
            .order_by(test_table.c.id)
            .execute()
            .fetchall(),
            [(1, "data1"), (2, "data2")],
        )
        test_table.update().where(test_table.c.id == 2).values(
            data=test_table.c.data + " updated"
        ).execute()
        eq_(
            test_table.select()
            .order_by(test_table.c.id)
            .execute()
            .fetchall(),
            [(1, "data1"), (2, "data2 updated")],
        )
        test_table.delete().execute()
        eq_(select([func.count("*")]).select_from(test_table).scalar(), 0)
0
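RowFetchTest above verifies that a result row is addressable by string key, integer position, and Column object, while ServerSideCursorsTest checks that stream_results switches the driver to a server-side cursor. A small sketch of both ideas, runnable on SQLite for the row-access part (stream_results only takes effect on drivers such as psycopg2 and is silently ignored by SQLite):

from sqlalchemy import (
    Column, Integer, MetaData, String, Table, create_engine, select,
)

engine = create_engine("sqlite://")
metadata = MetaData()
pets = Table(
    "pets",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String(50)),
)
metadata.create_all(engine)
engine.execute(pets.insert(), [{"id": 1, "name": "rex"}])

row = engine.execute(select([pets]).order_by(pets.c.id)).first()
# the same value is reachable three ways, as RowFetchTest asserts
assert row["name"] == row[1] == row[pets.c.name] == "rex"

# per-statement request for a server-side cursor
stmt = select([pets]).execution_options(stream_results=True)
for r in engine.execute(stmt):
    print(r.id, r.name)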
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/suite/test_insert.py
from .. import config
from .. import engines
from .. import fixtures
from ..assertions import eq_
from ..config import requirements
from ..schema import Column
from ..schema import Table
from ... import Integer
from ... import literal
from ... import literal_column
from ... import select
from ... import String


class LastrowidTest(fixtures.TablesTest):
    run_deletes = "each"

    __backend__ = True

    __requires__ = "implements_get_lastrowid", "autoincrement_insert"

    __engine_options__ = {"implicit_returning": False}

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "autoinc_pk",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(50)),
        )

        Table(
            "manual_pk",
            metadata,
            Column("id", Integer, primary_key=True, autoincrement=False),
            Column("data", String(50)),
        )

    def _assert_round_trip(self, table, conn):
        row = conn.execute(table.select()).first()
        eq_(row, (config.db.dialect.default_sequence_base, "some data"))

    def test_autoincrement_on_insert(self):
        config.db.execute(self.tables.autoinc_pk.insert(), data="some data")
        self._assert_round_trip(self.tables.autoinc_pk, config.db)

    def test_last_inserted_id(self):
        r = config.db.execute(
            self.tables.autoinc_pk.insert(), data="some data"
        )
        pk = config.db.scalar(select([self.tables.autoinc_pk.c.id]))
        eq_(r.inserted_primary_key, [pk])

    # failed on pypy1.9 but seems to be OK on pypy 2.1
    # @exclusions.fails_if(lambda: util.pypy,
    #                      "lastrowid not maintained after "
    #                      "connection close")
    @requirements.dbapi_lastrowid
    def test_native_lastrowid_autoinc(self):
        r = config.db.execute(
            self.tables.autoinc_pk.insert(), data="some data"
        )
        lastrowid = r.lastrowid
        pk = config.db.scalar(select([self.tables.autoinc_pk.c.id]))
        eq_(lastrowid, pk)


class InsertBehaviorTest(fixtures.TablesTest):
    run_deletes = "each"
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "autoinc_pk",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(50)),
        )
        Table(
            "manual_pk",
            metadata,
            Column("id", Integer, primary_key=True, autoincrement=False),
            Column("data", String(50)),
        )
        Table(
            "includes_defaults",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(50)),
            Column("x", Integer, default=5),
            Column(
                "y",
                Integer,
                default=literal_column("2", type_=Integer) + literal(2),
            ),
        )

    def test_autoclose_on_insert(self):
        if requirements.returning.enabled:
            engine = engines.testing_engine(
                options={"implicit_returning": False}
            )
        else:
            engine = config.db

        with engine.begin() as conn:
            r = conn.execute(
                self.tables.autoinc_pk.insert(), data="some data"
            )
        assert r._soft_closed
        assert not r.closed
        assert r.is_insert
        assert not r.returns_rows

    @requirements.returning
    def test_autoclose_on_insert_implicit_returning(self):
        r = config.db.execute(
            self.tables.autoinc_pk.insert(), data="some data"
        )
        assert r._soft_closed
        assert not r.closed
        assert r.is_insert
        assert not r.returns_rows

    @requirements.empty_inserts
    def test_empty_insert(self):
        r = config.db.execute(self.tables.autoinc_pk.insert())
        assert r._soft_closed
        assert not r.closed

        r = config.db.execute(
            self.tables.autoinc_pk.select().where(
                self.tables.autoinc_pk.c.id != None
            )
        )

        assert len(r.fetchall())

    @requirements.insert_from_select
    def test_insert_from_select_autoinc(self):
        src_table = self.tables.manual_pk
        dest_table = self.tables.autoinc_pk
        config.db.execute(
            src_table.insert(),
            [
                dict(id=1, data="data1"),
                dict(id=2, data="data2"),
                dict(id=3, data="data3"),
            ],
        )

        result = config.db.execute(
            dest_table.insert().from_select(
                ("data",),
                select([src_table.c.data]).where(
                    src_table.c.data.in_(["data2", "data3"])
                ),
            )
        )

        eq_(result.inserted_primary_key, [None])

        result = config.db.execute(
            select([dest_table.c.data]).order_by(dest_table.c.data)
        )
        eq_(result.fetchall(), [("data2",), ("data3",)])

    @requirements.insert_from_select
    def test_insert_from_select_autoinc_no_rows(self):
        src_table = self.tables.manual_pk
        dest_table = self.tables.autoinc_pk

        result = config.db.execute(
            dest_table.insert().from_select(
                ("data",),
                select([src_table.c.data]).where(
                    src_table.c.data.in_(["data2", "data3"])
                ),
            )
        )
        eq_(result.inserted_primary_key, [None])

        result = config.db.execute(
            select([dest_table.c.data]).order_by(dest_table.c.data)
        )

        eq_(result.fetchall(), [])

    @requirements.insert_from_select
    def test_insert_from_select(self):
        table = self.tables.manual_pk
        config.db.execute(
            table.insert(),
            [
                dict(id=1, data="data1"),
                dict(id=2, data="data2"),
                dict(id=3, data="data3"),
            ],
        )

        config.db.execute(
            table.insert(inline=True).from_select(
                ("id", "data"),
                select([table.c.id + 5, table.c.data]).where(
                    table.c.data.in_(["data2", "data3"])
                ),
            )
        )

        eq_(
            config.db.execute(
                select([table.c.data]).order_by(table.c.data)
            ).fetchall(),
            [("data1",), ("data2",), ("data2",), ("data3",), ("data3",)],
        )

    @requirements.insert_from_select
    def test_insert_from_select_with_defaults(self):
        table = self.tables.includes_defaults
        config.db.execute(
            table.insert(),
            [
                dict(id=1, data="data1"),
                dict(id=2, data="data2"),
                dict(id=3, data="data3"),
            ],
        )

        config.db.execute(
            table.insert(inline=True).from_select(
                ("id", "data"),
                select([table.c.id + 5, table.c.data]).where(
                    table.c.data.in_(["data2", "data3"])
                ),
            )
        )

        eq_(
            config.db.execute(
                select([table]).order_by(table.c.data, table.c.id)
            ).fetchall(),
            [
                (1, "data1", 5, 4),
                (2, "data2", 5, 4),
                (7, "data2", 5, 4),
                (3, "data3", 5, 4),
                (8, "data3", 5, 4),
            ],
        )


class ReturningTest(fixtures.TablesTest):
    run_create_tables = "each"
    __requires__ = "returning", "autoincrement_insert"
    __backend__ = True
    __engine_options__ = {"implicit_returning": True}

    def _assert_round_trip(self, table, conn):
        row = conn.execute(table.select()).first()
        eq_(row, (config.db.dialect.default_sequence_base, "some data"))

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "autoinc_pk",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(50)),
        )

    @requirements.fetch_rows_post_commit
    def test_explicit_returning_pk_autocommit(self):
        engine = config.db
        table = self.tables.autoinc_pk
        with engine.begin() as conn:
            r = conn.execute(
                table.insert().returning(table.c.id), data="some data"
            )
            pk = r.first()[0]
        fetched_pk = config.db.scalar(select([table.c.id]))
        eq_(fetched_pk, pk)

    def test_explicit_returning_pk_no_autocommit(self):
        engine = config.db
        table = self.tables.autoinc_pk
        with engine.begin() as conn:
            r = conn.execute(
                table.insert().returning(table.c.id), data="some data"
            )
            pk = r.first()[0]
        fetched_pk = config.db.scalar(select([table.c.id]))
        eq_(fetched_pk, pk)

    def test_autoincrement_on_insert_implicit_returning(self):
        config.db.execute(self.tables.autoinc_pk.insert(), data="some data")
        self._assert_round_trip(self.tables.autoinc_pk, config.db)

    def test_last_inserted_id_implicit_returning(self):
        r = config.db.execute(
            self.tables.autoinc_pk.insert(), data="some data"
        )
        pk = config.db.scalar(select([self.tables.autoinc_pk.c.id]))
        eq_(r.inserted_primary_key, [pk])


__all__ = ("LastrowidTest", "InsertBehaviorTest", "ReturningTest")
0
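InsertBehaviorTest's from_select cases are the part worth a standalone illustration: an INSERT whose rows come from a SELECT, so no primary key is known client-side and inserted_primary_key comes back as [None]. A runnable sketch on SQLite:

from sqlalchemy import (
    Column, Integer, MetaData, String, Table, create_engine, select,
)

engine = create_engine("sqlite://")
metadata = MetaData()
src = Table(
    "src",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("data", String(50)),
)
dest = Table(
    "dest",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("data", String(50)),
)
metadata.create_all(engine)
engine.execute(src.insert(), [{"id": 1, "data": "d1"}, {"id": 2, "data": "d2"}])

# INSERT INTO dest (data) SELECT src.data FROM src WHERE src.data = 'd2'
result = engine.execute(
    dest.insert().from_select(
        ("data",), select([src.c.data]).where(src.c.data == "d2")
    )
)
print(result.inserted_primary_key)  # [None] - PK generated server-side
print(engine.execute(select([dest.c.data])).fetchall())  # [('d2',)]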
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/suite/test_select.py
from .. import config
from .. import fixtures
from ..assertions import eq_
from ..assertions import in_
from ..schema import Column
from ..schema import Table
from ... import bindparam
from ... import case
from ... import Computed
from ... import false
from ... import func
from ... import Integer
from ... import literal_column
from ... import null
from ... import select
from ... import String
from ... import testing
from ... import text
from ... import true
from ... import tuple_
from ... import union
from ... import util


class CollateTest(fixtures.TablesTest):
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", String(100)),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.some_table.insert(),
            [
                {"id": 1, "data": "collate data1"},
                {"id": 2, "data": "collate data2"},
            ],
        )

    def _assert_result(self, select, result):
        eq_(config.db.execute(select).fetchall(), result)

    @testing.requires.order_by_collation
    def test_collate_order_by(self):
        collation = testing.requires.get_order_by_collation(testing.config)

        self._assert_result(
            select([self.tables.some_table]).order_by(
                self.tables.some_table.c.data.collate(collation).asc()
            ),
            [(1, "collate data1"), (2, "collate data2")],
        )


class OrderByLabelTest(fixtures.TablesTest):
    """Test the dialect sends appropriate ORDER BY expressions when
    labels are used.

    This essentially exercises the "supports_simple_order_by_label"
    setting.

    """

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("x", Integer),
            Column("y", Integer),
            Column("q", String(50)),
            Column("p", String(50)),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.some_table.insert(),
            [
                {"id": 1, "x": 1, "y": 2, "q": "q1", "p": "p3"},
                {"id": 2, "x": 2, "y": 3, "q": "q2", "p": "p2"},
                {"id": 3, "x": 3, "y": 4, "q": "q3", "p": "p1"},
            ],
        )

    def _assert_result(self, select, result):
        eq_(config.db.execute(select).fetchall(), result)

    def test_plain(self):
        table = self.tables.some_table
        lx = table.c.x.label("lx")
        self._assert_result(select([lx]).order_by(lx), [(1,), (2,), (3,)])

    def test_composed_int(self):
        table = self.tables.some_table
        lx = (table.c.x + table.c.y).label("lx")
        self._assert_result(select([lx]).order_by(lx), [(3,), (5,), (7,)])

    def test_composed_multiple(self):
        table = self.tables.some_table
        lx = (table.c.x + table.c.y).label("lx")
        ly = (func.lower(table.c.q) + table.c.p).label("ly")
        self._assert_result(
            select([lx, ly]).order_by(lx, ly.desc()),
            [(3, util.u("q1p3")), (5, util.u("q2p2")), (7, util.u("q3p1"))],
        )

    def test_plain_desc(self):
        table = self.tables.some_table
        lx = table.c.x.label("lx")
        self._assert_result(
            select([lx]).order_by(lx.desc()), [(3,), (2,), (1,)]
        )

    def test_composed_int_desc(self):
        table = self.tables.some_table
        lx = (table.c.x + table.c.y).label("lx")
        self._assert_result(
            select([lx]).order_by(lx.desc()), [(7,), (5,), (3,)]
        )

    @testing.requires.group_by_complex_expression
    def test_group_by_composed(self):
        table = self.tables.some_table
        expr = (table.c.x + table.c.y).label("lx")
        stmt = (
            select([func.count(table.c.id), expr])
            .group_by(expr)
            .order_by(expr)
        )
        self._assert_result(stmt, [(1, 3), (1, 5), (1, 7)])


class LimitOffsetTest(fixtures.TablesTest):
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("x", Integer),
            Column("y", Integer),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.some_table.insert(),
            [
                {"id": 1, "x": 1, "y": 2},
                {"id": 2, "x": 2, "y": 3},
                {"id": 3, "x": 3, "y": 4},
                {"id": 4, "x": 4, "y": 5},
            ],
        )

    def _assert_result(self, select, result, params=()):
        eq_(config.db.execute(select, params).fetchall(), result)

    def test_simple_limit(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).limit(2),
            [(1, 1, 2), (2, 2, 3)],
        )

    @testing.requires.offset
    def test_simple_offset(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).offset(2),
            [(3, 3, 4), (4, 4, 5)],
        )

    @testing.requires.offset
    def test_simple_limit_offset(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).limit(2).offset(1),
            [(2, 2, 3), (3, 3, 4)],
        )

    @testing.requires.offset
    def test_limit_offset_nobinds(self):
        """test that 'literal binds' mode works - no bound params."""

        table = self.tables.some_table
        stmt = select([table]).order_by(table.c.id).limit(2).offset(1)
        sql = stmt.compile(
            dialect=config.db.dialect, compile_kwargs={"literal_binds": True}
        )
        sql = str(sql)

        self._assert_result(sql, [(2, 2, 3), (3, 3, 4)])

    @testing.requires.bound_limit_offset
    def test_bound_limit(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).limit(bindparam("l")),
            [(1, 1, 2), (2, 2, 3)],
            params={"l": 2},
        )

    @testing.requires.bound_limit_offset
    def test_bound_offset(self):
        table = self.tables.some_table
        self._assert_result(
            select([table]).order_by(table.c.id).offset(bindparam("o")),
            [(3, 3, 4), (4, 4, 5)],
            params={"o": 2},
        )

    @testing.requires.bound_limit_offset
    def test_bound_limit_offset(self):
        table = self.tables.some_table
        self._assert_result(
            select([table])
            .order_by(table.c.id)
            .limit(bindparam("l"))
            .offset(bindparam("o")),
            [(2, 2, 3), (3, 3, 4)],
            params={"l": 2, "o": 1},
        )


class CompoundSelectTest(fixtures.TablesTest):
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("x", Integer),
            Column("y", Integer),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.some_table.insert(),
            [
                {"id": 1, "x": 1, "y": 2},
                {"id": 2, "x": 2, "y": 3},
                {"id": 3, "x": 3, "y": 4},
                {"id": 4, "x": 4, "y": 5},
            ],
        )

    def _assert_result(self, select, result, params=()):
        eq_(config.db.execute(select, params).fetchall(), result)

    def test_plain_union(self):
        table = self.tables.some_table
        s1 = select([table]).where(table.c.id == 2)
        s2 = select([table]).where(table.c.id == 3)

        u1 = union(s1, s2)
        self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])

    def test_select_from_plain_union(self):
        table = self.tables.some_table
        s1 = select([table]).where(table.c.id == 2)
        s2 = select([table]).where(table.c.id == 3)

        u1 = union(s1, s2).alias().select()
        self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])

    @testing.requires.order_by_col_from_union
    @testing.requires.parens_in_union_contained_select_w_limit_offset
    def test_limit_offset_selectable_in_unions(self):
        table = self.tables.some_table
        s1 = (
            select([table])
            .where(table.c.id == 2)
            .limit(1)
            .order_by(table.c.id)
        )
        s2 = (
            select([table])
            .where(table.c.id == 3)
            .limit(1)
            .order_by(table.c.id)
        )

        u1 = union(s1, s2).limit(2)
        self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])

    @testing.requires.parens_in_union_contained_select_wo_limit_offset
    def test_order_by_selectable_in_unions(self):
        table = self.tables.some_table
        s1 = select([table]).where(table.c.id == 2).order_by(table.c.id)
        s2 = select([table]).where(table.c.id == 3).order_by(table.c.id)

        u1 = union(s1, s2).limit(2)
        self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])

    def test_distinct_selectable_in_unions(self):
        table = self.tables.some_table
        s1 = select([table]).where(table.c.id == 2).distinct()
        s2 = select([table]).where(table.c.id == 3).distinct()

        u1 = union(s1, s2).limit(2)
        self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])

    @testing.requires.parens_in_union_contained_select_w_limit_offset
    def test_limit_offset_in_unions_from_alias(self):
        table = self.tables.some_table
        s1 = (
            select([table])
            .where(table.c.id == 2)
            .limit(1)
            .order_by(table.c.id)
        )
        s2 = (
            select([table])
            .where(table.c.id == 3)
            .limit(1)
            .order_by(table.c.id)
        )

        # this necessarily has double parens
        u1 = union(s1, s2).alias()
        self._assert_result(
            u1.select().limit(2).order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)]
        )

    def test_limit_offset_aliased_selectable_in_unions(self):
        table = self.tables.some_table
        s1 = (
            select([table])
            .where(table.c.id == 2)
            .limit(1)
            .order_by(table.c.id)
            .alias()
            .select()
        )
        s2 = (
            select([table])
            .where(table.c.id == 3)
            .limit(1)
            .order_by(table.c.id)
            .alias()
            .select()
        )

        u1 = union(s1, s2).limit(2)
        self._assert_result(u1.order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)])


class ExpandingBoundInTest(fixtures.TablesTest):
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("x", Integer),
            Column("y", Integer),
            Column("z", String(50)),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.some_table.insert(),
            [
                {"id": 1, "x": 1, "y": 2, "z": "z1"},
                {"id": 2, "x": 2, "y": 3, "z": "z2"},
                {"id": 3, "x": 3, "y": 4, "z": "z3"},
                {"id": 4, "x": 4, "y": 5, "z": "z4"},
            ],
        )

    def _assert_result(self, select, result, params=()):
        eq_(config.db.execute(select, params).fetchall(), result)

    def test_multiple_empty_sets(self):
        # test that any anonymous aliasing used by the dialect
        # is fine with duplicates
        table = self.tables.some_table
        stmt = (
            select([table.c.id])
            .where(table.c.x.in_(bindparam("q", expanding=True)))
            .where(table.c.y.in_(bindparam("p", expanding=True)))
            .order_by(table.c.id)
        )
        self._assert_result(stmt, [], params={"q": [], "p": []})

    @testing.requires.tuple_in
    def test_empty_heterogeneous_tuples(self):
        table = self.tables.some_table
        stmt = (
            select([table.c.id])
            .where(
                tuple_(table.c.x, table.c.z).in_(
                    bindparam("q", expanding=True)
                )
            )
            .order_by(table.c.id)
        )
        self._assert_result(stmt, [], params={"q": []})

    @testing.requires.tuple_in
    def test_empty_homogeneous_tuples(self):
        table = self.tables.some_table
        stmt = (
            select([table.c.id])
            .where(
                tuple_(table.c.x, table.c.y).in_(
                    bindparam("q", expanding=True)
                )
            )
            .order_by(table.c.id)
        )
        self._assert_result(stmt, [], params={"q": []})

    def test_bound_in_scalar(self):
        table = self.tables.some_table
        stmt = (
            select([table.c.id])
            .where(table.c.x.in_(bindparam("q", expanding=True)))
            .order_by(table.c.id)
        )
        self._assert_result(stmt, [(2,), (3,), (4,)], params={"q": [2, 3, 4]})

    @testing.requires.tuple_in
    def test_bound_in_two_tuple(self):
        table = self.tables.some_table
        stmt = (
            select([table.c.id])
            .where(
                tuple_(table.c.x, table.c.y).in_(
                    bindparam("q", expanding=True)
                )
            )
            .order_by(table.c.id)
        )
        self._assert_result(
            stmt, [(2,), (3,), (4,)], params={"q": [(2, 3), (3, 4), (4, 5)]}
        )

    @testing.requires.tuple_in
    def test_bound_in_heterogeneous_two_tuple(self):
        table = self.tables.some_table
        stmt = (
            select([table.c.id])
            .where(
                tuple_(table.c.x, table.c.z).in_(
                    bindparam("q", expanding=True)
                )
            )
            .order_by(table.c.id)
        )
        self._assert_result(
            stmt,
            [(2,), (3,), (4,)],
            params={"q": [(2, "z2"), (3, "z3"), (4, "z4")]},
        )

    def test_empty_set_against_integer(self):
        table = self.tables.some_table
        stmt = (
            select([table.c.id])
            .where(table.c.x.in_(bindparam("q", expanding=True)))
            .order_by(table.c.id)
        )
        self._assert_result(stmt, [], params={"q": []})

    def test_empty_set_against_integer_negation(self):
        table = self.tables.some_table
        stmt = (
            select([table.c.id])
            .where(table.c.x.notin_(bindparam("q", expanding=True)))
            .order_by(table.c.id)
        )
        self._assert_result(stmt, [(1,), (2,), (3,), (4,)], params={"q": []})

    def test_empty_set_against_string(self):
        table = self.tables.some_table
        stmt = (
            select([table.c.id])
            .where(table.c.z.in_(bindparam("q", expanding=True)))
            .order_by(table.c.id)
        )
        self._assert_result(stmt, [], params={"q": []})

    def test_empty_set_against_string_negation(self):
        table = self.tables.some_table
        stmt = (
            select([table.c.id])
            .where(table.c.z.notin_(bindparam("q", expanding=True)))
            .order_by(table.c.id)
        )
        self._assert_result(stmt, [(1,), (2,), (3,), (4,)], params={"q": []})

    def test_null_in_empty_set_is_false(self):
        stmt = select(
            [
                case(
                    [
                        (
                            null().in_(
                                bindparam("foo", value=(), expanding=True)
                            ),
                            true(),
                        )
                    ],
                    else_=false(),
                )
            ]
        )
        in_(config.db.execute(stmt).fetchone()[0], (False, 0))


class LikeFunctionsTest(fixtures.TablesTest):
    __backend__ = True

    run_inserts = "once"
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", String(50)),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.some_table.insert(),
            [
                {"id": 1, "data": "abcdefg"},
                {"id": 2, "data": "ab/cdefg"},
                {"id": 3, "data": "ab%cdefg"},
                {"id": 4, "data": "ab_cdefg"},
                {"id": 5, "data": "abcde/fg"},
                {"id": 6, "data": "abcde%fg"},
                {"id": 7, "data": "ab#cdefg"},
                {"id": 8, "data": "ab9cdefg"},
                {"id": 9, "data": "abcde#fg"},
                {"id": 10, "data": "abcd9fg"},
            ],
        )

    def _test(self, expr, expected):
        some_table = self.tables.some_table

        with config.db.connect() as conn:
            rows = {
                value
                for value, in conn.execute(
                    select([some_table.c.id]).where(expr)
                )
            }

        eq_(rows, expected)

    def test_startswith_unescaped(self):
        col = self.tables.some_table.c.data
        self._test(col.startswith("ab%c"), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10})

    def test_startswith_autoescape(self):
        col = self.tables.some_table.c.data
        self._test(col.startswith("ab%c", autoescape=True), {3})

    def test_startswith_sqlexpr(self):
        col = self.tables.some_table.c.data
        self._test(
            col.startswith(literal_column("'ab%c'")),
            {1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
        )

    def test_startswith_escape(self):
        col = self.tables.some_table.c.data
        self._test(col.startswith("ab##c", escape="#"), {7})

    def test_startswith_autoescape_escape(self):
        col = self.tables.some_table.c.data
        self._test(col.startswith("ab%c", autoescape=True, escape="#"), {3})
        self._test(col.startswith("ab#c", autoescape=True, escape="#"), {7})

    def test_endswith_unescaped(self):
        col = self.tables.some_table.c.data
        self._test(col.endswith("e%fg"), {1, 2, 3, 4, 5, 6, 7, 8, 9})

    def test_endswith_sqlexpr(self):
        col = self.tables.some_table.c.data
        self._test(
            col.endswith(literal_column("'e%fg'")), {1, 2, 3, 4, 5, 6, 7, 8, 9}
        )

    def test_endswith_autoescape(self):
        col = self.tables.some_table.c.data
        self._test(col.endswith("e%fg", autoescape=True), {6})

    def test_endswith_escape(self):
        col = self.tables.some_table.c.data
        self._test(col.endswith("e##fg", escape="#"), {9})

    def test_endswith_autoescape_escape(self):
        col = self.tables.some_table.c.data
        self._test(col.endswith("e%fg", autoescape=True, escape="#"), {6})
        self._test(col.endswith("e#fg", autoescape=True, escape="#"), {9})

    def test_contains_unescaped(self):
        col = self.tables.some_table.c.data
        self._test(col.contains("b%cde"), {1, 2, 3, 4, 5, 6, 7, 8, 9})

    def test_contains_autoescape(self):
        col = self.tables.some_table.c.data
        self._test(col.contains("b%cde", autoescape=True), {3})

    def test_contains_escape(self):
        col = self.tables.some_table.c.data
        self._test(col.contains("b##cde", escape="#"), {7})

    def test_contains_autoescape_escape(self):
        col = self.tables.some_table.c.data
        self._test(col.contains("b%cd", autoescape=True, escape="#"), {3})
        self._test(col.contains("b#cd", autoescape=True, escape="#"), {7})


class ComputedColumnTest(fixtures.TablesTest):
    __backend__ = True
    __requires__ = ("computed_columns",)

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "square",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("side", Integer),
            Column("area", Integer, Computed("side * side")),
            Column("perimeter", Integer, Computed("4 * side")),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.square.insert(),
            [{"id": 1, "side": 10}, {"id": 10, "side": 42}],
        )

    def test_select_all(self):
        with config.db.connect() as conn:
            res = conn.execute(
                select([text("*")])
                .select_from(self.tables.square)
                .order_by(self.tables.square.c.id)
            ).fetchall()
        eq_(res, [(1, 10, 100, 40), (10, 42, 1764, 168)])

    def test_select_columns(self):
        with config.db.connect() as conn:
            res = conn.execute(
                select(
                    [self.tables.square.c.area, self.tables.square.c.perimeter]
                )
                .select_from(self.tables.square)
                .order_by(self.tables.square.c.id)
            ).fetchall()
        eq_(res, [(100, 40), (1764, 168)])


class IsOrIsNotDistinctFromTest(fixtures.TablesTest):
    __backend__ = True
    __requires__ = ("supports_is_distinct_from",)

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "is_distinct_test",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("col_a", Integer, nullable=True),
            Column("col_b", Integer, nullable=True),
        )

    @testing.combinations(
        ("both_int_different", 0, 1, 1),
        ("both_int_same", 1, 1, 0),
        ("one_null_first", None, 1, 1),
        ("one_null_second", 0, None, 1),
        ("both_null", None, None, 0),
        id_="iaaa",
        argnames="col_a_value, col_b_value, expected_row_count_for_is",
    )
    def test_is_or_isnot_distinct_from(
        self, col_a_value, col_b_value, expected_row_count_for_is, connection
    ):
        tbl = self.tables.is_distinct_test
        connection.execute(
            tbl.insert(),
            [{"id": 1, "col_a": col_a_value, "col_b": col_b_value}],
        )

        result = connection.execute(
            tbl.select(tbl.c.col_a.is_distinct_from(tbl.c.col_b))
        ).fetchall()
        eq_(
            len(result),
            expected_row_count_for_is,
        )

        expected_row_count_for_isnot = (
            1 if expected_row_count_for_is == 0 else 0
        )
        result = connection.execute(
            tbl.select(tbl.c.col_a.isnot_distinct_from(tbl.c.col_b))
        ).fetchall()
        eq_(
            len(result),
            expected_row_count_for_isnot,
        )
0
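Of the behaviors exercised above, expanding IN parameters are the one most worth a standalone illustration: bindparam(..., expanding=True) defers the IN list to execution time and handles the empty-list case cleanly. A runnable sketch on SQLite:

from sqlalchemy import (
    Column, Integer, MetaData, Table, bindparam, create_engine, select,
)

engine = create_engine("sqlite://")
metadata = MetaData()
nums = Table("nums", metadata, Column("x", Integer))
metadata.create_all(engine)
engine.execute(nums.insert(), [{"x": i} for i in range(1, 6)])

# the IN list is a single bound parameter, expanded at each execution
stmt = (
    select([nums.c.x])
    .where(nums.c.x.in_(bindparam("q", expanding=True)))
    .order_by(nums.c.x)
)

print(engine.execute(stmt, {"q": [2, 4]}).fetchall())  # [(2,), (4,)]
print(engine.execute(stmt, {"q": []}).fetchall())      # [] - empty set handled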
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/suite/test_ddl.py
from .. import config
from .. import fixtures
from .. import util
from ..assertions import eq_
from ..config import requirements
from ... import Column
from ... import inspect
from ... import Integer
from ... import schema
from ... import String
from ... import Table


class TableDDLTest(fixtures.TestBase):
    __backend__ = True

    def _simple_fixture(self, schema=None):
        return Table(
            "test_table",
            self.metadata,
            Column("id", Integer, primary_key=True, autoincrement=False),
            Column("data", String(50)),
            schema=schema,
        )

    def _underscore_fixture(self):
        return Table(
            "_test_table",
            self.metadata,
            Column("id", Integer, primary_key=True, autoincrement=False),
            Column("_data", String(50)),
        )

    def _simple_roundtrip(self, table):
        with config.db.begin() as conn:
            conn.execute(table.insert().values((1, "some data")))
            result = conn.execute(table.select())
            eq_(result.first(), (1, "some data"))

    @requirements.create_table
    @util.provide_metadata
    def test_create_table(self):
        table = self._simple_fixture()
        table.create(config.db, checkfirst=False)
        self._simple_roundtrip(table)

    @requirements.create_table
    @requirements.schemas
    @util.provide_metadata
    def test_create_table_schema(self):
        table = self._simple_fixture(schema=config.test_schema)
        table.create(config.db, checkfirst=False)
        self._simple_roundtrip(table)

    @requirements.drop_table
    @util.provide_metadata
    def test_drop_table(self):
        table = self._simple_fixture()
        table.create(config.db, checkfirst=False)
        table.drop(config.db, checkfirst=False)

    @requirements.create_table
    @util.provide_metadata
    def test_underscore_names(self):
        table = self._underscore_fixture()
        table.create(config.db, checkfirst=False)
        self._simple_roundtrip(table)

    @requirements.comment_reflection
    @util.provide_metadata
    def test_add_table_comment(self):
        table = self._simple_fixture()
        table.create(config.db, checkfirst=False)
        table.comment = "a comment"
        config.db.execute(schema.SetTableComment(table))
        eq_(
            inspect(config.db).get_table_comment("test_table"),
            {"text": "a comment"},
        )

    @requirements.comment_reflection
    @util.provide_metadata
    def test_drop_table_comment(self):
        table = self._simple_fixture()
        table.create(config.db, checkfirst=False)
        table.comment = "a comment"
        config.db.execute(schema.SetTableComment(table))
        config.db.execute(schema.DropTableComment(table))
        eq_(inspect(config.db).get_table_comment("test_table"), {"text": None})


__all__ = ("TableDDLTest",)
0
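TableDDLTest boils down to emitting CREATE TABLE / DROP TABLE and, where the dialect supports it, table-comment DDL via schema.SetTableComment. A minimal sketch of the same round trip, runnable on SQLite (the comment step assumes a comment-capable backend such as PostgreSQL, so it is left commented out):

from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine

engine = create_engine("sqlite://")
metadata = MetaData()
t = Table(
    "demo",
    metadata,
    Column("id", Integer, primary_key=True, autoincrement=False),
    Column("data", String(50)),
)

t.create(engine, checkfirst=False)           # CREATE TABLE demo (...)
engine.execute(t.insert().values((1, "some data")))
print(engine.execute(t.select()).first())    # (1, 'some data')
t.drop(engine, checkfirst=False)             # DROP TABLE demo

# on a comment-capable backend (assumption: PostgreSQL), comments are
# separate DDL statements:
# from sqlalchemy import schema
# t.comment = "a comment"
# engine.execute(schema.SetTableComment(t))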
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/suite/test_dialect.py
#! coding: utf-8

from .. import assert_raises
from .. import config
from .. import eq_
from .. import fixtures
from .. import ne_
from .. import provide_metadata
from ..config import requirements
from ..schema import Column
from ..schema import Table
from ... import exc
from ... import Integer
from ... import literal_column
from ... import select
from ... import String
from ...util import compat


class ExceptionTest(fixtures.TablesTest):
    """Test basic exception wrapping.

    DBAPIs vary a lot in exception behavior so to actually anticipate
    specific exceptions from real round trips, we need to be conservative.

    """

    run_deletes = "each"

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "manual_pk",
            metadata,
            Column("id", Integer, primary_key=True, autoincrement=False),
            Column("data", String(50)),
        )

    @requirements.duplicate_key_raises_integrity_error
    def test_integrity_error(self):
        with config.db.connect() as conn:

            trans = conn.begin()
            conn.execute(
                self.tables.manual_pk.insert(), {"id": 1, "data": "d1"}
            )

            assert_raises(
                exc.IntegrityError,
                conn.execute,
                self.tables.manual_pk.insert(),
                {"id": 1, "data": "d1"},
            )

            trans.rollback()

    def test_exception_with_non_ascii(self):
        with config.db.connect() as conn:
            try:
                # try to create an error message that likely has non-ascii
                # characters in the DBAPI's message string.  unfortunately
                # there's no way to make this happen with some drivers like
                # mysqlclient, pymysql.  this at least does produce a non-
                # ascii error message for cx_oracle, psycopg2
                conn.execute(select([literal_column(u"méil")]))
                assert False
            except exc.DBAPIError as err:
                err_str = str(err)

                assert str(err.orig) in str(err)

                # test that we are actually getting string on Py2k, unicode
                # on Py3k.
                if compat.py2k:
                    assert isinstance(err_str, str)
                else:
                    assert isinstance(err_str, str)


class IsolationLevelTest(fixtures.TestBase):
    __backend__ = True

    __requires__ = ("isolation_level",)

    def _get_non_default_isolation_level(self):
        levels = requirements.get_isolation_levels(config)

        default = levels["default"]
        supported = levels["supported"]

        s = set(supported).difference(["AUTOCOMMIT", default])
        if s:
            return s.pop()
        else:
            config.skip_test("no non-default isolation level available")

    def test_default_isolation_level(self):
        eq_(
            config.db.dialect.default_isolation_level,
            requirements.get_isolation_levels(config)["default"],
        )

    def test_non_default_isolation_level(self):
        non_default = self._get_non_default_isolation_level()

        with config.db.connect() as conn:
            existing = conn.get_isolation_level()

            ne_(existing, non_default)

            conn.execution_options(isolation_level=non_default)

            eq_(conn.get_isolation_level(), non_default)

            conn.dialect.reset_isolation_level(conn.connection)

            eq_(conn.get_isolation_level(), existing)


class AutocommitTest(fixtures.TablesTest):

    run_deletes = "each"

    __requires__ = ("autocommit",)

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True, autoincrement=False),
            Column("data", String(50)),
            test_needs_acid=True,
        )

    def _test_conn_autocommits(self, conn, autocommit):
        trans = conn.begin()
        conn.execute(
            self.tables.some_table.insert(), {"id": 1, "data": "some data"}
        )
        trans.rollback()

        eq_(
            conn.scalar(select([self.tables.some_table.c.id])),
            1 if autocommit else None,
        )

        conn.execute(self.tables.some_table.delete())

    def test_autocommit_on(self):
        conn = config.db.connect()
        c2 = conn.execution_options(isolation_level="AUTOCOMMIT")
        self._test_conn_autocommits(c2, True)
        c2.dialect.reset_isolation_level(c2.connection)
        self._test_conn_autocommits(conn, False)

    def test_autocommit_off(self):
        conn = config.db.connect()
        self._test_conn_autocommits(conn, False)

    def test_turn_autocommit_off_via_default_iso_level(self):
        conn = config.db.connect()
        conn.execution_options(isolation_level="AUTOCOMMIT")
        self._test_conn_autocommits(conn, True)

        conn.execution_options(
            isolation_level=requirements.get_isolation_levels(config)[
                "default"
            ]
        )
        self._test_conn_autocommits(conn, False)


class EscapingTest(fixtures.TestBase):
    @provide_metadata
    def test_percent_sign_round_trip(self):
        """test that the DBAPI accommodates for escaped / nonescaped
        percent signs in a way that matches the compiler

        """
        m = self.metadata
        t = Table("t", m, Column("data", String(50)))
        t.create(config.db)
        with config.db.begin() as conn:
            conn.execute(t.insert(), dict(data="some % value"))
            conn.execute(t.insert(), dict(data="some %% other value"))

            eq_(
                conn.scalar(
                    select([t.c.data]).where(
                        t.c.data == literal_column("'some % value'")
                    )
                ),
                "some % value",
            )

            eq_(
                conn.scalar(
                    select([t.c.data]).where(
                        t.c.data == literal_column("'some %% other value'")
                    )
                ),
                "some %% other value",
            )
0
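The isolation-level and autocommit tests above hinge on Connection.execution_options(isolation_level=...). A sketch of the same calls, assuming a PostgreSQL engine (the DSN is hypothetical, and the set of accepted level names is per-dialect, so "AUTOCOMMIT" here is an assumption about the backend):

from sqlalchemy import create_engine

# hypothetical DSN; isolation level names vary by dialect
engine = create_engine("postgresql://scott:tiger@localhost/test")

conn = engine.connect()
print(conn.get_isolation_level())  # e.g. 'READ COMMITTED'

# switch this connection to driver-level autocommit
conn.execution_options(isolation_level="AUTOCOMMIT")
conn.execute("select 1")  # no transaction is held open

# restore the dialect's default on the underlying DBAPI connection
conn.dialect.reset_isolation_level(conn.connection)
conn.close()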
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/suite/__init__.py
from .test_cte import *  # noqa
from .test_ddl import *  # noqa
from .test_dialect import *  # noqa
from .test_insert import *  # noqa
from .test_reflection import *  # noqa
from .test_results import *  # noqa
from .test_select import *  # noqa
from .test_sequence import *  # noqa
from .test_types import *  # noqa
from .test_update_delete import *  # noqa
0
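This __init__.py is the hook that third-party dialects use to pick up the whole compliance suite: a star import in the dialect's own test module registers every test class above with the test runner. A sketch of the conventional usage (the file name and the overridden class are illustrative assumptions, following the pattern SQLAlchemy documents for external dialects):

# test/test_suite.py in a hypothetical external dialect package
from sqlalchemy.testing.suite import *  # noqa

# individual suite classes can then be subclassed under the same name
# to skip or adjust tests the backend cannot satisfy
from sqlalchemy.testing.suite import ComponentReflectionTest as _ComponentReflectionTest


class ComponentReflectionTest(_ComponentReflectionTest):
    # example: neutralize a reflection test for this backend
    def test_get_indexes(self):
        pass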
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/suite/test_update_delete.py
from .. import config
from .. import fixtures
from ..assertions import eq_
from ..schema import Column
from ..schema import Table
from ... import Integer
from ... import String


class SimpleUpdateDeleteTest(fixtures.TablesTest):
    run_deletes = "each"
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "plain_pk",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", String(50)),
        )

    @classmethod
    def insert_data(cls, connection):
        connection.execute(
            cls.tables.plain_pk.insert(),
            [
                {"id": 1, "data": "d1"},
                {"id": 2, "data": "d2"},
                {"id": 3, "data": "d3"},
            ],
        )

    def test_update(self):
        t = self.tables.plain_pk
        r = config.db.execute(t.update().where(t.c.id == 2), data="d2_new")
        assert not r.is_insert
        assert not r.returns_rows

        eq_(
            config.db.execute(t.select().order_by(t.c.id)).fetchall(),
            [(1, "d1"), (2, "d2_new"), (3, "d3")],
        )

    def test_delete(self):
        t = self.tables.plain_pk
        r = config.db.execute(t.delete().where(t.c.id == 2))
        assert not r.is_insert
        assert not r.returns_rows
        eq_(
            config.db.execute(t.select().order_by(t.c.id)).fetchall(),
            [(1, "d1"), (3, "d3")],
        )


__all__ = ("SimpleUpdateDeleteTest",)
0
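The UPDATE/DELETE round trip above is easy to reproduce standalone; note that the result object for these statements reports no rows, only status flags like rowcount. A runnable sketch on SQLite:

from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine

engine = create_engine("sqlite://")
metadata = MetaData()
t = Table(
    "plain",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("data", String(50)),
)
metadata.create_all(engine)
engine.execute(t.insert(), [{"id": 1, "data": "d1"}, {"id": 2, "data": "d2"}])

# values may be passed as keyword arguments alongside the statement
r = engine.execute(t.update().where(t.c.id == 2), data="d2_new")
assert not r.returns_rows and r.rowcount == 1

engine.execute(t.delete().where(t.c.id == 1))
print(engine.execute(t.select()).fetchall())  # [(2, 'd2_new')]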
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/suite/test_reflection.py
import operator
import re

import sqlalchemy as sa
from .. import assert_raises_message
from .. import config
from .. import engines
from .. import eq_
from .. import expect_warnings
from .. import fixtures
from .. import is_
from ..provision import temp_table_keyword_args
from ..schema import Column
from ..schema import Table
from ... import event
from ... import exc as sa_exc
from ... import ForeignKey
from ... import inspect
from ... import Integer
from ... import MetaData
from ... import String
from ... import testing
from ... import types as sql_types
from ...engine.reflection import Inspector
from ...schema import DDL
from ...schema import Index
from ...sql.elements import quoted_name
from ...testing import is_false
from ...testing import is_true


metadata, users = None, None


class HasTableTest(fixtures.TablesTest):
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "test_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", String(50)),
        )
        if testing.requires.schemas.enabled:
            Table(
                "test_table_s",
                metadata,
                Column("id", Integer, primary_key=True),
                Column("data", String(50)),
                schema=config.test_schema,
            )

    def test_has_table(self):
        with config.db.begin() as conn:
            is_true(config.db.dialect.has_table(conn, "test_table"))
            is_false(config.db.dialect.has_table(conn, "test_table_s"))
            is_false(config.db.dialect.has_table(conn, "nonexistent_table"))

    @testing.requires.schemas
    def test_has_table_schema(self):
        with config.db.begin() as conn:
            is_false(
                config.db.dialect.has_table(
                    conn, "test_table", schema=config.test_schema
                )
            )
            is_true(
                config.db.dialect.has_table(
                    conn, "test_table_s", schema=config.test_schema
                )
            )
            is_false(
                config.db.dialect.has_table(
                    conn, "nonexistent_table", schema=config.test_schema
                )
            )


class ComponentReflectionTest(fixtures.TablesTest):
    run_inserts = run_deletes = None

    __backend__ = True

    @classmethod
    def setup_bind(cls):
        if config.requirements.independent_connections.enabled:
            from sqlalchemy import pool

            return engines.testing_engine(
                options=dict(poolclass=pool.StaticPool)
            )
        else:
            return config.db

    @classmethod
    def define_tables(cls, metadata):
        cls.define_reflected_tables(metadata, None)
        if testing.requires.schemas.enabled:
            cls.define_reflected_tables(metadata, testing.config.test_schema)

    @classmethod
    def define_reflected_tables(cls, metadata, schema):
        if schema:
            schema_prefix = schema + "."
        else:
            schema_prefix = ""

        if testing.requires.self_referential_foreign_keys.enabled:
            users = Table(
                "users",
                metadata,
                Column("user_id", sa.INT, primary_key=True),
                Column("test1", sa.CHAR(5), nullable=False),
                Column("test2", sa.Float(5), nullable=False),
                Column(
                    "parent_user_id",
                    sa.Integer,
                    sa.ForeignKey(
                        "%susers.user_id" % schema_prefix, name="user_id_fk"
                    ),
                ),
                schema=schema,
                test_needs_fk=True,
            )
        else:
            users = Table(
                "users",
                metadata,
                Column("user_id", sa.INT, primary_key=True),
                Column("test1", sa.CHAR(5), nullable=False),
                Column("test2", sa.Float(5), nullable=False),
                schema=schema,
                test_needs_fk=True,
            )

        Table(
            "dingalings",
            metadata,
            Column("dingaling_id", sa.Integer, primary_key=True),
            Column(
                "address_id",
                sa.Integer,
                sa.ForeignKey("%semail_addresses.address_id" % schema_prefix),
            ),
            Column("data", sa.String(30)),
            schema=schema,
            test_needs_fk=True,
        )
        Table(
            "email_addresses",
            metadata,
            Column("address_id", sa.Integer),
            Column(
                "remote_user_id", sa.Integer, sa.ForeignKey(users.c.user_id)
            ),
            Column("email_address", sa.String(20)),
            sa.PrimaryKeyConstraint("address_id", name="email_ad_pk"),
            schema=schema,
            test_needs_fk=True,
        )
        Table(
            "comment_test",
            metadata,
            Column("id", sa.Integer, primary_key=True, comment="id comment"),
            Column("data", sa.String(20), comment="data % comment"),
            Column(
                "d2",
                sa.String(20),
                comment=r"""Comment types type speedily ' " \ '' Fun!""",
            ),
            schema=schema,
            comment=r"""the test % ' " \ table comment""",
        )

        if testing.requires.cross_schema_fk_reflection.enabled:
            if schema is None:
                Table(
                    "local_table",
                    metadata,
                    Column("id", sa.Integer, primary_key=True),
                    Column("data", sa.String(20)),
                    Column(
                        "remote_id",
                        ForeignKey(
                            "%s.remote_table_2.id"
                            % testing.config.test_schema
                        ),
                    ),
                    test_needs_fk=True,
                    schema=config.db.dialect.default_schema_name,
                )
            else:
                Table(
                    "remote_table",
                    metadata,
                    Column("id", sa.Integer, primary_key=True),
                    Column(
                        "local_id",
                        ForeignKey(
                            "%s.local_table.id"
                            % config.db.dialect.default_schema_name
                        ),
                    ),
                    Column("data", sa.String(20)),
                    schema=schema,
                    test_needs_fk=True,
                )
                Table(
                    "remote_table_2",
                    metadata,
                    Column("id", sa.Integer, primary_key=True),
                    Column("data", sa.String(20)),
                    schema=schema,
                    test_needs_fk=True,
                )

        if testing.requires.index_reflection.enabled:
            cls.define_index(metadata, users)

            if not schema:
                # test_needs_fk is at the moment to force MySQL InnoDB
                noncol_idx_test_nopk = Table(
                    "noncol_idx_test_nopk",
                    metadata,
                    Column("q", sa.String(5)),
                    test_needs_fk=True,
                )

                noncol_idx_test_pk = Table(
                    "noncol_idx_test_pk",
                    metadata,
                    Column("id", sa.Integer, primary_key=True),
                    Column("q", sa.String(5)),
                    test_needs_fk=True,
                )

                if testing.requires.indexes_with_ascdesc.enabled:
                    Index("noncol_idx_nopk", noncol_idx_test_nopk.c.q.desc())
                    Index("noncol_idx_pk", noncol_idx_test_pk.c.q.desc())

        if testing.requires.view_column_reflection.enabled:
            cls.define_views(metadata, schema)
        if not schema and testing.requires.temp_table_reflection.enabled:
            cls.define_temp_tables(metadata)

    @classmethod
    def define_temp_tables(cls, metadata):
        kw = temp_table_keyword_args(config, config.db)
        user_tmp = Table(
            "user_tmp",
            metadata,
            Column("id", sa.INT, primary_key=True),
            Column("name", sa.VARCHAR(50)),
            Column("foo", sa.INT),
            sa.UniqueConstraint("name", name="user_tmp_uq"),
            sa.Index("user_tmp_ix", "foo"),
            **kw
        )
        if (
            testing.requires.view_reflection.enabled
            and testing.requires.temporary_views.enabled
        ):
            event.listen(
                user_tmp,
                "after_create",
                DDL(
                    "create temporary view user_tmp_v as "
                    "select * from user_tmp"
                ),
            )
            event.listen(user_tmp, "before_drop", DDL("drop view user_tmp_v"))

    @classmethod
    def define_index(cls, metadata, users):
        Index("users_t_idx", users.c.test1, users.c.test2)
        Index("users_all_idx", users.c.user_id, users.c.test2, users.c.test1)

    @classmethod
    def define_views(cls, metadata, schema):
        for table_name in ("users", "email_addresses"):
            fullname = table_name
            if schema:
                fullname = "%s.%s" % (schema, table_name)
            view_name = fullname + "_v"
            query = "CREATE VIEW %s AS SELECT * FROM %s" % (
                view_name,
                fullname,
            )

            event.listen(metadata, "after_create", DDL(query))
            event.listen(
                metadata, "before_drop", DDL("DROP VIEW %s" % view_name)
            )

    @testing.requires.schema_reflection
    def test_get_schema_names(self):
        insp = inspect(testing.db)

        self.assert_(testing.config.test_schema in insp.get_schema_names())

    @testing.requires.schema_reflection
    def test_dialect_initialize(self):
        engine = engines.testing_engine()
        assert not hasattr(engine.dialect, "default_schema_name")
        inspect(engine)
        assert hasattr(engine.dialect, "default_schema_name")

    @testing.requires.schema_reflection
    def test_get_default_schema_name(self):
        insp = inspect(testing.db)
        eq_(insp.default_schema_name, testing.db.dialect.default_schema_name)

    @testing.provide_metadata
    def _test_get_table_names(
        self, schema=None, table_type="table", order_by=None
    ):
        _ignore_tables = [
            "comment_test",
            "noncol_idx_test_pk",
            "noncol_idx_test_nopk",
            "local_table",
            "remote_table",
            "remote_table_2",
        ]
        meta = self.metadata

        insp = inspect(meta.bind)

        if table_type == "view":
            table_names = insp.get_view_names(schema)
            table_names.sort()
            answer = ["email_addresses_v", "users_v"]
            eq_(sorted(table_names), answer)
        else:
            if order_by:
                tables = [
                    rec[0]
                    for rec in insp.get_sorted_table_and_fkc_names(schema)
                    if rec[0]
                ]
            else:
                tables = insp.get_table_names(schema)
            table_names = [t for t in tables if t not in _ignore_tables]

            if order_by == "foreign_key":
                answer = ["users", "email_addresses", "dingalings"]
                eq_(table_names, answer)
            else:
                answer = ["dingalings", "email_addresses", "users"]
                eq_(sorted(table_names), answer)

    @testing.requires.temp_table_names
    def test_get_temp_table_names(self):
        insp = inspect(self.bind)
        temp_table_names = insp.get_temp_table_names()
        eq_(sorted(temp_table_names), ["user_tmp"])

    @testing.requires.view_reflection
    @testing.requires.temp_table_names
    @testing.requires.temporary_views
    def test_get_temp_view_names(self):
        insp = inspect(self.bind)
        temp_table_names = insp.get_temp_view_names()
        eq_(sorted(temp_table_names), ["user_tmp_v"])

    @testing.requires.table_reflection
    def test_get_table_names(self):
        self._test_get_table_names()

    @testing.requires.table_reflection
    @testing.requires.foreign_key_constraint_reflection
    def test_get_table_names_fks(self):
        self._test_get_table_names(order_by="foreign_key")

    @testing.requires.comment_reflection
    def test_get_comments(self):
        self._test_get_comments()

    @testing.requires.comment_reflection
    @testing.requires.schemas
    def test_get_comments_with_schema(self):
        self._test_get_comments(testing.config.test_schema)

    def _test_get_comments(self, schema=None):
        insp = inspect(testing.db)

        eq_(
            insp.get_table_comment("comment_test", schema=schema),
            {"text": r"""the test % ' " \ table comment"""},
        )

        eq_(insp.get_table_comment("users", schema=schema), {"text": None})

        eq_(
            [
                {"name": rec["name"], "comment": rec["comment"]}
                for rec in insp.get_columns("comment_test", schema=schema)
            ],
            [
                {"comment": "id comment", "name": "id"},
                {"comment": "data % comment", "name": "data"},
                {
                    "comment": (
                        r"""Comment types type speedily ' " \ '' Fun!"""
                    ),
                    "name": "d2",
                },
            ],
        )

    @testing.requires.table_reflection
    @testing.requires.schemas
    def test_get_table_names_with_schema(self):
        self._test_get_table_names(testing.config.test_schema)

    @testing.requires.view_column_reflection
    def test_get_view_names(self):
        self._test_get_table_names(table_type="view")

    @testing.requires.view_column_reflection
    @testing.requires.schemas
    def test_get_view_names_with_schema(self):
        self._test_get_table_names(
            testing.config.test_schema, table_type="view"
        )

    @testing.requires.table_reflection
    @testing.requires.view_column_reflection
    def test_get_tables_and_views(self):
        self._test_get_table_names()
        self._test_get_table_names(table_type="view")

    def _test_get_columns(self, schema=None, table_type="table"):
        meta = MetaData(testing.db)
        users, addresses = (self.tables.users, self.tables.email_addresses)
        table_names = ["users", "email_addresses"]
        if table_type == "view":
            table_names = ["users_v", "email_addresses_v"]
        insp = inspect(meta.bind)
        for table_name, table in zip(table_names, (users, addresses)):
            schema_name = schema
            cols = insp.get_columns(table_name, schema=schema_name)
            self.assert_(len(cols) > 0, len(cols))

            # should be in order
            for i, col in enumerate(table.columns):
                eq_(col.name, cols[i]["name"])
                ctype = cols[i]["type"].__class__
                ctype_def = col.type
                if isinstance(ctype_def, sa.types.TypeEngine):
                    ctype_def = ctype_def.__class__

                # Oracle returns Date for DateTime.
                if testing.against("oracle") and ctype_def in (
                    sql_types.Date,
                    sql_types.DateTime,
                ):
                    ctype_def = sql_types.Date

                # assert that the desired type and return type share
                # a base within one of the generic types.
                self.assert_(
                    len(
                        set(ctype.__mro__)
                        .intersection(ctype_def.__mro__)
                        .intersection(
                            [
                                sql_types.Integer,
                                sql_types.Numeric,
                                sql_types.DateTime,
                                sql_types.Date,
                                sql_types.Time,
                                sql_types.String,
                                sql_types._Binary,
                            ]
                        )
                    )
                    > 0,
                    "%s(%s), %s(%s)"
                    % (col.name, col.type, cols[i]["name"], ctype),
                )

                if not col.primary_key:
                    assert cols[i]["default"] is None

    @testing.requires.table_reflection
    def test_get_columns(self):
        self._test_get_columns()

    @testing.provide_metadata
    def _type_round_trip(self, *types):
        t = Table(
            "t",
            self.metadata,
            *[Column("t%d" % i, type_) for i, type_ in enumerate(types)]
        )
        t.create()

        return [
            c["type"] for c in inspect(self.metadata.bind).get_columns("t")
        ]

    @testing.requires.table_reflection
    def test_numeric_reflection(self):
        for typ in self._type_round_trip(sql_types.Numeric(18, 5)):
            assert isinstance(typ, sql_types.Numeric)
            eq_(typ.precision, 18)
            eq_(typ.scale, 5)

    @testing.requires.table_reflection
    def test_varchar_reflection(self):
        typ = self._type_round_trip(sql_types.String(52))[0]
        assert isinstance(typ, sql_types.String)
        eq_(typ.length, 52)

    @testing.requires.table_reflection
    @testing.provide_metadata
    def test_nullable_reflection(self):
        t = Table(
            "t",
            self.metadata,
            Column("a", Integer, nullable=True),
            Column("b", Integer, nullable=False),
        )
        t.create()
        eq_(
            dict(
                (col["name"], col["nullable"])
                for col in inspect(self.metadata.bind).get_columns("t")
            ),
            {"a": True, "b": False},
        )

    @testing.requires.table_reflection
    @testing.requires.schemas
    def test_get_columns_with_schema(self):
        self._test_get_columns(schema=testing.config.test_schema)

    @testing.requires.temp_table_reflection
    def test_get_temp_table_columns(self):
        meta = MetaData(self.bind)
        user_tmp = self.tables.user_tmp
        insp = inspect(meta.bind)
        cols = insp.get_columns("user_tmp")
        self.assert_(len(cols) > 0, len(cols))

        for i, col in enumerate(user_tmp.columns):
            eq_(col.name, cols[i]["name"])

    @testing.requires.temp_table_reflection
    @testing.requires.view_column_reflection
    @testing.requires.temporary_views
    def test_get_temp_view_columns(self):
        insp = inspect(self.bind)
        cols = insp.get_columns("user_tmp_v")
        eq_([col["name"] for col in cols], ["id", "name", "foo"])

    @testing.requires.view_column_reflection
    def test_get_view_columns(self):
        self._test_get_columns(table_type="view")

    @testing.requires.view_column_reflection
    @testing.requires.schemas
    def test_get_view_columns_with_schema(self):
        self._test_get_columns(
            schema=testing.config.test_schema, table_type="view"
        )

    @testing.provide_metadata
    def _test_get_pk_constraint(self, schema=None):
        meta = self.metadata
        users, addresses = self.tables.users, self.tables.email_addresses
        insp = inspect(meta.bind)

        users_cons = insp.get_pk_constraint(users.name, schema=schema)
        users_pkeys = users_cons["constrained_columns"]
        eq_(users_pkeys, ["user_id"])

        addr_cons = insp.get_pk_constraint(addresses.name, schema=schema)
        addr_pkeys = addr_cons["constrained_columns"]
        eq_(addr_pkeys, ["address_id"])

        with testing.requires.reflects_pk_names.fail_if():
            eq_(addr_cons["name"], "email_ad_pk")

    @testing.requires.primary_key_constraint_reflection
    def test_get_pk_constraint(self):
        self._test_get_pk_constraint()

    @testing.requires.table_reflection
    @testing.requires.primary_key_constraint_reflection
    @testing.requires.schemas
    def test_get_pk_constraint_with_schema(self):
        self._test_get_pk_constraint(schema=testing.config.test_schema)

    @testing.requires.table_reflection
    @testing.provide_metadata
    def test_deprecated_get_primary_keys(self):
        meta = self.metadata
        users = self.tables.users
        insp = Inspector(meta.bind)
        assert_raises_message(
            sa_exc.SADeprecationWarning,
            r".*get_primary_keys\(\) method is deprecated",
            insp.get_primary_keys,
            users.name,
        )

    @testing.provide_metadata
    def _test_get_foreign_keys(self, schema=None):
        meta = self.metadata
        users, addresses = (self.tables.users, self.tables.email_addresses)
        insp = inspect(meta.bind)
        expected_schema = schema

        # users
        if testing.requires.self_referential_foreign_keys.enabled:
            users_fkeys = insp.get_foreign_keys(users.name, schema=schema)
            fkey1 = users_fkeys[0]

            with testing.requires.named_constraints.fail_if():
                eq_(fkey1["name"], "user_id_fk")

            eq_(fkey1["referred_schema"], expected_schema)
            eq_(fkey1["referred_table"], users.name)
            eq_(fkey1["referred_columns"], ["user_id"])
            if testing.requires.self_referential_foreign_keys.enabled:
                eq_(fkey1["constrained_columns"], ["parent_user_id"])

        # addresses
        addr_fkeys = insp.get_foreign_keys(addresses.name, schema=schema)
        fkey1 = addr_fkeys[0]

        with testing.requires.implicitly_named_constraints.fail_if():
            self.assert_(fkey1["name"] is not None)

        eq_(fkey1["referred_schema"], expected_schema)
        eq_(fkey1["referred_table"], users.name)
        eq_(fkey1["referred_columns"], ["user_id"])
        eq_(fkey1["constrained_columns"], ["remote_user_id"])

    @testing.requires.foreign_key_constraint_reflection
    def test_get_foreign_keys(self):
        self._test_get_foreign_keys()

    @testing.requires.foreign_key_constraint_reflection
    @testing.requires.schemas
    def test_get_foreign_keys_with_schema(self):
        self._test_get_foreign_keys(schema=testing.config.test_schema)

    @testing.requires.cross_schema_fk_reflection
    @testing.requires.schemas
    def test_get_inter_schema_foreign_keys(self):
        local_table, remote_table, remote_table_2 = self.tables(
            "%s.local_table" % testing.db.dialect.default_schema_name,
            "%s.remote_table" % testing.config.test_schema,
            "%s.remote_table_2" % testing.config.test_schema,
        )

        insp = inspect(config.db)

        local_fkeys = insp.get_foreign_keys(local_table.name)
        eq_(len(local_fkeys), 1)

        fkey1 = local_fkeys[0]
        eq_(fkey1["referred_schema"], testing.config.test_schema)
        eq_(fkey1["referred_table"], remote_table_2.name)
        eq_(fkey1["referred_columns"], ["id"])
        eq_(fkey1["constrained_columns"], ["remote_id"])

        remote_fkeys = insp.get_foreign_keys(
            remote_table.name, schema=testing.config.test_schema
        )
        eq_(len(remote_fkeys), 1)

        fkey2 = remote_fkeys[0]

        assert fkey2["referred_schema"] in (
            None,
            testing.db.dialect.default_schema_name,
        )
        eq_(fkey2["referred_table"], local_table.name)
        eq_(fkey2["referred_columns"], ["id"])
        eq_(fkey2["constrained_columns"], ["local_id"])

    @testing.requires.foreign_key_constraint_option_reflection_ondelete
    def test_get_foreign_key_options_ondelete(self):
        self._test_get_foreign_key_options(ondelete="CASCADE")

    @testing.requires.foreign_key_constraint_option_reflection_onupdate
    def test_get_foreign_key_options_onupdate(self):
        self._test_get_foreign_key_options(onupdate="SET NULL")

    @testing.provide_metadata
    def _test_get_foreign_key_options(self, **options):
        meta = self.metadata

        Table(
            "x",
            meta,
            Column("id", Integer, primary_key=True),
            test_needs_fk=True,
        )

        Table(
            "table",
            meta,
            Column("id", Integer, primary_key=True),
            Column("x_id", Integer, sa.ForeignKey("x.id", name="xid")),
            Column("test", String(10)),
            test_needs_fk=True,
        )

        Table(
            "user",
            meta,
            Column("id", Integer, primary_key=True),
            Column("name", String(50), nullable=False),
            Column("tid", Integer),
            sa.ForeignKeyConstraint(
                ["tid"], ["table.id"], name="myfk", **options
            ),
            test_needs_fk=True,
        )

        meta.create_all()

        insp = inspect(meta.bind)

        # test 'options' is always present for a backend
        # that can reflect these, since alembic looks for this
        opts = insp.get_foreign_keys("table")[0]["options"]

        eq_(dict((k, opts[k]) for k in opts if opts[k]), {})

        opts = insp.get_foreign_keys("user")[0]["options"]
        eq_(dict((k, opts[k]) for k in opts if opts[k]), options)

    def _assert_insp_indexes(self, indexes, expected_indexes):
        index_names = [d["name"] for d in indexes]
        for e_index in expected_indexes:
            assert e_index["name"] in index_names
            index = indexes[index_names.index(e_index["name"])]

            for key in e_index:
                eq_(e_index[key], index[key])

    @testing.provide_metadata
    def _test_get_indexes(self, schema=None):
        meta = self.metadata

        # The database may decide to create indexes for foreign keys, etc.
        # so there may be more indexes than expected.
        insp = inspect(meta.bind)
        indexes = insp.get_indexes("users", schema=schema)
        expected_indexes = [
            {
                "unique": False,
                "column_names": ["test1", "test2"],
                "name": "users_t_idx",
            },
            {
                "unique": False,
                "column_names": ["user_id", "test2", "test1"],
                "name": "users_all_idx",
            },
        ]
        self._assert_insp_indexes(indexes, expected_indexes)

    @testing.requires.index_reflection
    def test_get_indexes(self):
        self._test_get_indexes()

    @testing.requires.index_reflection
    @testing.requires.schemas
    def test_get_indexes_with_schema(self):
        self._test_get_indexes(schema=testing.config.test_schema)

    @testing.provide_metadata
    def _test_get_noncol_index(self, tname, ixname):
        meta = self.metadata
        insp = inspect(meta.bind)
        indexes = insp.get_indexes(tname)

        # reflecting an index that has "x DESC" in it as the column.
        # the DB may or may not give us "x", but make sure we get the index
        # back, it has a name, it's connected to the table.
        expected_indexes = [{"unique": False, "name": ixname}]
        self._assert_insp_indexes(indexes, expected_indexes)

        t = Table(tname, meta, autoload_with=meta.bind)
        eq_(len(t.indexes), 1)
        is_(list(t.indexes)[0].table, t)
        eq_(list(t.indexes)[0].name, ixname)

    @testing.requires.index_reflection
    @testing.requires.indexes_with_ascdesc
    def test_get_noncol_index_no_pk(self):
        self._test_get_noncol_index("noncol_idx_test_nopk", "noncol_idx_nopk")

    @testing.requires.index_reflection
    @testing.requires.indexes_with_ascdesc
    def test_get_noncol_index_pk(self):
        self._test_get_noncol_index("noncol_idx_test_pk", "noncol_idx_pk")

    @testing.requires.indexes_with_expressions
    @testing.provide_metadata
    def test_reflect_expression_based_indexes(self):
        Table(
            "t",
            self.metadata,
            Column("x", String(30)),
            Column("y", String(30)),
        )
        event.listen(
            self.metadata,
            "after_create",
            DDL("CREATE INDEX t_idx ON t(lower(x), lower(y))"),
        )
        event.listen(
            self.metadata, "after_create", DDL("CREATE INDEX t_idx_2 ON t(x)")
        )
        self.metadata.create_all()

        insp = inspect(self.metadata.bind)

        with expect_warnings(
            "Skipped unsupported reflection of expression-based index t_idx"
        ):
            eq_(
                insp.get_indexes("t"),
                [{"name": "t_idx_2", "column_names": ["x"], "unique": 0}],
            )

    @testing.requires.unique_constraint_reflection
    def test_get_unique_constraints(self):
        self._test_get_unique_constraints()

    @testing.requires.temp_table_reflection
    @testing.requires.unique_constraint_reflection
    def test_get_temp_table_unique_constraints(self):
        insp = inspect(self.bind)
        reflected = insp.get_unique_constraints("user_tmp")
        for refl in reflected:
            # Different dialects handle duplicate index and constraints
            # differently, so ignore this flag
            refl.pop("duplicates_index", None)
        eq_(reflected, [{"column_names": ["name"], "name": "user_tmp_uq"}])

    @testing.requires.temp_table_reflection
    def test_get_temp_table_indexes(self):
        insp = inspect(self.bind)
        indexes = insp.get_indexes("user_tmp")
        for ind in indexes:
            ind.pop("dialect_options", None)
        eq_(
            # TODO: we need to add better filtering for indexes/uq constraints
            # that are doubled up
            [idx for idx in indexes if idx["name"] == "user_tmp_ix"],
            [
                {
                    "unique": False,
                    "column_names": ["foo"],
                    "name": "user_tmp_ix",
                }
            ],
        )

    @testing.requires.unique_constraint_reflection
    @testing.requires.schemas
    def test_get_unique_constraints_with_schema(self):
        self._test_get_unique_constraints(schema=testing.config.test_schema)

    @testing.provide_metadata
    def _test_get_unique_constraints(self, schema=None):
        # SQLite dialect needs to parse the names of the constraints
        # separately from what it gets from PRAGMA index_list(), and
        # then matches them up.  so same set of column_names in two
        # constraints will confuse it.  Perhaps we should no longer
        # bother with index_list() here since we have the whole
        # CREATE TABLE?
uniques = sorted( [ {"name": "unique_a", "column_names": ["a"]}, {"name": "unique_a_b_c", "column_names": ["a", "b", "c"]}, {"name": "unique_c_a_b", "column_names": ["c", "a", "b"]}, {"name": "unique_asc_key", "column_names": ["asc", "key"]}, {"name": "i.have.dots", "column_names": ["b"]}, {"name": "i have spaces", "column_names": ["c"]}, ], key=operator.itemgetter("name"), ) orig_meta = self.metadata table = Table( "testtbl", orig_meta, Column("a", sa.String(20)), Column("b", sa.String(30)), Column("c", sa.Integer), # reserved identifiers Column("asc", sa.String(30)), Column("key", sa.String(30)), schema=schema, ) for uc in uniques: table.append_constraint( sa.UniqueConstraint(*uc["column_names"], name=uc["name"]) ) orig_meta.create_all() inspector = inspect(orig_meta.bind) reflected = sorted( inspector.get_unique_constraints("testtbl", schema=schema), key=operator.itemgetter("name"), ) names_that_duplicate_index = set() for orig, refl in zip(uniques, reflected): # Different dialects handle duplicate index and constraints # differently, so ignore this flag dupe = refl.pop("duplicates_index", None) if dupe: names_that_duplicate_index.add(dupe) eq_(orig, refl) reflected_metadata = MetaData() reflected = Table( "testtbl", reflected_metadata, autoload_with=orig_meta.bind, schema=schema, ) # test "deduplicates for index" logic. MySQL and Oracle # "unique constraints" are actually unique indexes (with possible # exception of a unique that is a dupe of another one in the case # of Oracle). make sure # they aren't duplicated. idx_names = set([idx.name for idx in reflected.indexes]) uq_names = set( [ uq.name for uq in reflected.constraints if isinstance(uq, sa.UniqueConstraint) ] ).difference(["unique_c_a_b"]) assert not idx_names.intersection(uq_names) if names_that_duplicate_index: eq_(names_that_duplicate_index, idx_names) eq_(uq_names, set()) @testing.requires.check_constraint_reflection def test_get_check_constraints(self): self._test_get_check_constraints() @testing.requires.check_constraint_reflection @testing.requires.schemas def test_get_check_constraints_schema(self): self._test_get_check_constraints(schema=testing.config.test_schema) @testing.provide_metadata def _test_get_check_constraints(self, schema=None): orig_meta = self.metadata Table( "sa_cc", orig_meta, Column("a", Integer()), sa.CheckConstraint("a > 1 AND a < 5", name="cc1"), sa.CheckConstraint("a = 1 OR (a > 2 AND a < 5)", name="cc2"), schema=schema, ) orig_meta.create_all() inspector = inspect(orig_meta.bind) reflected = sorted( inspector.get_check_constraints("sa_cc", schema=schema), key=operator.itemgetter("name"), ) # trying to minimize effect of quoting, parenthesis, etc. 
# may need to add more to this as new dialects get CHECK # constraint reflection support def normalize(sqltext): return " ".join( re.findall(r"and|\d|=|a|or|<|>", sqltext.lower(), re.I) ) reflected = [ {"name": item["name"], "sqltext": normalize(item["sqltext"])} for item in reflected ] eq_( reflected, [ {"name": "cc1", "sqltext": "a > 1 and a < 5"}, {"name": "cc2", "sqltext": "a = 1 or a > 2 and a < 5"}, ], ) @testing.provide_metadata def _test_get_view_definition(self, schema=None): meta = self.metadata view_name1 = "users_v" view_name2 = "email_addresses_v" insp = inspect(meta.bind) v1 = insp.get_view_definition(view_name1, schema=schema) self.assert_(v1) v2 = insp.get_view_definition(view_name2, schema=schema) self.assert_(v2) @testing.requires.view_reflection def test_get_view_definition(self): self._test_get_view_definition() @testing.requires.view_reflection @testing.requires.schemas def test_get_view_definition_with_schema(self): self._test_get_view_definition(schema=testing.config.test_schema) @testing.only_on("postgresql", "PG specific feature") @testing.provide_metadata def _test_get_table_oid(self, table_name, schema=None): meta = self.metadata insp = inspect(meta.bind) oid = insp.get_table_oid(table_name, schema) self.assert_(isinstance(oid, int)) def test_get_table_oid(self): self._test_get_table_oid("users") @testing.requires.schemas def test_get_table_oid_with_schema(self): self._test_get_table_oid("users", schema=testing.config.test_schema) @testing.requires.table_reflection @testing.provide_metadata def test_autoincrement_col(self): """test that 'autoincrement' is reflected according to sqla's policy. Don't mark this test as unsupported for any backend ! (technically it fails with MySQL InnoDB since "id" comes before "id2") A backend is better off not returning "autoincrement" at all, instead of potentially returning "False" for an auto-incrementing primary key column. 
""" meta = self.metadata insp = inspect(meta.bind) for tname, cname in [ ("users", "user_id"), ("email_addresses", "address_id"), ("dingalings", "dingaling_id"), ]: cols = insp.get_columns(tname) id_ = {c["name"]: c for c in cols}[cname] assert id_.get("autoincrement", True) class NormalizedNameTest(fixtures.TablesTest): __requires__ = ("denormalized_names",) __backend__ = True @classmethod def define_tables(cls, metadata): Table( quoted_name("t1", quote=True), metadata, Column("id", Integer, primary_key=True), ) Table( quoted_name("t2", quote=True), metadata, Column("id", Integer, primary_key=True), Column("t1id", ForeignKey("t1.id")), ) def test_reflect_lowercase_forced_tables(self): m2 = MetaData(testing.db) t2_ref = Table(quoted_name("t2", quote=True), m2, autoload=True) t1_ref = m2.tables["t1"] assert t2_ref.c.t1id.references(t1_ref.c.id) m3 = MetaData(testing.db) m3.reflect(only=lambda name, m: name.lower() in ("t1", "t2")) assert m3.tables["t2"].c.t1id.references(m3.tables["t1"].c.id) def test_get_table_names(self): tablenames = [ t for t in inspect(testing.db).get_table_names() if t.lower() in ("t1", "t2") ] eq_(tablenames[0].upper(), tablenames[0].lower()) eq_(tablenames[1].upper(), tablenames[1].lower()) class ComputedReflectionTest(fixtures.ComputedReflectionFixtureTest): def test_computed_col_default_not_set(self): insp = inspect(config.db) cols = insp.get_columns("computed_column_table") for col in cols: if col["name"] == "with_default": is_true("42" in col["default"]) elif not col["autoincrement"]: is_(col["default"], None) def test_get_column_returns_computed(self): insp = inspect(config.db) cols = insp.get_columns("computed_default_table") data = {c["name"]: c for c in cols} for key in ("id", "normal", "with_default"): is_true("computed" not in data[key]) compData = data["computed_col"] is_true("computed" in compData) is_true("sqltext" in compData["computed"]) eq_(self.normalize(compData["computed"]["sqltext"]), "normal+42") eq_( "persisted" in compData["computed"], testing.requires.computed_columns_reflect_persisted.enabled, ) if testing.requires.computed_columns_reflect_persisted.enabled: eq_( compData["computed"]["persisted"], testing.requires.computed_columns_default_persisted.enabled, ) def check_column(self, data, column, sqltext, persisted): is_true("computed" in data[column]) compData = data[column]["computed"] eq_(self.normalize(compData["sqltext"]), sqltext) if testing.requires.computed_columns_reflect_persisted.enabled: is_true("persisted" in compData) is_(compData["persisted"], persisted) def test_get_column_returns_persisted(self): insp = inspect(config.db) cols = insp.get_columns("computed_column_table") data = {c["name"]: c for c in cols} self.check_column( data, "computed_no_flag", "normal+42", testing.requires.computed_columns_default_persisted.enabled, ) if testing.requires.computed_columns_virtual.enabled: self.check_column( data, "computed_virtual", "normal+2", False, ) if testing.requires.computed_columns_stored.enabled: self.check_column( data, "computed_stored", "normal-42", True, ) @testing.requires.schemas def test_get_column_returns_persisted_with_schema(self): insp = inspect(config.db) cols = insp.get_columns( "computed_column_table", schema=config.test_schema ) data = {c["name"]: c for c in cols} self.check_column( data, "computed_no_flag", "normal/42", testing.requires.computed_columns_default_persisted.enabled, ) if testing.requires.computed_columns_virtual.enabled: self.check_column( data, "computed_virtual", "normal/2", False, ) if 
testing.requires.computed_columns_stored.enabled: self.check_column( data, "computed_stored", "normal*42", True, ) __all__ = ( "ComponentReflectionTest", "HasTableTest", "NormalizedNameTest", "ComputedReflectionTest", )
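# --- Hedged usage sketch (added for illustration; not part of the suite above) ---
# The preceding classes exercise SQLAlchemy's runtime inspection API.  A
# minimal, self-contained sketch of that API follows, assuming an
# in-memory SQLite engine; the "users" table and "users_t_idx" index are
# hypothetical stand-ins for the suite's fixtures.
import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
meta = sa.MetaData()
users = sa.Table(
    "users",
    meta,
    sa.Column("user_id", sa.Integer, primary_key=True),
    sa.Column("test1", sa.String(20)),
)
sa.Index("users_t_idx", users.c.test1)
meta.create_all(engine)

insp = sa.inspect(engine)  # Inspector bound to the engine
print(insp.get_table_names())                          # ['users']
print([c["name"] for c in insp.get_columns("users")])  # ['user_id', 'test1']
print(insp.get_pk_constraint("users")["constrained_columns"])  # ['user_id']
print([ix["name"] for ix in insp.get_indexes("users")])        # ['users_t_idx']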
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/suite/test_cte.py
from .. import config from .. import fixtures from ..assertions import eq_ from ..schema import Column from ..schema import Table from ... import ForeignKey from ... import Integer from ... import select from ... import String from ... import testing class CTETest(fixtures.TablesTest): __backend__ = True __requires__ = ("ctes",) run_inserts = "each" run_deletes = "each" @classmethod def define_tables(cls, metadata): Table( "some_table", metadata, Column("id", Integer, primary_key=True), Column("data", String(50)), Column("parent_id", ForeignKey("some_table.id")), ) Table( "some_other_table", metadata, Column("id", Integer, primary_key=True), Column("data", String(50)), Column("parent_id", Integer), ) @classmethod def insert_data(cls, connection): connection.execute( cls.tables.some_table.insert(), [ {"id": 1, "data": "d1", "parent_id": None}, {"id": 2, "data": "d2", "parent_id": 1}, {"id": 3, "data": "d3", "parent_id": 1}, {"id": 4, "data": "d4", "parent_id": 3}, {"id": 5, "data": "d5", "parent_id": 3}, ], ) def test_select_nonrecursive_round_trip(self): some_table = self.tables.some_table with config.db.connect() as conn: cte = ( select([some_table]) .where(some_table.c.data.in_(["d2", "d3", "d4"])) .cte("some_cte") ) result = conn.execute( select([cte.c.data]).where(cte.c.data.in_(["d4", "d5"])) ) eq_(result.fetchall(), [("d4",)]) def test_select_recursive_round_trip(self): some_table = self.tables.some_table with config.db.connect() as conn: cte = ( select([some_table]) .where(some_table.c.data.in_(["d2", "d3", "d4"])) .cte("some_cte", recursive=True) ) cte_alias = cte.alias("c1") st1 = some_table.alias() # note that SQL Server requires this to be UNION ALL, # can't be UNION cte = cte.union_all( select([st1]).where(st1.c.id == cte_alias.c.parent_id) ) result = conn.execute( select([cte.c.data]) .where(cte.c.data != "d2") .order_by(cte.c.data.desc()) ) eq_( result.fetchall(), [("d4",), ("d3",), ("d3",), ("d1",), ("d1",), ("d1",)], ) def test_insert_from_select_round_trip(self): some_table = self.tables.some_table some_other_table = self.tables.some_other_table with config.db.connect() as conn: cte = ( select([some_table]) .where(some_table.c.data.in_(["d2", "d3", "d4"])) .cte("some_cte") ) conn.execute( some_other_table.insert().from_select( ["id", "data", "parent_id"], select([cte]) ) ) eq_( conn.execute( select([some_other_table]).order_by(some_other_table.c.id) ).fetchall(), [(2, "d2", 1), (3, "d3", 1), (4, "d4", 3)], ) @testing.requires.ctes_with_update_delete @testing.requires.update_from def test_update_from_round_trip(self): some_table = self.tables.some_table some_other_table = self.tables.some_other_table with config.db.connect() as conn: conn.execute( some_other_table.insert().from_select( ["id", "data", "parent_id"], select([some_table]) ) ) cte = ( select([some_table]) .where(some_table.c.data.in_(["d2", "d3", "d4"])) .cte("some_cte") ) conn.execute( some_other_table.update() .values(parent_id=5) .where(some_other_table.c.data == cte.c.data) ) eq_( conn.execute( select([some_other_table]).order_by(some_other_table.c.id) ).fetchall(), [ (1, "d1", None), (2, "d2", 5), (3, "d3", 5), (4, "d4", 5), (5, "d5", 3), ], ) @testing.requires.ctes_with_update_delete @testing.requires.delete_from def test_delete_from_round_trip(self): some_table = self.tables.some_table some_other_table = self.tables.some_other_table with config.db.connect() as conn: conn.execute( some_other_table.insert().from_select( ["id", "data", "parent_id"], select([some_table]) ) ) cte = ( select([some_table]) 
.where(some_table.c.data.in_(["d2", "d3", "d4"])) .cte("some_cte") ) conn.execute( some_other_table.delete().where( some_other_table.c.data == cte.c.data ) ) eq_( conn.execute( select([some_other_table]).order_by(some_other_table.c.id) ).fetchall(), [(1, "d1", None), (5, "d5", 3)], ) @testing.requires.ctes_with_update_delete def test_delete_scalar_subq_round_trip(self): some_table = self.tables.some_table some_other_table = self.tables.some_other_table with config.db.connect() as conn: conn.execute( some_other_table.insert().from_select( ["id", "data", "parent_id"], select([some_table]) ) ) cte = ( select([some_table]) .where(some_table.c.data.in_(["d2", "d3", "d4"])) .cte("some_cte") ) conn.execute( some_other_table.delete().where( some_other_table.c.data == select([cte.c.data]).where( cte.c.id == some_other_table.c.id ) ) ) eq_( conn.execute( select([some_other_table]).order_by(some_other_table.c.id) ).fetchall(), [(1, "d1", None), (5, "d5", 3)], )
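# --- Hedged usage sketch (added for illustration; not part of the suite above) ---
# The recursive-CTE pattern tested in test_select_recursive_round_trip,
# rebuilt as a standalone script against in-memory SQLite (which supports
# WITH RECURSIVE).  Table layout and data mirror the fixture; the walk
# here climbs from a leaf row to the root of the parent_id chain.
from sqlalchemy import (
    Column, ForeignKey, Integer, MetaData, String, Table, create_engine, select,
)

engine = create_engine("sqlite://")
meta = MetaData()
some_table = Table(
    "some_table",
    meta,
    Column("id", Integer, primary_key=True),
    Column("data", String(50)),
    Column("parent_id", ForeignKey("some_table.id")),
)
meta.create_all(engine)
with engine.connect() as conn:
    conn.execute(
        some_table.insert(),
        [
            {"id": 1, "data": "d1", "parent_id": None},
            {"id": 3, "data": "d3", "parent_id": 1},
            {"id": 4, "data": "d4", "parent_id": 3},
        ],
    )
    # anchor member: the leaf row "d4"
    cte = (
        select([some_table])
        .where(some_table.c.data == "d4")
        .cte("ancestors", recursive=True)
    )
    anc = cte.alias("anc")
    st = some_table.alias("st")
    # recursive member: join each accumulated row to its parent
    cte = cte.union_all(select([st]).where(st.c.id == anc.c.parent_id))
    rows = conn.execute(select([cte.c.data]).order_by(cte.c.data)).fetchall()
    print(rows)  # [('d1',), ('d3',), ('d4',)] -- the full ancestry chain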
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/testing/suite/test_types.py
# coding: utf-8 import datetime import decimal import json from .. import config from .. import engines from .. import fixtures from .. import mock from ..assertions import eq_ from ..assertions import is_ from ..config import requirements from ..schema import Column from ..schema import Table from ... import and_ from ... import BigInteger from ... import bindparam from ... import Boolean from ... import case from ... import cast from ... import Date from ... import DateTime from ... import Float from ... import Integer from ... import JSON from ... import literal from ... import MetaData from ... import null from ... import Numeric from ... import select from ... import String from ... import testing from ... import Text from ... import Time from ... import TIMESTAMP from ... import type_coerce from ... import Unicode from ... import UnicodeText from ... import util from ...ext.declarative import declarative_base from ...orm import Session from ...util import u class _LiteralRoundTripFixture(object): supports_whereclause = True @testing.provide_metadata def _literal_round_trip(self, type_, input_, output, filter_=None): """test literal rendering """ # for literal, we test the literal render in an INSERT # into a typed column. we can then SELECT it back as its # official type; ideally we'd be able to use CAST here # but MySQL in particular can't CAST fully t = Table("t", self.metadata, Column("x", type_)) t.create() with testing.db.connect() as conn: for value in input_: ins = ( t.insert() .values(x=literal(value)) .compile( dialect=testing.db.dialect, compile_kwargs=dict(literal_binds=True), ) ) conn.execute(ins) if self.supports_whereclause: stmt = t.select().where(t.c.x == literal(value)) else: stmt = t.select() stmt = stmt.compile( dialect=testing.db.dialect, compile_kwargs=dict(literal_binds=True), ) for row in conn.execute(stmt): value = row[0] if filter_ is not None: value = filter_(value) assert value in output class _UnicodeFixture(_LiteralRoundTripFixture): __requires__ = ("unicode_data",) data = u( "Alors vous imaginez ma 🐍 surprise, au lever du jour, " "quand une drôle de petite 🐍 voix m’a réveillé. Elle " "disait: « S’il vous plaît… dessine-moi 🐍 un mouton! 
»" ) @property def supports_whereclause(self): return config.requirements.expressions_against_unbounded_text.enabled @classmethod def define_tables(cls, metadata): Table( "unicode_table", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("unicode_data", cls.datatype), ) def test_round_trip(self): unicode_table = self.tables.unicode_table config.db.execute(unicode_table.insert(), {"unicode_data": self.data}) row = config.db.execute(select([unicode_table.c.unicode_data])).first() eq_(row, (self.data,)) assert isinstance(row[0], util.text_type) def test_round_trip_executemany(self): unicode_table = self.tables.unicode_table config.db.execute( unicode_table.insert(), [{"unicode_data": self.data} for i in range(3)], ) rows = config.db.execute( select([unicode_table.c.unicode_data]) ).fetchall() eq_(rows, [(self.data,) for i in range(3)]) for row in rows: assert isinstance(row[0], util.text_type) def _test_null_strings(self, connection): unicode_table = self.tables.unicode_table connection.execute(unicode_table.insert(), {"unicode_data": None}) row = connection.execute( select([unicode_table.c.unicode_data]) ).first() eq_(row, (None,)) def _test_empty_strings(self, connection): unicode_table = self.tables.unicode_table connection.execute(unicode_table.insert(), {"unicode_data": u("")}) row = connection.execute( select([unicode_table.c.unicode_data]) ).first() eq_(row, (u(""),)) def test_literal(self): self._literal_round_trip(self.datatype, [self.data], [self.data]) def test_literal_non_ascii(self): self._literal_round_trip( self.datatype, [util.u("réve🐍 illé")], [util.u("réve🐍 illé")] ) class UnicodeVarcharTest(_UnicodeFixture, fixtures.TablesTest): __requires__ = ("unicode_data",) __backend__ = True datatype = Unicode(255) @requirements.empty_strings_varchar def test_empty_strings_varchar(self, connection): self._test_empty_strings(connection) def test_null_strings_varchar(self, connection): self._test_null_strings(connection) class UnicodeTextTest(_UnicodeFixture, fixtures.TablesTest): __requires__ = "unicode_data", "text_type" __backend__ = True datatype = UnicodeText() @requirements.empty_strings_text def test_empty_strings_text(self, connection): self._test_empty_strings(connection) def test_null_strings_text(self, connection): self._test_null_strings(connection) class TextTest(_LiteralRoundTripFixture, fixtures.TablesTest): __requires__ = ("text_type",) __backend__ = True @property def supports_whereclause(self): return config.requirements.expressions_against_unbounded_text.enabled @classmethod def define_tables(cls, metadata): Table( "text_table", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("text_data", Text), ) def test_text_roundtrip(self): text_table = self.tables.text_table config.db.execute(text_table.insert(), {"text_data": "some text"}) row = config.db.execute(select([text_table.c.text_data])).first() eq_(row, ("some text",)) @testing.requires.empty_strings_text def test_text_empty_strings(self, connection): text_table = self.tables.text_table connection.execute(text_table.insert(), {"text_data": ""}) row = connection.execute(select([text_table.c.text_data])).first() eq_(row, ("",)) def test_text_null_strings(self, connection): text_table = self.tables.text_table connection.execute(text_table.insert(), {"text_data": None}) row = connection.execute(select([text_table.c.text_data])).first() eq_(row, (None,)) def test_literal(self): self._literal_round_trip(Text, ["some text"], ["some text"]) def 
test_literal_non_ascii(self): self._literal_round_trip( Text, [util.u("réve🐍 illé")], [util.u("réve🐍 illé")] ) def test_literal_quoting(self): data = """some 'text' hey "hi there" that's text""" self._literal_round_trip(Text, [data], [data]) def test_literal_backslashes(self): data = r"backslash one \ backslash two \\ end" self._literal_round_trip(Text, [data], [data]) def test_literal_percentsigns(self): data = r"percent % signs %% percent" self._literal_round_trip(Text, [data], [data]) class StringTest(_LiteralRoundTripFixture, fixtures.TestBase): __backend__ = True @requirements.unbounded_varchar def test_nolength_string(self): metadata = MetaData() foo = Table("foo", metadata, Column("one", String)) foo.create(config.db) foo.drop(config.db) def test_literal(self): # note that in Python 3, this invokes the Unicode # datatype for the literal part because all strings are unicode self._literal_round_trip(String(40), ["some text"], ["some text"]) def test_literal_non_ascii(self): self._literal_round_trip( String(40), [util.u("réve🐍 illé")], [util.u("réve🐍 illé")] ) def test_literal_quoting(self): data = """some 'text' hey "hi there" that's text""" self._literal_round_trip(String(40), [data], [data]) def test_literal_backslashes(self): data = r"backslash one \ backslash two \\ end" self._literal_round_trip(String(40), [data], [data]) class _DateFixture(_LiteralRoundTripFixture): compare = None @classmethod def define_tables(cls, metadata): Table( "date_table", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("date_data", cls.datatype), ) def test_round_trip(self): date_table = self.tables.date_table config.db.execute(date_table.insert(), {"date_data": self.data}) row = config.db.execute(select([date_table.c.date_data])).first() compare = self.compare or self.data eq_(row, (compare,)) assert isinstance(row[0], type(compare)) def test_null(self): date_table = self.tables.date_table config.db.execute(date_table.insert(), {"date_data": None}) row = config.db.execute(select([date_table.c.date_data])).first() eq_(row, (None,)) @testing.requires.datetime_literals def test_literal(self): compare = self.compare or self.data self._literal_round_trip(self.datatype, [self.data], [compare]) @testing.requires.standalone_null_binds_whereclause def test_null_bound_comparison(self): # this test is based on an Oracle issue observed in #4886. # passing NULL for an expression that needs to be interpreted as # a certain type, does the DBAPI have the info it needs to do this. 
date_table = self.tables.date_table with config.db.connect() as conn: result = conn.execute( date_table.insert(), {"date_data": self.data} ) id_ = result.inserted_primary_key[0] stmt = select([date_table.c.id]).where( case( [ ( bindparam("foo", type_=self.datatype) != None, bindparam("foo", type_=self.datatype), ) ], else_=date_table.c.date_data, ) == date_table.c.date_data ) row = conn.execute(stmt, {"foo": None}).first() eq_(row[0], id_) class DateTimeTest(_DateFixture, fixtures.TablesTest): __requires__ = ("datetime",) __backend__ = True datatype = DateTime data = datetime.datetime(2012, 10, 15, 12, 57, 18) class DateTimeMicrosecondsTest(_DateFixture, fixtures.TablesTest): __requires__ = ("datetime_microseconds",) __backend__ = True datatype = DateTime data = datetime.datetime(2012, 10, 15, 12, 57, 18, 396) class TimestampMicrosecondsTest(_DateFixture, fixtures.TablesTest): __requires__ = ("timestamp_microseconds",) __backend__ = True datatype = TIMESTAMP data = datetime.datetime(2012, 10, 15, 12, 57, 18, 396) class TimeTest(_DateFixture, fixtures.TablesTest): __requires__ = ("time",) __backend__ = True datatype = Time data = datetime.time(12, 57, 18) class TimeMicrosecondsTest(_DateFixture, fixtures.TablesTest): __requires__ = ("time_microseconds",) __backend__ = True datatype = Time data = datetime.time(12, 57, 18, 396) class DateTest(_DateFixture, fixtures.TablesTest): __requires__ = ("date",) __backend__ = True datatype = Date data = datetime.date(2012, 10, 15) class DateTimeCoercedToDateTimeTest(_DateFixture, fixtures.TablesTest): __requires__ = "date", "date_coerces_from_datetime" __backend__ = True datatype = Date data = datetime.datetime(2012, 10, 15, 12, 57, 18) compare = datetime.date(2012, 10, 15) class DateTimeHistoricTest(_DateFixture, fixtures.TablesTest): __requires__ = ("datetime_historic",) __backend__ = True datatype = DateTime data = datetime.datetime(1850, 11, 10, 11, 52, 35) class DateHistoricTest(_DateFixture, fixtures.TablesTest): __requires__ = ("date_historic",) __backend__ = True datatype = Date data = datetime.date(1727, 4, 1) class IntegerTest(_LiteralRoundTripFixture, fixtures.TestBase): __backend__ = True def test_literal(self): self._literal_round_trip(Integer, [5], [5]) def test_huge_int(self): self._round_trip(BigInteger, 1376537018368127) @testing.provide_metadata def _round_trip(self, datatype, data): metadata = self.metadata int_table = Table( "integer_table", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("integer_data", datatype), ) metadata.create_all(config.db) config.db.execute(int_table.insert(), {"integer_data": data}) row = config.db.execute(select([int_table.c.integer_data])).first() eq_(row, (data,)) if util.py3k: assert isinstance(row[0], int) else: assert isinstance(row[0], (long, int)) # noqa class NumericTest(_LiteralRoundTripFixture, fixtures.TestBase): __backend__ = True @testing.emits_warning(r".*does \*not\* support Decimal objects natively") @testing.provide_metadata def _do_test(self, type_, input_, output, filter_=None, check_scale=False): metadata = self.metadata t = Table("t", metadata, Column("x", type_)) t.create() t.insert().execute([{"x": x} for x in input_]) result = {row[0] for row in t.select().execute()} output = set(output) if filter_: result = set(filter_(x) for x in result) output = set(filter_(x) for x in output) eq_(result, output) if check_scale: eq_([str(x) for x in result], [str(x) for x in output]) @testing.emits_warning(r".*does \*not\* support Decimal objects 
natively") def test_render_literal_numeric(self): self._literal_round_trip( Numeric(precision=8, scale=4), [15.7563, decimal.Decimal("15.7563")], [decimal.Decimal("15.7563")], ) @testing.emits_warning(r".*does \*not\* support Decimal objects natively") def test_render_literal_numeric_asfloat(self): self._literal_round_trip( Numeric(precision=8, scale=4, asdecimal=False), [15.7563, decimal.Decimal("15.7563")], [15.7563], ) def test_render_literal_float(self): self._literal_round_trip( Float(4), [15.7563, decimal.Decimal("15.7563")], [15.7563], filter_=lambda n: n is not None and round(n, 5) or None, ) @testing.requires.precision_generic_float_type def test_float_custom_scale(self): self._do_test( Float(None, decimal_return_scale=7, asdecimal=True), [15.7563827, decimal.Decimal("15.7563827")], [decimal.Decimal("15.7563827")], check_scale=True, ) def test_numeric_as_decimal(self): self._do_test( Numeric(precision=8, scale=4), [15.7563, decimal.Decimal("15.7563")], [decimal.Decimal("15.7563")], ) def test_numeric_as_float(self): self._do_test( Numeric(precision=8, scale=4, asdecimal=False), [15.7563, decimal.Decimal("15.7563")], [15.7563], ) @testing.requires.fetch_null_from_numeric def test_numeric_null_as_decimal(self): self._do_test(Numeric(precision=8, scale=4), [None], [None]) @testing.requires.fetch_null_from_numeric def test_numeric_null_as_float(self): self._do_test( Numeric(precision=8, scale=4, asdecimal=False), [None], [None] ) @testing.requires.floats_to_four_decimals def test_float_as_decimal(self): self._do_test( Float(precision=8, asdecimal=True), [15.7563, decimal.Decimal("15.7563"), None], [decimal.Decimal("15.7563"), None], ) def test_float_as_float(self): self._do_test( Float(precision=8), [15.7563, decimal.Decimal("15.7563")], [15.7563], filter_=lambda n: n is not None and round(n, 5) or None, ) def test_float_coerce_round_trip(self): expr = 15.7563 val = testing.db.scalar(select([literal(expr)])) eq_(val, expr) # this does not work in MySQL, see #4036, however we choose not # to render CAST unconditionally since this is kind of an edge case. @testing.requires.implicit_decimal_binds @testing.emits_warning(r".*does \*not\* support Decimal objects natively") def test_decimal_coerce_round_trip(self): expr = decimal.Decimal("15.7563") val = testing.db.scalar(select([literal(expr)])) eq_(val, expr) @testing.emits_warning(r".*does \*not\* support Decimal objects natively") def test_decimal_coerce_round_trip_w_cast(self): expr = decimal.Decimal("15.7563") val = testing.db.scalar(select([cast(expr, Numeric(10, 4))])) eq_(val, expr) @testing.requires.precision_numerics_general def test_precision_decimal(self): numbers = set( [ decimal.Decimal("54.234246451650"), decimal.Decimal("0.004354"), decimal.Decimal("900.0"), ] ) self._do_test(Numeric(precision=18, scale=12), numbers, numbers) @testing.requires.precision_numerics_enotation_large def test_enotation_decimal(self): """test exceedingly small decimals. Decimal reports values with E notation when the exponent is greater than 6. 
""" numbers = set( [ decimal.Decimal("1E-2"), decimal.Decimal("1E-3"), decimal.Decimal("1E-4"), decimal.Decimal("1E-5"), decimal.Decimal("1E-6"), decimal.Decimal("1E-7"), decimal.Decimal("1E-8"), decimal.Decimal("0.01000005940696"), decimal.Decimal("0.00000005940696"), decimal.Decimal("0.00000000000696"), decimal.Decimal("0.70000000000696"), decimal.Decimal("696E-12"), ] ) self._do_test(Numeric(precision=18, scale=14), numbers, numbers) @testing.requires.precision_numerics_enotation_large def test_enotation_decimal_large(self): """test exceedingly large decimals. """ numbers = set( [ decimal.Decimal("4E+8"), decimal.Decimal("5748E+15"), decimal.Decimal("1.521E+15"), decimal.Decimal("00000000000000.1E+12"), ] ) self._do_test(Numeric(precision=25, scale=2), numbers, numbers) @testing.requires.precision_numerics_many_significant_digits def test_many_significant_digits(self): numbers = set( [ decimal.Decimal("31943874831932418390.01"), decimal.Decimal("319438950232418390.273596"), decimal.Decimal("87673.594069654243"), ] ) self._do_test(Numeric(precision=38, scale=12), numbers, numbers) @testing.requires.precision_numerics_retains_significant_digits def test_numeric_no_decimal(self): numbers = set([decimal.Decimal("1.000")]) self._do_test( Numeric(precision=5, scale=3), numbers, numbers, check_scale=True ) class BooleanTest(_LiteralRoundTripFixture, fixtures.TablesTest): __backend__ = True @classmethod def define_tables(cls, metadata): Table( "boolean_table", metadata, Column("id", Integer, primary_key=True, autoincrement=False), Column("value", Boolean), Column("unconstrained_value", Boolean(create_constraint=False)), ) def test_render_literal_bool(self): self._literal_round_trip(Boolean(), [True, False], [True, False]) def test_round_trip(self): boolean_table = self.tables.boolean_table config.db.execute( boolean_table.insert(), {"id": 1, "value": True, "unconstrained_value": False}, ) row = config.db.execute( select( [boolean_table.c.value, boolean_table.c.unconstrained_value] ) ).first() eq_(row, (True, False)) assert isinstance(row[0], bool) @testing.requires.nullable_booleans def test_null(self): boolean_table = self.tables.boolean_table config.db.execute( boolean_table.insert(), {"id": 1, "value": None, "unconstrained_value": None}, ) row = config.db.execute( select( [boolean_table.c.value, boolean_table.c.unconstrained_value] ) ).first() eq_(row, (None, None)) def test_whereclause(self): # testing "WHERE <column>" renders a compatible expression boolean_table = self.tables.boolean_table with config.db.connect() as conn: conn.execute( boolean_table.insert(), [ {"id": 1, "value": True, "unconstrained_value": True}, {"id": 2, "value": False, "unconstrained_value": False}, ], ) eq_( conn.scalar( select([boolean_table.c.id]).where(boolean_table.c.value) ), 1, ) eq_( conn.scalar( select([boolean_table.c.id]).where( boolean_table.c.unconstrained_value ) ), 1, ) eq_( conn.scalar( select([boolean_table.c.id]).where(~boolean_table.c.value) ), 2, ) eq_( conn.scalar( select([boolean_table.c.id]).where( ~boolean_table.c.unconstrained_value ) ), 2, ) class JSONTest(_LiteralRoundTripFixture, fixtures.TablesTest): __requires__ = ("json_type",) __backend__ = True datatype = JSON @classmethod def define_tables(cls, metadata): Table( "data_table", metadata, Column("id", Integer, primary_key=True), Column("name", String(30), nullable=False), Column("data", cls.datatype), Column("nulldata", cls.datatype(none_as_null=True)), ) def test_round_trip_data1(self): self._test_round_trip({"key1": "value1", "key2": 
"value2"}) def _test_round_trip(self, data_element): data_table = self.tables.data_table config.db.execute( data_table.insert(), {"name": "row1", "data": data_element} ) row = config.db.execute(select([data_table.c.data])).first() eq_(row, (data_element,)) def _index_fixtures(fn): fn = testing.combinations( ("boolean", True), ("boolean", False), ("boolean", None), ("string", "some string"), ("string", None), ("string", util.u("réve illé")), ( "string", util.u("réve🐍 illé"), testing.requires.json_index_supplementary_unicode_element, ), ("integer", 15), ("integer", 1), ("integer", 0), ("integer", None), ("float", 28.5), ("float", None), # TODO: how to test for comaprison # ("json", {"foo": "bar"}), id_="sa", )(fn) return fn @_index_fixtures def test_index_typed_access(self, datatype, value): data_table = self.tables.data_table data_element = {"key1": value} with config.db.connect() as conn: conn.execute( data_table.insert(), { "name": "row1", "data": data_element, "nulldata": data_element, }, ) expr = data_table.c.data["key1"] expr = getattr(expr, "as_%s" % datatype)() roundtrip = conn.scalar(select([expr])) eq_(roundtrip, value) if util.py3k: # skip py2k to avoid comparing unicode to str etc. is_(type(roundtrip), type(value)) @_index_fixtures def test_index_typed_comparison(self, datatype, value): data_table = self.tables.data_table data_element = {"key1": value} with config.db.connect() as conn: conn.execute( data_table.insert(), { "name": "row1", "data": data_element, "nulldata": data_element, }, ) expr = data_table.c.data["key1"] expr = getattr(expr, "as_%s" % datatype)() row = conn.execute(select([expr]).where(expr == value)).first() # make sure we get a row even if value is None eq_(row, (value,)) @_index_fixtures def test_path_typed_comparison(self, datatype, value): data_table = self.tables.data_table data_element = {"key1": {"subkey1": value}} with config.db.connect() as conn: conn.execute( data_table.insert(), { "name": "row1", "data": data_element, "nulldata": data_element, }, ) expr = data_table.c.data[("key1", "subkey1")] expr = getattr(expr, "as_%s" % datatype)() row = conn.execute(select([expr]).where(expr == value)).first() # make sure we get a row even if value is None eq_(row, (value,)) @testing.combinations( (True,), (False,), (None,), (15,), (0,), (-1,), (-1.0,), (15.052,), ("a string",), (util.u("réve illé"),), (util.u("réve🐍 illé"),), ) def test_single_element_round_trip(self, element): data_table = self.tables.data_table data_element = element with config.db.connect() as conn: conn.execute( data_table.insert(), { "name": "row1", "data": data_element, "nulldata": data_element, }, ) row = conn.execute( select([data_table.c.data, data_table.c.nulldata]) ).first() eq_(row, (data_element, data_element)) def test_round_trip_custom_json(self): data_table = self.tables.data_table data_element = {"key1": "data1"} js = mock.Mock(side_effect=json.dumps) jd = mock.Mock(side_effect=json.loads) engine = engines.testing_engine( options=dict(json_serializer=js, json_deserializer=jd) ) # support sqlite :memory: database... 
data_table.create(engine, checkfirst=True) with engine.connect() as conn: conn.execute( data_table.insert(), {"name": "row1", "data": data_element} ) row = conn.execute(select([data_table.c.data])).first() eq_(row, (data_element,)) eq_(js.mock_calls, [mock.call(data_element)]) eq_(jd.mock_calls, [mock.call(json.dumps(data_element))]) def test_round_trip_none_as_sql_null(self, connection): col = self.tables.data_table.c["nulldata"] conn = connection conn.execute( self.tables.data_table.insert(), {"name": "r1", "data": None} ) eq_( conn.scalar( select([self.tables.data_table.c.name]).where(col.is_(null())) ), "r1", ) eq_(conn.scalar(select([col])), None) def test_round_trip_json_null_as_json_null(self, connection): col = self.tables.data_table.c["data"] conn = connection conn.execute( self.tables.data_table.insert(), {"name": "r1", "data": JSON.NULL}, ) eq_( conn.scalar( select([self.tables.data_table.c.name]).where( cast(col, String) == "null" ) ), "r1", ) eq_(conn.scalar(select([col])), None) def test_round_trip_none_as_json_null(self): col = self.tables.data_table.c["data"] with config.db.connect() as conn: conn.execute( self.tables.data_table.insert(), {"name": "r1", "data": None} ) eq_( conn.scalar( select([self.tables.data_table.c.name]).where( cast(col, String) == "null" ) ), "r1", ) eq_(conn.scalar(select([col])), None) def test_unicode_round_trip(self): # note we include Unicode supplementary characters as well with config.db.connect() as conn: conn.execute( self.tables.data_table.insert(), { "name": "r1", "data": { util.u("réve🐍 illé"): util.u("réve🐍 illé"), "data": {"k1": util.u("drôl🐍e")}, }, }, ) eq_( conn.scalar(select([self.tables.data_table.c.data])), { util.u("réve🐍 illé"): util.u("réve🐍 illé"), "data": {"k1": util.u("drôl🐍e")}, }, ) def test_eval_none_flag_orm(self): Base = declarative_base() class Data(Base): __table__ = self.tables.data_table s = Session(testing.db) d1 = Data(name="d1", data=None, nulldata=None) s.add(d1) s.commit() s.bulk_insert_mappings( Data, [{"name": "d2", "data": None, "nulldata": None}] ) eq_( s.query( cast(self.tables.data_table.c.data, String()), cast(self.tables.data_table.c.nulldata, String), ) .filter(self.tables.data_table.c.name == "d1") .first(), ("null", None), ) eq_( s.query( cast(self.tables.data_table.c.data, String()), cast(self.tables.data_table.c.nulldata, String), ) .filter(self.tables.data_table.c.name == "d2") .first(), ("null", None), ) class JSONStringCastIndexTest(_LiteralRoundTripFixture, fixtures.TablesTest): """test JSON index access with "cast to string", which we have documented for a long time as how to compare JSON values, but is ultimately not reliable in all cases. 
""" __requires__ = ("json_type",) __backend__ = True datatype = JSON data1 = {"key1": "value1", "key2": "value2"} data2 = { "Key 'One'": "value1", "key two": "value2", "key three": "value ' three '", } data3 = { "key1": [1, 2, 3], "key2": ["one", "two", "three"], "key3": [{"four": "five"}, {"six": "seven"}], } data4 = ["one", "two", "three"] data5 = { "nested": { "elem1": [{"a": "b", "c": "d"}, {"e": "f", "g": "h"}], "elem2": {"elem3": {"elem4": "elem5"}}, } } data6 = {"a": 5, "b": "some value", "c": {"foo": "bar"}} @classmethod def define_tables(cls, metadata): Table( "data_table", metadata, Column("id", Integer, primary_key=True), Column("name", String(30), nullable=False), Column("data", cls.datatype), Column("nulldata", cls.datatype(none_as_null=True)), ) def _criteria_fixture(self): config.db.execute( self.tables.data_table.insert(), [ {"name": "r1", "data": self.data1}, {"name": "r2", "data": self.data2}, {"name": "r3", "data": self.data3}, {"name": "r4", "data": self.data4}, {"name": "r5", "data": self.data5}, {"name": "r6", "data": self.data6}, ], ) def _test_index_criteria(self, crit, expected, test_literal=True): self._criteria_fixture() with config.db.connect() as conn: stmt = select([self.tables.data_table.c.name]).where(crit) eq_(conn.scalar(stmt), expected) if test_literal: literal_sql = str( stmt.compile( config.db, compile_kwargs={"literal_binds": True} ) ) eq_(conn.scalar(literal_sql), expected) def test_string_cast_crit_spaces_in_key(self): name = self.tables.data_table.c.name col = self.tables.data_table.c["data"] # limit the rows here to avoid PG error # "cannot extract field from a non-object", which is # fixed in 9.4 but may exist in 9.3 self._test_index_criteria( and_( name.in_(["r1", "r2", "r3"]), cast(col["key two"], String) == '"value2"', ), "r2", ) @config.requirements.json_array_indexes def test_string_cast_crit_simple_int(self): name = self.tables.data_table.c.name col = self.tables.data_table.c["data"] # limit the rows here to avoid PG error # "cannot extract array element from a non-array", which is # fixed in 9.4 but may exist in 9.3 self._test_index_criteria( and_(name == "r4", cast(col[1], String) == '"two"'), "r4" ) def test_string_cast_crit_mixed_path(self): col = self.tables.data_table.c["data"] self._test_index_criteria( cast(col[("key3", 1, "six")], String) == '"seven"', "r3" ) def test_string_cast_crit_string_path(self): col = self.tables.data_table.c["data"] self._test_index_criteria( cast(col[("nested", "elem2", "elem3", "elem4")], String) == '"elem5"', "r5", ) def test_string_cast_crit_against_string_basic(self): name = self.tables.data_table.c.name col = self.tables.data_table.c["data"] self._test_index_criteria( and_(name == "r6", cast(col["b"], String) == '"some value"'), "r6" ) def test_crit_against_string_coerce_type(self): name = self.tables.data_table.c.name col = self.tables.data_table.c["data"] self._test_index_criteria( and_( name == "r6", cast(col["b"], String) == type_coerce("some value", JSON), ), "r6", test_literal=False, ) def test_crit_against_int_basic(self): name = self.tables.data_table.c.name col = self.tables.data_table.c["data"] self._test_index_criteria( and_(name == "r6", cast(col["a"], String) == "5"), "r6" ) def test_crit_against_int_coerce_type(self): name = self.tables.data_table.c.name col = self.tables.data_table.c["data"] self._test_index_criteria( and_(name == "r6", cast(col["a"], String) == type_coerce(5, JSON)), "r6", test_literal=False, ) __all__ = ( "UnicodeVarcharTest", "UnicodeTextTest", "JSONTest", 
"JSONStringCastIndexTest", "DateTest", "DateTimeTest", "TextTest", "NumericTest", "IntegerTest", "DateTimeHistoricTest", "DateTimeCoercedToDateTimeTest", "TimeMicrosecondsTest", "TimestampMicrosecondsTest", "TimeTest", "DateTimeMicrosecondsTest", "DateHistoricTest", "StringTest", "BooleanTest", )
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/__init__.py
# dialects/__init__.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php __all__ = ( "firebird", "mssql", "mysql", "oracle", "postgresql", "sqlite", "sybase", ) from .. import util _translates = {"postgres": "postgresql"} def _auto_fn(name): """default dialect importer. plugs into the :class:`.PluginLoader` as a first-hit system. """ if "." in name: dialect, driver = name.split(".") else: dialect = name driver = "base" if dialect in _translates: translated = _translates[dialect] util.warn_deprecated( "The '%s' dialect name has been " "renamed to '%s'" % (dialect, translated) ) dialect = translated try: if dialect == "firebird": try: module = __import__("sqlalchemy_firebird") except ImportError: module = __import__("sqlalchemy.dialects.firebird").dialects module = getattr(module, dialect) elif dialect == "sybase": try: module = __import__("sqlalchemy_sybase") except ImportError: module = __import__("sqlalchemy.dialects.sybase").dialects module = getattr(module, dialect) else: module = __import__("sqlalchemy.dialects.%s" % (dialect,)).dialects module = getattr(module, dialect) except ImportError: return None if hasattr(module, driver): module = getattr(module, driver) return lambda: module.dialect else: return None registry = util.PluginLoader("sqlalchemy.dialects", auto_fn=_auto_fn) plugins = util.PluginLoader("sqlalchemy.plugins")
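# --- Hedged usage sketch (added for illustration; not part of this module) ---
# The registry above resolves "dialect" or "dialect.driver" names lazily
# via _auto_fn on first use; loading returns the dialect class itself.
from sqlalchemy.dialects import registry

dialect_cls = registry.load("sqlite.pysqlite")
print(dialect_cls.name, dialect_cls.driver)  # sqlite pysqlite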
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/sybase/mxodbc.py
# sybase/mxodbc.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: sybase+mxodbc :name: mxODBC :dbapi: mxodbc :connectstring: sybase+mxodbc://<username>:<password>@<dsnname> :url: http://www.egenix.com/ .. note:: This dialect is a stub only and is likely non functional at this time. """ from sqlalchemy.connectors.mxodbc import MxODBCConnector from sqlalchemy.dialects.sybase.base import SybaseDialect from sqlalchemy.dialects.sybase.base import SybaseExecutionContext class SybaseExecutionContext_mxodbc(SybaseExecutionContext): pass class SybaseDialect_mxodbc(MxODBCConnector, SybaseDialect): execution_ctx_cls = SybaseExecutionContext_mxodbc dialect = SybaseDialect_mxodbc
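# --- Hedged note (illustration only) ---
# Per the stub docstring above, this dialect is likely non-functional; if
# it were usable, the documented URL form would look like:
#   create_engine("sybase+mxodbc://user:password@my_dsn")
# where "my_dsn" is a hypothetical ODBC data source name.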
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/sybase/__init__.py
# sybase/__init__.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from . import base # noqa from . import pyodbc # noqa from . import pysybase # noqa from .base import BIGINT from .base import BINARY from .base import BIT from .base import CHAR from .base import DATE from .base import DATETIME from .base import FLOAT from .base import IMAGE from .base import INT from .base import INTEGER from .base import MONEY from .base import NCHAR from .base import NUMERIC from .base import NVARCHAR from .base import SMALLINT from .base import SMALLMONEY from .base import TEXT from .base import TIME from .base import TINYINT from .base import UNICHAR from .base import UNITEXT from .base import UNIVARCHAR from .base import VARBINARY from .base import VARCHAR # default dialect base.dialect = dialect = pyodbc.dialect __all__ = ( "CHAR", "VARCHAR", "TIME", "NCHAR", "NVARCHAR", "TEXT", "DATE", "DATETIME", "FLOAT", "NUMERIC", "BIGINT", "INT", "INTEGER", "SMALLINT", "BINARY", "VARBINARY", "UNITEXT", "UNICHAR", "UNIVARCHAR", "IMAGE", "BIT", "MONEY", "SMALLMONEY", "TINYINT", "dialect", )
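# --- Hedged usage sketch (added for illustration; not part of this module) ---
# The Sybase-specific types re-exported above can be exercised without a
# live server or driver by compiling DDL against the dialect; the
# "accounts" table is hypothetical.
from sqlalchemy import Column, Integer, MetaData, Table
from sqlalchemy.dialects import sybase
from sqlalchemy.dialects.sybase import MONEY, UNIVARCHAR
from sqlalchemy.schema import CreateTable

t = Table(
    "accounts",
    MetaData(),
    Column("id", Integer, primary_key=True),
    Column("label", UNIVARCHAR(40)),
    Column("balance", MONEY),
)
print(CreateTable(t).compile(dialect=sybase.dialect()))
# emits CREATE TABLE DDL using UNIVARCHAR(40) and MONEY column types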
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/sybase/pysybase.py
# sybase/pysybase.py # Copyright (C) 2010-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: sybase+pysybase :name: Python-Sybase :dbapi: Sybase :connectstring: sybase+pysybase://<username>:<password>@<dsn>/[database name] :url: http://python-sybase.sourceforge.net/ Unicode Support --------------- The python-sybase driver does not appear to support non-ASCII strings of any kind at this time. """ # noqa from sqlalchemy import processors from sqlalchemy import types as sqltypes from sqlalchemy.dialects.sybase.base import SybaseDialect from sqlalchemy.dialects.sybase.base import SybaseExecutionContext from sqlalchemy.dialects.sybase.base import SybaseSQLCompiler class _SybNumeric(sqltypes.Numeric): def result_processor(self, dialect, type_): if not self.asdecimal: return processors.to_float else: return sqltypes.Numeric.result_processor(self, dialect, type_) class SybaseExecutionContext_pysybase(SybaseExecutionContext): def set_ddl_autocommit(self, dbapi_connection, value): if value: # call commit() on the Sybase connection directly, # to avoid any side effects of calling a Connection # transactional method inside of pre_exec() dbapi_connection.commit() def pre_exec(self): SybaseExecutionContext.pre_exec(self) for param in self.parameters: for key in list(param): param["@" + key] = param[key] del param[key] class SybaseSQLCompiler_pysybase(SybaseSQLCompiler): def bindparam_string(self, name, **kw): return "@" + name class SybaseDialect_pysybase(SybaseDialect): driver = "pysybase" execution_ctx_cls = SybaseExecutionContext_pysybase statement_compiler = SybaseSQLCompiler_pysybase colspecs = {sqltypes.Numeric: _SybNumeric, sqltypes.Float: sqltypes.Float} @classmethod def dbapi(cls): import Sybase return Sybase def create_connect_args(self, url): opts = url.translate_connect_args(username="user", password="passwd") return ([opts.pop("host")], opts) def do_executemany(self, cursor, statement, parameters, context=None): # calling python-sybase executemany yields: # TypeError: string too long for buffer for param in parameters: cursor.execute(statement, param) def _get_server_version_info(self, connection): vers = connection.scalar("select @@version_number") # i.e. 15500, 15000, 12500 == (15, 5, 0, 0), (15, 0, 0, 0), # (12, 5, 0, 0); floor division keeps the fields as ints on py3 return (vers // 1000, vers % 1000 // 100, vers % 100 // 10, vers % 10) def is_disconnect(self, e, connection, cursor): if isinstance( e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError) ): msg = str(e) return ( "Unable to complete network request to host" in msg or "Invalid connection state" in msg or "Invalid cursor state" in msg ) else: return False dialect = SybaseDialect_pysybase
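# --- Hedged worked example (illustration only) ---
# _get_server_version_info above decodes the packed @@version_number by
# peeling decimal fields off the integer; e.g. ASE 15.5 reports 15500:
vers = 15500
print((vers // 1000, vers % 1000 // 100, vers % 100 // 10, vers % 10))
# -> (15, 5, 0, 0)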
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/sybase/pyodbc.py
# sybase/pyodbc.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: sybase+pyodbc :name: PyODBC :dbapi: pyodbc :connectstring: sybase+pyodbc://<username>:<password>@<dsnname>[/<database>] :url: http://pypi.python.org/pypi/pyodbc/ Unicode Support --------------- The pyodbc driver currently supports usage of these Sybase types with Unicode or multibyte strings:: CHAR NCHAR NVARCHAR TEXT VARCHAR Currently *not* supported are:: UNICHAR UNITEXT UNIVARCHAR """ # noqa import decimal from sqlalchemy import processors from sqlalchemy import types as sqltypes from sqlalchemy.connectors.pyodbc import PyODBCConnector from sqlalchemy.dialects.sybase.base import SybaseDialect from sqlalchemy.dialects.sybase.base import SybaseExecutionContext class _SybNumeric_pyodbc(sqltypes.Numeric): """Turns Decimals with adjusted() < -6 into floats. It's not yet known how to get decimals with many significant digits or very large adjusted() into Sybase via pyodbc. """ def bind_processor(self, dialect): super_process = super(_SybNumeric_pyodbc, self).bind_processor(dialect) def process(value): if self.asdecimal and isinstance(value, decimal.Decimal): if value.adjusted() < -6: return processors.to_float(value) if super_process: return super_process(value) else: return value return process class SybaseExecutionContext_pyodbc(SybaseExecutionContext): def set_ddl_autocommit(self, connection, value): if value: connection.autocommit = True else: connection.autocommit = False class SybaseDialect_pyodbc(PyODBCConnector, SybaseDialect): execution_ctx_cls = SybaseExecutionContext_pyodbc colspecs = {sqltypes.Numeric: _SybNumeric_pyodbc} dialect = SybaseDialect_pyodbc
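# --- Hedged worked example (illustration only) ---
# The adjusted() cutoff used by _SybNumeric_pyodbc above: a Decimal whose
# leading significant digit sits at 1e-7 or smaller (adjusted() < -6) is
# coerced to float before being bound.
import decimal

for text in ("0.001", "1E-7"):
    d = decimal.Decimal(text)
    print(text, d.adjusted(), "float" if d.adjusted() < -6 else "Decimal")
# 0.001 -3 Decimal
# 1E-7 -7 float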
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/sybase/base.py
# sybase/base.py # Copyright (C) 2010-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # get_select_precolumns(), limit_clause() implementation # copyright (C) 2007 Fisch Asset Management # AG http://www.fam.ch, with coding by Alexander Houben # alexander.houben@thor-solutions.ch # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: sybase :name: Sybase .. note:: The Sybase dialect within SQLAlchemy **is not currently supported**. It is not tested within continuous integration and is likely to have many issues and caveats not currently handled. Consider using the `external dialect <https://github.com/gordthompson/sqlalchemy-sybase>`_ instead. """ import re from sqlalchemy import exc from sqlalchemy import schema as sa_schema from sqlalchemy import types as sqltypes from sqlalchemy import util from sqlalchemy.engine import default from sqlalchemy.engine import reflection from sqlalchemy.sql import compiler from sqlalchemy.sql import text from sqlalchemy.types import BIGINT from sqlalchemy.types import BINARY from sqlalchemy.types import CHAR from sqlalchemy.types import DATE from sqlalchemy.types import DATETIME from sqlalchemy.types import DECIMAL from sqlalchemy.types import FLOAT from sqlalchemy.types import INT # noqa from sqlalchemy.types import INTEGER from sqlalchemy.types import NCHAR from sqlalchemy.types import NUMERIC from sqlalchemy.types import NVARCHAR from sqlalchemy.types import REAL from sqlalchemy.types import SMALLINT from sqlalchemy.types import TEXT from sqlalchemy.types import TIME from sqlalchemy.types import TIMESTAMP from sqlalchemy.types import Unicode from sqlalchemy.types import VARBINARY from sqlalchemy.types import VARCHAR RESERVED_WORDS = set( [ "add", "all", "alter", "and", "any", "as", "asc", "backup", "begin", "between", "bigint", "binary", "bit", "bottom", "break", "by", "call", "capability", "cascade", "case", "cast", "char", "char_convert", "character", "check", "checkpoint", "close", "comment", "commit", "connect", "constraint", "contains", "continue", "convert", "create", "cross", "cube", "current", "current_timestamp", "current_user", "cursor", "date", "dbspace", "deallocate", "dec", "decimal", "declare", "default", "delete", "deleting", "desc", "distinct", "do", "double", "drop", "dynamic", "else", "elseif", "encrypted", "end", "endif", "escape", "except", "exception", "exec", "execute", "existing", "exists", "externlogin", "fetch", "first", "float", "for", "force", "foreign", "forward", "from", "full", "goto", "grant", "group", "having", "holdlock", "identified", "if", "in", "index", "index_lparen", "inner", "inout", "insensitive", "insert", "inserting", "install", "instead", "int", "integer", "integrated", "intersect", "into", "iq", "is", "isolation", "join", "key", "lateral", "left", "like", "lock", "login", "long", "match", "membership", "message", "mode", "modify", "natural", "new", "no", "noholdlock", "not", "notify", "null", "numeric", "of", "off", "on", "open", "option", "options", "or", "order", "others", "out", "outer", "over", "passthrough", "precision", "prepare", "primary", "print", "privileges", "proc", "procedure", "publication", "raiserror", "readtext", "real", "reference", "references", "release", "remote", "remove", "rename", "reorganize", "resource", "restore", "restrict", "return", "revoke", "right", "rollback", "rollup", "save", "savepoint", "scroll", "select", "sensitive", "session", "set", "setuser", "share", 
"smallint", "some", "sqlcode", "sqlstate", "start", "stop", "subtrans", "subtransaction", "synchronize", "syntax_error", "table", "temporary", "then", "time", "timestamp", "tinyint", "to", "top", "tran", "trigger", "truncate", "tsequal", "unbounded", "union", "unique", "unknown", "unsigned", "update", "updating", "user", "using", "validate", "values", "varbinary", "varchar", "variable", "varying", "view", "wait", "waitfor", "when", "where", "while", "window", "with", "with_cube", "with_lparen", "with_rollup", "within", "work", "writetext", ] ) class _SybaseUnitypeMixin(object): """these types appear to return a buffer object.""" def result_processor(self, dialect, coltype): def process(value): if value is not None: return str(value) # decode("ucs-2") else: return None return process class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode): __visit_name__ = "UNICHAR" class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode): __visit_name__ = "UNIVARCHAR" class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText): __visit_name__ = "UNITEXT" class TINYINT(sqltypes.Integer): __visit_name__ = "TINYINT" class BIT(sqltypes.TypeEngine): __visit_name__ = "BIT" class MONEY(sqltypes.TypeEngine): __visit_name__ = "MONEY" class SMALLMONEY(sqltypes.TypeEngine): __visit_name__ = "SMALLMONEY" class UNIQUEIDENTIFIER(sqltypes.TypeEngine): __visit_name__ = "UNIQUEIDENTIFIER" class IMAGE(sqltypes.LargeBinary): __visit_name__ = "IMAGE" class SybaseTypeCompiler(compiler.GenericTypeCompiler): def visit_large_binary(self, type_, **kw): return self.visit_IMAGE(type_) def visit_boolean(self, type_, **kw): return self.visit_BIT(type_) def visit_unicode(self, type_, **kw): return self.visit_NVARCHAR(type_) def visit_UNICHAR(self, type_, **kw): return "UNICHAR(%d)" % type_.length def visit_UNIVARCHAR(self, type_, **kw): return "UNIVARCHAR(%d)" % type_.length def visit_UNITEXT(self, type_, **kw): return "UNITEXT" def visit_TINYINT(self, type_, **kw): return "TINYINT" def visit_IMAGE(self, type_, **kw): return "IMAGE" def visit_BIT(self, type_, **kw): return "BIT" def visit_MONEY(self, type_, **kw): return "MONEY" def visit_SMALLMONEY(self, type_, **kw): return "SMALLMONEY" def visit_UNIQUEIDENTIFIER(self, type_, **kw): return "UNIQUEIDENTIFIER" ischema_names = { "bigint": BIGINT, "int": INTEGER, "integer": INTEGER, "smallint": SMALLINT, "tinyint": TINYINT, "unsigned bigint": BIGINT, # TODO: unsigned flags "unsigned int": INTEGER, # TODO: unsigned flags "unsigned smallint": SMALLINT, # TODO: unsigned flags "numeric": NUMERIC, "decimal": DECIMAL, "dec": DECIMAL, "float": FLOAT, "double": NUMERIC, # TODO "double precision": NUMERIC, # TODO "real": REAL, "smallmoney": SMALLMONEY, "money": MONEY, "smalldatetime": DATETIME, "datetime": DATETIME, "date": DATE, "time": TIME, "char": CHAR, "character": CHAR, "varchar": VARCHAR, "character varying": VARCHAR, "char varying": VARCHAR, "unichar": UNICHAR, "unicode character": UNIVARCHAR, "nchar": NCHAR, "national char": NCHAR, "national character": NCHAR, "nvarchar": NVARCHAR, "nchar varying": NVARCHAR, "national char varying": NVARCHAR, "national character varying": NVARCHAR, "text": TEXT, "unitext": UNITEXT, "binary": BINARY, "varbinary": VARBINARY, "image": IMAGE, "bit": BIT, # not in documentation for ASE 15.7 "long varchar": TEXT, # TODO "timestamp": TIMESTAMP, "uniqueidentifier": UNIQUEIDENTIFIER, } class SybaseInspector(reflection.Inspector): def __init__(self, conn): reflection.Inspector.__init__(self, conn) def get_table_id(self, table_name, schema=None): """Return the table id 
from `table_name` and `schema`.""" return self.dialect.get_table_id( self.bind, table_name, schema, info_cache=self.info_cache ) class SybaseExecutionContext(default.DefaultExecutionContext): _enable_identity_insert = False def set_ddl_autocommit(self, connection, value): """Must be implemented by subclasses to accommodate DDL executions. "connection" is the raw unwrapped DBAPI connection. "value" is True or False. when True, the connection should be configured such that a DDL can take place subsequently. when False, a DDL has taken place and the connection should be resumed into non-autocommit mode. """ raise NotImplementedError() def pre_exec(self): if self.isinsert: tbl = self.compiled.statement.table seq_column = tbl._autoincrement_column insert_has_sequence = seq_column is not None if insert_has_sequence: self._enable_identity_insert = ( seq_column.key in self.compiled_parameters[0] ) else: self._enable_identity_insert = False if self._enable_identity_insert: self.cursor.execute( "SET IDENTITY_INSERT %s ON" % self.dialect.identifier_preparer.format_table(tbl) ) if self.isddl: # TODO: to enhance this, we can detect "ddl in tran" on the # database settings. this error message should be improved to # include a note about that. if not self.should_autocommit: raise exc.InvalidRequestError( "The Sybase dialect only supports " "DDL in 'autocommit' mode at this time." ) self.root_connection.engine.logger.info( "AUTOCOMMIT (Assuming no Sybase 'ddl in tran')" ) self.set_ddl_autocommit( self.root_connection.connection.connection, True ) def post_exec(self): if self.isddl: self.set_ddl_autocommit(self.root_connection, False) if self._enable_identity_insert: self.cursor.execute( "SET IDENTITY_INSERT %s OFF" % self.dialect.identifier_preparer.format_table( self.compiled.statement.table ) ) def get_lastrowid(self): cursor = self.create_cursor() cursor.execute("SELECT @@identity AS lastrowid") lastrowid = cursor.fetchone()[0] cursor.close() return lastrowid class SybaseSQLCompiler(compiler.SQLCompiler): ansi_bind_rules = True extract_map = util.update_copy( compiler.SQLCompiler.extract_map, {"doy": "dayofyear", "dow": "weekday", "milliseconds": "millisecond"}, ) def get_from_hint_text(self, table, text): return text def limit_clause(self, select, **kw): text = "" if select._limit_clause is not None: text += " ROWS LIMIT " + self.process(select._limit_clause, **kw) if select._offset_clause is not None: if select._limit_clause is None: text += " ROWS" text += " OFFSET " + self.process(select._offset_clause, **kw) return text def visit_extract(self, extract, **kw): field = self.extract_map.get(extract.field, extract.field) return 'DATEPART("%s", %s)' % (field, self.process(extract.expr, **kw)) def visit_now_func(self, fn, **kw): return "GETDATE()" def for_update_clause(self, select): # "FOR UPDATE" is only allowed on "DECLARE CURSOR" # which SQLAlchemy doesn't use return "" def order_by_clause(self, select, **kw): kw["literal_binds"] = True order_by = self.process(select._order_by_clause, **kw) # SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT if order_by and (not self.is_subquery() or select._limit): return " ORDER BY " + order_by else: return "" def delete_table_clause(self, delete_stmt, from_table, extra_froms): """If we have extra froms make sure we render any alias as hint.""" ashint = False if extra_froms: ashint = True return from_table._compiler_dispatch( self, asfrom=True, iscrud=True, ashint=ashint ) def delete_extra_from_clause( self, delete_stmt, from_table, extra_froms, 
from_hints, **kw ): """Render the DELETE .. FROM clause specific to Sybase.""" return "FROM " + ", ".join( t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw) for t in [from_table] + extra_froms ) class SybaseDDLCompiler(compiler.DDLCompiler): def get_column_specification(self, column, **kwargs): colspec = ( self.preparer.format_column(column) + " " + self.dialect.type_compiler.process( column.type, type_expression=column ) ) if column.table is None: raise exc.CompileError( "The Sybase dialect requires Table-bound " "columns in order to generate DDL" ) seq_col = column.table._autoincrement_column # install a IDENTITY Sequence if we have an implicit IDENTITY column if seq_col is column: sequence = ( isinstance(column.default, sa_schema.Sequence) and column.default ) if sequence: start, increment = sequence.start or 1, sequence.increment or 1 else: start, increment = 1, 1 if (start, increment) == (1, 1): colspec += " IDENTITY" else: # TODO: need correct syntax for this colspec += " IDENTITY(%s,%s)" % (start, increment) else: default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default if column.nullable is not None: if not column.nullable or column.primary_key: colspec += " NOT NULL" else: colspec += " NULL" return colspec def visit_drop_index(self, drop): index = drop.element return "\nDROP INDEX %s.%s" % ( self.preparer.quote_identifier(index.table.name), self._prepared_index_name(drop.element, include_schema=False), ) class SybaseIdentifierPreparer(compiler.IdentifierPreparer): reserved_words = RESERVED_WORDS class SybaseDialect(default.DefaultDialect): name = "sybase" supports_unicode_statements = False supports_sane_rowcount = False supports_sane_multi_rowcount = False supports_native_boolean = False supports_unicode_binds = False postfetch_lastrowid = True colspecs = {} ischema_names = ischema_names type_compiler = SybaseTypeCompiler statement_compiler = SybaseSQLCompiler ddl_compiler = SybaseDDLCompiler preparer = SybaseIdentifierPreparer inspector = SybaseInspector construct_arguments = [] def _get_default_schema_name(self, connection): return connection.scalar( text("SELECT user_name() as user_name").columns(username=Unicode) ) def initialize(self, connection): super(SybaseDialect, self).initialize(connection) if ( self.server_version_info is not None and self.server_version_info < (15,) ): self.max_identifier_length = 30 else: self.max_identifier_length = 255 def get_table_id(self, connection, table_name, schema=None, **kw): """Fetch the id for schema.table_name. Several reflection methods require the table id. The idea for using this method is that it can be fetched one time and cached for subsequent calls. 
""" table_id = None if schema is None: schema = self.default_schema_name TABLEID_SQL = text( """ SELECT o.id AS id FROM sysobjects o JOIN sysusers u ON o.uid=u.uid WHERE u.name = :schema_name AND o.name = :table_name AND o.type in ('U', 'V') """ ) if util.py2k: if isinstance(schema, unicode): # noqa schema = schema.encode("ascii") if isinstance(table_name, unicode): # noqa table_name = table_name.encode("ascii") result = connection.execute( TABLEID_SQL, schema_name=schema, table_name=table_name ) table_id = result.scalar() if table_id is None: raise exc.NoSuchTableError(table_name) return table_id @reflection.cache def get_columns(self, connection, table_name, schema=None, **kw): table_id = self.get_table_id( connection, table_name, schema, info_cache=kw.get("info_cache") ) COLUMN_SQL = text( """ SELECT col.name AS name, t.name AS type, (col.status & 8) AS nullable, (col.status & 128) AS autoincrement, com.text AS 'default', col.prec AS precision, col.scale AS scale, col.length AS length FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON col.cdefault = com.id WHERE col.usertype = t.usertype AND col.id = :table_id ORDER BY col.colid """ ) results = connection.execute(COLUMN_SQL, table_id=table_id) columns = [] for ( name, type_, nullable, autoincrement, default_, precision, scale, length, ) in results: col_info = self._get_column_info( name, type_, bool(nullable), bool(autoincrement), default_, precision, scale, length, ) columns.append(col_info) return columns def _get_column_info( self, name, type_, nullable, autoincrement, default, precision, scale, length, ): coltype = self.ischema_names.get(type_, None) kwargs = {} if coltype in (NUMERIC, DECIMAL): args = (precision, scale) elif coltype == FLOAT: args = (precision,) elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR): args = (length,) else: args = () if coltype: coltype = coltype(*args, **kwargs) # is this necessary # if is_array: # coltype = ARRAY(coltype) else: util.warn( "Did not recognize type '%s' of column '%s'" % (type_, name) ) coltype = sqltypes.NULLTYPE if default: default = default.replace("DEFAULT", "").strip() default = re.sub("^'(.*)'$", lambda m: m.group(1), default) else: default = None column_info = dict( name=name, type=coltype, nullable=nullable, default=default, autoincrement=autoincrement, ) return column_info @reflection.cache def get_foreign_keys(self, connection, table_name, schema=None, **kw): table_id = self.get_table_id( connection, table_name, schema, info_cache=kw.get("info_cache") ) table_cache = {} column_cache = {} foreign_keys = [] table_cache[table_id] = {"name": table_name, "schema": schema} COLUMN_SQL = text( """ SELECT c.colid AS id, c.name AS name FROM syscolumns c WHERE c.id = :table_id """ ) results = connection.execute(COLUMN_SQL, table_id=table_id) columns = {} for col in results: columns[col["id"]] = col["name"] column_cache[table_id] = columns REFCONSTRAINT_SQL = text( """ SELECT o.name AS name, r.reftabid AS reftable_id, r.keycnt AS 'count', r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3, r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6, r.fokey7 AS fokey7, r.fokey1 AS fokey8, r.fokey9 AS fokey9, r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12, r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15, r.fokey16 AS fokey16, r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3, r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6, r.refkey7 AS refkey7, r.refkey1 AS refkey8, r.refkey9 AS 
refkey9, r.refkey10 AS refkey10, r.refkey11 AS refkey11, r.refkey12 AS refkey12, r.refkey13 AS refkey13, r.refkey14 AS refkey14, r.refkey15 AS refkey15, r.refkey16 AS refkey16 FROM sysreferences r JOIN sysobjects o on r.tableid = o.id WHERE r.tableid = :table_id """ ) referential_constraints = connection.execute( REFCONSTRAINT_SQL, table_id=table_id ).fetchall() REFTABLE_SQL = text( """ SELECT o.name AS name, u.name AS 'schema' FROM sysobjects o JOIN sysusers u ON o.uid = u.uid WHERE o.id = :table_id """ ) for r in referential_constraints: reftable_id = r["reftable_id"] if reftable_id not in table_cache: c = connection.execute(REFTABLE_SQL, table_id=reftable_id) reftable = c.fetchone() c.close() table_info = {"name": reftable["name"], "schema": None} if ( schema is not None or reftable["schema"] != self.default_schema_name ): table_info["schema"] = reftable["schema"] table_cache[reftable_id] = table_info results = connection.execute(COLUMN_SQL, table_id=reftable_id) reftable_columns = {} for col in results: reftable_columns[col["id"]] = col["name"] column_cache[reftable_id] = reftable_columns reftable = table_cache[reftable_id] reftable_columns = column_cache[reftable_id] constrained_columns = [] referred_columns = [] for i in range(1, r["count"] + 1): constrained_columns.append(columns[r["fokey%i" % i]]) referred_columns.append(reftable_columns[r["refkey%i" % i]]) fk_info = { "constrained_columns": constrained_columns, "referred_schema": reftable["schema"], "referred_table": reftable["name"], "referred_columns": referred_columns, "name": r["name"], } foreign_keys.append(fk_info) return foreign_keys @reflection.cache def get_indexes(self, connection, table_name, schema=None, **kw): table_id = self.get_table_id( connection, table_name, schema, info_cache=kw.get("info_cache") ) INDEX_SQL = text( """ SELECT object_name(i.id) AS table_name, i.keycnt AS 'count', i.name AS name, (i.status & 0x2) AS 'unique', index_col(object_name(i.id), i.indid, 1) AS col_1, index_col(object_name(i.id), i.indid, 2) AS col_2, index_col(object_name(i.id), i.indid, 3) AS col_3, index_col(object_name(i.id), i.indid, 4) AS col_4, index_col(object_name(i.id), i.indid, 5) AS col_5, index_col(object_name(i.id), i.indid, 6) AS col_6, index_col(object_name(i.id), i.indid, 7) AS col_7, index_col(object_name(i.id), i.indid, 8) AS col_8, index_col(object_name(i.id), i.indid, 9) AS col_9, index_col(object_name(i.id), i.indid, 10) AS col_10, index_col(object_name(i.id), i.indid, 11) AS col_11, index_col(object_name(i.id), i.indid, 12) AS col_12, index_col(object_name(i.id), i.indid, 13) AS col_13, index_col(object_name(i.id), i.indid, 14) AS col_14, index_col(object_name(i.id), i.indid, 15) AS col_15, index_col(object_name(i.id), i.indid, 16) AS col_16 FROM sysindexes i, sysobjects o WHERE o.id = i.id AND o.id = :table_id AND (i.status & 2048) = 0 AND i.indid BETWEEN 1 AND 254 """ ) results = connection.execute(INDEX_SQL, table_id=table_id) indexes = [] for r in results: column_names = [] for i in range(1, r["count"]): column_names.append(r["col_%i" % (i,)]) index_info = { "name": r["name"], "unique": bool(r["unique"]), "column_names": column_names, } indexes.append(index_info) return indexes @reflection.cache def get_pk_constraint(self, connection, table_name, schema=None, **kw): table_id = self.get_table_id( connection, table_name, schema, info_cache=kw.get("info_cache") ) PK_SQL = text( """ SELECT object_name(i.id) AS table_name, i.keycnt AS 'count', i.name AS name, index_col(object_name(i.id), i.indid, 1) AS pk_1, 
index_col(object_name(i.id), i.indid, 2) AS pk_2, index_col(object_name(i.id), i.indid, 3) AS pk_3, index_col(object_name(i.id), i.indid, 4) AS pk_4, index_col(object_name(i.id), i.indid, 5) AS pk_5, index_col(object_name(i.id), i.indid, 6) AS pk_6, index_col(object_name(i.id), i.indid, 7) AS pk_7, index_col(object_name(i.id), i.indid, 8) AS pk_8, index_col(object_name(i.id), i.indid, 9) AS pk_9, index_col(object_name(i.id), i.indid, 10) AS pk_10, index_col(object_name(i.id), i.indid, 11) AS pk_11, index_col(object_name(i.id), i.indid, 12) AS pk_12, index_col(object_name(i.id), i.indid, 13) AS pk_13, index_col(object_name(i.id), i.indid, 14) AS pk_14, index_col(object_name(i.id), i.indid, 15) AS pk_15, index_col(object_name(i.id), i.indid, 16) AS pk_16 FROM sysindexes i, sysobjects o WHERE o.id = i.id AND o.id = :table_id AND (i.status & 2048) = 2048 AND i.indid BETWEEN 1 AND 254 """ ) results = connection.execute(PK_SQL, table_id=table_id) pks = results.fetchone() results.close() constrained_columns = [] if pks: for i in range(1, pks["count"] + 1): constrained_columns.append(pks["pk_%i" % (i,)]) return { "constrained_columns": constrained_columns, "name": pks["name"], } else: return {"constrained_columns": [], "name": None} @reflection.cache def get_schema_names(self, connection, **kw): SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u") schemas = connection.execute(SCHEMA_SQL) return [s["name"] for s in schemas] @reflection.cache def get_table_names(self, connection, schema=None, **kw): if schema is None: schema = self.default_schema_name TABLE_SQL = text( """ SELECT o.name AS name FROM sysobjects o JOIN sysusers u ON o.uid = u.uid WHERE u.name = :schema_name AND o.type = 'U' """ ) if util.py2k: if isinstance(schema, unicode): # noqa schema = schema.encode("ascii") tables = connection.execute(TABLE_SQL, schema_name=schema) return [t["name"] for t in tables] @reflection.cache def get_view_definition(self, connection, view_name, schema=None, **kw): if schema is None: schema = self.default_schema_name VIEW_DEF_SQL = text( """ SELECT c.text FROM syscomments c JOIN sysobjects o ON c.id = o.id WHERE o.name = :view_name AND o.type = 'V' """ ) if util.py2k: if isinstance(view_name, unicode): # noqa view_name = view_name.encode("ascii") view = connection.execute(VIEW_DEF_SQL, view_name=view_name) return view.scalar() @reflection.cache def get_view_names(self, connection, schema=None, **kw): if schema is None: schema = self.default_schema_name VIEW_SQL = text( """ SELECT o.name AS name FROM sysobjects o JOIN sysusers u ON o.uid = u.uid WHERE u.name = :schema_name AND o.type = 'V' """ ) if util.py2k: if isinstance(schema, unicode): # noqa schema = schema.encode("ascii") views = connection.execute(VIEW_SQL, schema_name=schema) return [v["name"] for v in views] def has_table(self, connection, table_name, schema=None): try: self.get_table_id(connection, table_name, schema) except exc.NoSuchTableError: return False else: return True
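The reflection methods above are normally reached through SQLAlchemy's Inspector facade rather than called directly. A minimal sketch, assuming a working Sybase DBAPI and a hypothetical DSN, table name, and credentials (and bearing in mind the unsupported status noted at the top of the module):

from sqlalchemy import create_engine, inspect

# Hypothetical URL; the pysybase driver must be installed separately.
engine = create_engine("sybase+pysybase://user:password@mydsn/mydb")

insp = inspect(engine)  # resolves to SybaseInspector via SybaseDialect.inspector

# get_columns()/get_pk_constraint()/get_foreign_keys() all resolve the
# table id once via get_table_id() and share it through info_cache.
for col in insp.get_columns("some_table"):
    print(col["name"], col["type"], col["nullable"])
print(insp.get_pk_constraint("some_table"))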
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/zxjdbc.py
# postgresql/zxjdbc.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: postgresql+zxjdbc :name: zxJDBC for Jython :dbapi: zxjdbc :connectstring: postgresql+zxjdbc://scott:tiger@localhost/db :driverurl: http://jdbc.postgresql.org/ """ from .base import PGDialect from .base import PGExecutionContext from ...connectors.zxJDBC import ZxJDBCConnector class PGExecutionContext_zxjdbc(PGExecutionContext): def create_cursor(self): cursor = self._dbapi_connection.cursor() cursor.datahandler = self.dialect.DataHandler(cursor.datahandler) return cursor class PGDialect_zxjdbc(ZxJDBCConnector, PGDialect): jdbc_db_name = "postgresql" jdbc_driver_name = "org.postgresql.Driver" execution_ctx_cls = PGExecutionContext_zxjdbc supports_native_decimal = True def __init__(self, *args, **kwargs): super(PGDialect_zxjdbc, self).__init__(*args, **kwargs) from com.ziclix.python.sql.handler import PostgresqlDataHandler self.DataHandler = PostgresqlDataHandler def _get_server_version_info(self, connection): parts = connection.connection.dbversion.split(".") return tuple(int(x) for x in parts) dialect = PGDialect_zxjdbc
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/psycopg2cffi.py
# postgresql/psycopg2cffi.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php r""" .. dialect:: postgresql+psycopg2cffi :name: psycopg2cffi :dbapi: psycopg2cffi :connectstring: postgresql+psycopg2cffi://user:password@host:port/dbname[?key=value&key=value...] :url: http://pypi.python.org/pypi/psycopg2cffi/ ``psycopg2cffi`` is an adaptation of ``psycopg2``, using CFFI for the C layer. This makes it suitable for use in e.g. PyPy. Documentation is as per ``psycopg2``. .. versionadded:: 1.0.0 .. seealso:: :mod:`sqlalchemy.dialects.postgresql.psycopg2` """ # noqa from .psycopg2 import PGDialect_psycopg2 class PGDialect_psycopg2cffi(PGDialect_psycopg2): driver = "psycopg2cffi" supports_unicode_statements = True # psycopg2cffi's first release is 2.5.0, but reports # __version__ as 2.4.4. Subsequent releases seem to have # fixed this. FEATURE_VERSION_MAP = dict( native_json=(2, 4, 4), native_jsonb=(2, 7, 1), sane_multi_rowcount=(2, 4, 4), array_oid=(2, 4, 4), hstore_adapter=(2, 4, 4), ) @classmethod def dbapi(cls): return __import__("psycopg2cffi") @classmethod def _psycopg2_extensions(cls): root = __import__("psycopg2cffi", fromlist=["extensions"]) return root.extensions @classmethod def _psycopg2_extras(cls): root = __import__("psycopg2cffi", fromlist=["extras"]) return root.extras dialect = PGDialect_psycopg2cffi
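A minimal usage sketch, assuming psycopg2cffi is installed and using placeholder credentials. Since PGDialect_psycopg2cffi subclasses PGDialect_psycopg2, the psycopg2 options (isolation levels, executemany modes, and so on) apply here as well, gated by the FEATURE_VERSION_MAP above:

from sqlalchemy import create_engine

engine = create_engine(
    "postgresql+psycopg2cffi://scott:tiger@localhost:5432/test"
)
with engine.connect() as conn:
    # any psycopg2-documented behavior applies through this dialect too
    print(conn.scalar("SELECT version()"))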
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/__init__.py
# postgresql/__init__.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from . import base from . import pg8000 # noqa from . import psycopg2 # noqa from . import psycopg2cffi # noqa from . import pygresql # noqa from . import pypostgresql # noqa from . import zxjdbc # noqa from .array import All from .array import Any from .array import ARRAY from .array import array from .base import BIGINT from .base import BIT from .base import BOOLEAN from .base import BYTEA from .base import CHAR from .base import CIDR from .base import CreateEnumType from .base import DATE from .base import DOUBLE_PRECISION from .base import DropEnumType from .base import ENUM from .base import FLOAT from .base import INET from .base import INTEGER from .base import INTERVAL from .base import MACADDR from .base import MONEY from .base import NUMERIC from .base import OID from .base import REAL from .base import REGCLASS from .base import SMALLINT from .base import TEXT from .base import TIME from .base import TIMESTAMP from .base import TSVECTOR from .base import UUID from .base import VARCHAR from .dml import Insert from .dml import insert from .ext import aggregate_order_by from .ext import array_agg from .ext import ExcludeConstraint from .hstore import HSTORE from .hstore import hstore from .json import JSON from .json import JSONB from .ranges import DATERANGE from .ranges import INT4RANGE from .ranges import INT8RANGE from .ranges import NUMRANGE from .ranges import TSRANGE from .ranges import TSTZRANGE base.dialect = dialect = psycopg2.dialect __all__ = ( "INTEGER", "BIGINT", "SMALLINT", "VARCHAR", "CHAR", "TEXT", "NUMERIC", "FLOAT", "REAL", "INET", "CIDR", "UUID", "BIT", "MACADDR", "MONEY", "OID", "REGCLASS", "DOUBLE_PRECISION", "TIMESTAMP", "TIME", "DATE", "BYTEA", "BOOLEAN", "INTERVAL", "ARRAY", "ENUM", "dialect", "array", "HSTORE", "hstore", "INT4RANGE", "INT8RANGE", "NUMRANGE", "DATERANGE", "TSVECTOR", "TSRANGE", "TSTZRANGE", "JSON", "JSONB", "Any", "All", "DropEnumType", "CreateEnumType", "ExcludeConstraint", "aggregate_order_by", "array_agg", "insert", "Insert", )
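A short sketch using a few of the names re-exported above; the table and column names are invented for illustration:

from sqlalchemy import Column, Integer, MetaData, Table
from sqlalchemy.dialects.postgresql import ARRAY, JSONB, UUID

metadata = MetaData()
documents = Table(
    "documents",
    metadata,
    Column("id", UUID(as_uuid=True), primary_key=True),
    Column("tags", ARRAY(Integer)),   # PostgreSQL-only ARRAY type
    Column("payload", JSONB),         # binary JSON with containment operators
)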
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/pygresql.py
# postgresql/pygresql.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: postgresql+pygresql :name: pygresql :dbapi: pgdb :connectstring: postgresql+pygresql://user:password@host:port/dbname[?key=value&key=value...] :url: http://www.pygresql.org/ .. note:: The pygresql dialect is **not tested as part of SQLAlchemy's continuous integration** and may have unresolved issues. The recommended PostgreSQL dialect is psycopg2. """ # noqa import decimal import re from .base import _DECIMAL_TYPES from .base import _FLOAT_TYPES from .base import _INT_TYPES from .base import PGCompiler from .base import PGDialect from .base import PGIdentifierPreparer from .base import UUID from .hstore import HSTORE from .json import JSON from .json import JSONB from ... import exc from ... import processors from ... import util from ...sql.elements import Null from ...types import JSON as Json from ...types import Numeric class _PGNumeric(Numeric): def bind_processor(self, dialect): return None def result_processor(self, dialect, coltype): if not isinstance(coltype, int): coltype = coltype.oid if self.asdecimal: if coltype in _FLOAT_TYPES: return processors.to_decimal_processor_factory( decimal.Decimal, self._effective_decimal_return_scale ) elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: # PyGreSQL returns Decimal natively for 1700 (numeric) return None else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype ) else: if coltype in _FLOAT_TYPES: # PyGreSQL returns float natively for 701 (float8) return None elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: return processors.to_float else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype ) class _PGHStore(HSTORE): def bind_processor(self, dialect): if not dialect.has_native_hstore: return super(_PGHStore, self).bind_processor(dialect) hstore = dialect.dbapi.Hstore def process(value): if isinstance(value, dict): return hstore(value) return value return process def result_processor(self, dialect, coltype): if not dialect.has_native_hstore: return super(_PGHStore, self).result_processor(dialect, coltype) class _PGJSON(JSON): def bind_processor(self, dialect): if not dialect.has_native_json: return super(_PGJSON, self).bind_processor(dialect) json = dialect.dbapi.Json def process(value): if value is self.NULL: value = None elif isinstance(value, Null) or ( value is None and self.none_as_null ): return None if value is None or isinstance(value, (dict, list)): return json(value) return value return process def result_processor(self, dialect, coltype): if not dialect.has_native_json: return super(_PGJSON, self).result_processor(dialect, coltype) class _PGJSONB(JSONB): def bind_processor(self, dialect): if not dialect.has_native_json: return super(_PGJSONB, self).bind_processor(dialect) json = dialect.dbapi.Json def process(value): if value is self.NULL: value = None elif isinstance(value, Null) or ( value is None and self.none_as_null ): return None if value is None or isinstance(value, (dict, list)): return json(value) return value return process def result_processor(self, dialect, coltype): if not dialect.has_native_json: return super(_PGJSONB, self).result_processor(dialect, coltype) class _PGUUID(UUID): def bind_processor(self, dialect): if not dialect.has_native_uuid: return super(_PGUUID, self).bind_processor(dialect) uuid = 
dialect.dbapi.Uuid def process(value): if value is None: return None if isinstance(value, (str, bytes)): if len(value) == 16: return uuid(bytes=value) return uuid(value) if isinstance(value, int): return uuid(int=value) return value return process def result_processor(self, dialect, coltype): if not dialect.has_native_uuid: return super(_PGUUID, self).result_processor(dialect, coltype) if not self.as_uuid: def process(value): if value is not None: return str(value) return process class _PGCompiler(PGCompiler): def visit_mod_binary(self, binary, operator, **kw): return ( self.process(binary.left, **kw) + " %% " + self.process(binary.right, **kw) ) def post_process_text(self, text): return text.replace("%", "%%") class _PGIdentifierPreparer(PGIdentifierPreparer): def _escape_identifier(self, value): value = value.replace(self.escape_quote, self.escape_to_quote) return value.replace("%", "%%") class PGDialect_pygresql(PGDialect): driver = "pygresql" statement_compiler = _PGCompiler preparer = _PGIdentifierPreparer @classmethod def dbapi(cls): import pgdb return pgdb colspecs = util.update_copy( PGDialect.colspecs, { Numeric: _PGNumeric, HSTORE: _PGHStore, Json: _PGJSON, JSON: _PGJSON, JSONB: _PGJSONB, UUID: _PGUUID, }, ) def __init__(self, **kwargs): super(PGDialect_pygresql, self).__init__(**kwargs) try: version = self.dbapi.version m = re.match(r"(\d+)\.(\d+)", version) version = (int(m.group(1)), int(m.group(2))) except (AttributeError, ValueError, TypeError): version = (0, 0) self.dbapi_version = version if version < (5, 0): has_native_hstore = has_native_json = has_native_uuid = False if version != (0, 0): util.warn( "PyGreSQL is only fully supported by SQLAlchemy" " since version 5.0." ) else: self.supports_unicode_statements = True self.supports_unicode_binds = True has_native_hstore = has_native_json = has_native_uuid = True self.has_native_hstore = has_native_hstore self.has_native_json = has_native_json self.has_native_uuid = has_native_uuid def create_connect_args(self, url): opts = url.translate_connect_args(username="user") if "port" in opts: opts["host"] = "%s:%s" % ( opts.get("host", "").rsplit(":", 1)[0], opts.pop("port"), ) opts.update(url.query) return [], opts def is_disconnect(self, e, connection, cursor): if isinstance(e, self.dbapi.Error): if not connection: return False try: connection = connection.connection except AttributeError: pass else: if not connection: return False try: return connection.closed except AttributeError: # PyGreSQL < 5.0 return connection._cnx is None return False dialect = PGDialect_pygresql
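To illustrate create_connect_args() above, which folds the port into the host string because pgdb.connect() accepts "host:port" notation rather than a separate port argument, a small sketch that needs no live database:

from sqlalchemy.dialects.postgresql.pygresql import PGDialect_pygresql
from sqlalchemy.engine.url import make_url

url = make_url("postgresql+pygresql://scott:tiger@localhost:5432/test")
args, opts = PGDialect_pygresql().create_connect_args(url)
print(opts["host"])  # "localhost:5432" -- port folded into the host string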
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/ranges.py
# Copyright (C) 2013-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from ... import types as sqltypes __all__ = ( "INT4RANGE", "INT8RANGE", "NUMRANGE", "DATERANGE", "TSRANGE", "TSTZRANGE", ) class RangeOperators(object): """ This mixin provides functionality for the Range Operators listed in Table 9-44 of the `postgres documentation`__ for Range Functions and Operators. It is used by all the range types provided in the ``postgres`` dialect and can likely be used for any range types you create yourself. __ http://www.postgresql.org/docs/devel/static/functions-range.html No extra support is provided for the Range Functions listed in Table 9-45 of the postgres documentation. For these, the normal :func:`~sqlalchemy.sql.expression.func` object should be used. """ class comparator_factory(sqltypes.Concatenable.Comparator): """Define comparison operations for range types.""" def __ne__(self, other): "Boolean expression. Returns true if two ranges are not equal" if other is None: return super(RangeOperators.comparator_factory, self).__ne__( other ) else: return self.expr.op("<>")(other) def contains(self, other, **kw): """Boolean expression. Returns true if the right hand operand, which can be an element or a range, is contained within the column. """ return self.expr.op("@>")(other) def contained_by(self, other): """Boolean expression. Returns true if the column is contained within the right hand operand. """ return self.expr.op("<@")(other) def overlaps(self, other): """Boolean expression. Returns true if the column overlaps (has points in common with) the right hand operand. """ return self.expr.op("&&")(other) def strictly_left_of(self, other): """Boolean expression. Returns true if the column is strictly left of the right hand operand. """ return self.expr.op("<<")(other) __lshift__ = strictly_left_of def strictly_right_of(self, other): """Boolean expression. Returns true if the column is strictly right of the right hand operand. """ return self.expr.op(">>")(other) __rshift__ = strictly_right_of def not_extend_right_of(self, other): """Boolean expression. Returns true if the range in the column does not extend right of the range in the operand. """ return self.expr.op("&<")(other) def not_extend_left_of(self, other): """Boolean expression. Returns true if the range in the column does not extend left of the range in the operand. """ return self.expr.op("&>")(other) def adjacent_to(self, other): """Boolean expression. Returns true if the range in the column is adjacent to the range in the operand. """ return self.expr.op("-|-")(other) def __add__(self, other): """Range expression. Returns the union of the two ranges. Will raise an exception if the resulting range is not contiguous. """ return self.expr.op("+")(other) class INT4RANGE(RangeOperators, sqltypes.TypeEngine): """Represent the PostgreSQL INT4RANGE type. """ __visit_name__ = "INT4RANGE" class INT8RANGE(RangeOperators, sqltypes.TypeEngine): """Represent the PostgreSQL INT8RANGE type. """ __visit_name__ = "INT8RANGE" class NUMRANGE(RangeOperators, sqltypes.TypeEngine): """Represent the PostgreSQL NUMRANGE type. """ __visit_name__ = "NUMRANGE" class DATERANGE(RangeOperators, sqltypes.TypeEngine): """Represent the PostgreSQL DATERANGE type. """ __visit_name__ = "DATERANGE" class TSRANGE(RangeOperators, sqltypes.TypeEngine): """Represent the PostgreSQL TSRANGE type.
""" __visit_name__ = "TSRANGE" class TSTZRANGE(RangeOperators, sqltypes.TypeEngine): """Represent the PostgreSQL TSTZRANGE type. """ __visit_name__ = "TSTZRANGE"
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/pypostgresql.py
# postgresql/pypostgresql.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: postgresql+pypostgresql :name: py-postgresql :dbapi: pypostgresql :connectstring: postgresql+pypostgresql://user:password@host:port/dbname[?key=value&key=value...] :url: http://python.projects.pgfoundry.org/ .. note:: The pypostgresql dialect is **not tested as part of SQLAlchemy's continuous integration** and may have unresolved issues. The recommended PostgreSQL driver is psycopg2. """ # noqa from .base import PGDialect from .base import PGExecutionContext from ... import processors from ... import types as sqltypes from ... import util class PGNumeric(sqltypes.Numeric): def bind_processor(self, dialect): return processors.to_str def result_processor(self, dialect, coltype): if self.asdecimal: return None else: return processors.to_float class PGExecutionContext_pypostgresql(PGExecutionContext): pass class PGDialect_pypostgresql(PGDialect): driver = "pypostgresql" supports_unicode_statements = True supports_unicode_binds = True description_encoding = None default_paramstyle = "pyformat" # requires trunk version to support sane rowcounts # TODO: use dbapi version information to set this flag appropriately supports_sane_rowcount = True supports_sane_multi_rowcount = False execution_ctx_cls = PGExecutionContext_pypostgresql colspecs = util.update_copy( PGDialect.colspecs, { sqltypes.Numeric: PGNumeric, # prevents PGNumeric from being used sqltypes.Float: sqltypes.Float, }, ) @classmethod def dbapi(cls): from postgresql.driver import dbapi20 return dbapi20 _DBAPI_ERROR_NAMES = [ "Error", "InterfaceError", "DatabaseError", "DataError", "OperationalError", "IntegrityError", "InternalError", "ProgrammingError", "NotSupportedError", ] @util.memoized_property def dbapi_exception_translation_map(self): if self.dbapi is None: return {} return dict( (getattr(self.dbapi, name).__name__, name) for name in self._DBAPI_ERROR_NAMES ) def create_connect_args(self, url): opts = url.translate_connect_args(username="user") if "port" in opts: opts["port"] = int(opts["port"]) else: opts["port"] = 5432 opts.update(url.query) return ([], opts) def is_disconnect(self, e, connection, cursor): return "connection is closed" in str(e) dialect = PGDialect_pypostgresql
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py
# postgresql/psycopg2.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php r""" .. dialect:: postgresql+psycopg2 :name: psycopg2 :dbapi: psycopg2 :connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...] :url: http://pypi.python.org/pypi/psycopg2/ psycopg2 Connect Arguments ----------------------------------- psycopg2-specific keyword arguments which are accepted by :func:`_sa.create_engine()` are: * ``server_side_cursors``: Enable the usage of "server side cursors" for SQL statements which support this feature. What this essentially means from a psycopg2 point of view is that the cursor is created using a name, e.g. ``connection.cursor('some name')``, which has the effect that result rows are not immediately pre-fetched and buffered after statement execution, but are instead left on the server and only retrieved as needed. SQLAlchemy's :class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering behavior when this feature is enabled, such that groups of 100 rows at a time are fetched over the wire to reduce conversational overhead. Note that the :paramref:`.Connection.execution_options.stream_results` execution option is a more targeted way of enabling this mode on a per-execution basis. * ``use_native_unicode``: Enable the usage of Psycopg2 "native unicode" mode per connection. True by default. .. seealso:: :ref:`psycopg2_disable_native_unicode` * ``isolation_level``: This option, available for all PostgreSQL dialects, includes the ``AUTOCOMMIT`` isolation level when using the psycopg2 dialect. .. seealso:: :ref:`psycopg2_isolation_level` * ``client_encoding``: sets the client encoding in a libpq-agnostic way, using psycopg2's ``set_client_encoding()`` method. .. seealso:: :ref:`psycopg2_unicode` * ``executemany_mode``, ``executemany_batch_page_size``, ``executemany_values_page_size``: Allows use of psycopg2 extensions for optimizing "executemany"-style queries. See the referenced section below for details. .. seealso:: :ref:`psycopg2_executemany_mode` * ``use_batch_mode``: this is the previous setting used to affect "executemany" mode and is now deprecated. Unix Domain Connections ------------------------ psycopg2 supports connecting via Unix domain connections. When the ``host`` portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2, which specifies Unix-domain communication rather than TCP/IP communication:: create_engine("postgresql+psycopg2://user:password@/dbname") By default, psycopg2 connects to a Unix-domain socket in ``/tmp``, or whatever socket directory was specified when PostgreSQL was built. This value can be overridden by passing a pathname to psycopg2, using ``host`` as an additional keyword argument:: create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql") .. seealso:: `PQconnectdbParams \ <http://www.postgresql.org/docs/9.1/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_ Empty DSN Connections / Environment Variable Connections --------------------------------------------------------- The psycopg2 DBAPI can connect to PostgreSQL by passing an empty DSN to the libpq client library, which by default indicates to connect to a localhost PostgreSQL database that is open for "trust" connections.
This behavior can be further tailored using a particular set of environment variables which are prefixed with ``PG_...``, which are consumed by ``libpq`` to take the place of any or all elements of the connection string. For this form, the URL can be passed without any elements other than the initial scheme:: engine = create_engine('postgresql+psycopg2://') In the above form, a blank "dsn" string is passed to the ``psycopg2.connect()`` function which in turn represents an empty DSN passed to libpq. .. versionadded:: 1.3.2 support for parameter-less connections with psycopg2. .. seealso:: `Environment Variables\ <https://www.postgresql.org/docs/current/libpq-envars.html>`_ - PostgreSQL documentation on how to use ``PG_...`` environment variables for connections. .. _psycopg2_execution_options: Per-Statement/Connection Execution Options ------------------------------------------- The following DBAPI-specific options are respected when used with :meth:`_engine.Connection.execution_options`, :meth:`.Executable.execution_options`, :meth:`_query.Query.execution_options`, in addition to those not specific to DBAPIs: * ``isolation_level`` - Set the transaction isolation level for the lifespan of a :class:`_engine.Connection` (can only be set on a connection, not a statement or query). See :ref:`psycopg2_isolation_level`. * ``stream_results`` - Enable or disable usage of psycopg2 server side cursors - this feature makes use of "named" cursors in combination with special result handling methods so that result rows are not fully buffered. If ``None`` or not set, the ``server_side_cursors`` option of the :class:`_engine.Engine` is used. * ``max_row_buffer`` - when using ``stream_results``, an integer value that specifies the maximum number of rows to buffer at a time. This is interpreted by the :class:`.BufferedRowResultProxy`, and if omitted the buffer will grow to ultimately store 1000 rows at a time. .. versionadded:: 1.0.6 .. _psycopg2_batch_mode: .. _psycopg2_executemany_mode: Psycopg2 Fast Execution Helpers ------------------------------- Modern versions of psycopg2 include a feature known as `Fast Execution Helpers \ <http://initd.org/psycopg/docs/extras.html#fast-execution-helpers>`_, which have been shown in benchmarking to improve psycopg2's executemany() performance, primarily with INSERT statements, by multiple orders of magnitude. SQLAlchemy allows this extension to be used for all ``executemany()`` style calls invoked by an :class:`_engine.Engine` when used with :ref:`multiple parameter sets <execute_multiple>`, which includes the use of this feature both by the Core as well as by the ORM for inserts of objects with non-autogenerated primary key values, by adding the ``executemany_mode`` flag to :func:`_sa.create_engine`:: engine = create_engine( "postgresql+psycopg2://scott:tiger@host/dbname", executemany_mode='batch') .. versionchanged:: 1.3.7 - the ``use_batch_mode`` flag has been superseded by a new parameter ``executemany_mode`` which provides support both for psycopg2's ``execute_batch`` helper as well as the ``execute_values`` helper. Possible options for ``executemany_mode`` include: * ``None`` - By default, psycopg2's extensions are not used, and the usual ``cursor.executemany()`` method is used when invoking batches of statements. * ``'batch'`` - Uses ``psycopg2.extras.execute_batch`` so that multiple copies of a SQL query, each one corresponding to a parameter set passed to ``executemany()``, are joined into a single SQL string separated by a semicolon. 
This is the same behavior as was provided by the ``use_batch_mode=True`` flag. * ``'values'``- For Core :func:`_expression.insert` constructs only (including those emitted by the ORM automatically), the ``psycopg2.extras.execute_values`` extension is used so that multiple parameter sets are grouped into a single INSERT statement and joined together with multiple VALUES expressions. This method requires that the string text of the VALUES clause inside the INSERT statement is manipulated, so is only supported with a compiled :func:`_expression.insert` construct where the format is predictable. For all other constructs, including plain textual INSERT statements not rendered by the SQLAlchemy expression language compiler, the ``psycopg2.extras.execute_batch`` method is used. It is therefore important to note that **"values" mode implies that "batch" mode is also used for all statements for which "values" mode does not apply**. For both strategies, the ``executemany_batch_page_size`` and ``executemany_values_page_size`` arguments control how many parameter sets should be represented in each execution. Because "values" mode implies a fallback down to "batch" mode for non-INSERT statements, there are two independent page size arguments. For each, the default value of ``None`` means to use psycopg2's defaults, which at the time of this writing are quite low at 100. For the ``execute_values`` method, a number as high as 10000 may prove to be performant, whereas for ``execute_batch``, as the number represents full statements repeated, a number closer to the default of 100 is likely more appropriate:: engine = create_engine( "postgresql+psycopg2://scott:tiger@host/dbname", executemany_mode='values', executemany_values_page_size=10000, executemany_batch_page_size=500) .. seealso:: :ref:`execute_multiple` - General information on using the :class:`_engine.Connection` object to execute statements in such a way as to make use of the DBAPI ``.executemany()`` method. .. versionchanged:: 1.3.7 - Added support for ``psycopg2.extras.execute_values``. The ``use_batch_mode`` flag is superseded by the ``executemany_mode`` flag. .. _psycopg2_unicode: Unicode with Psycopg2 ---------------------- By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE`` extension, such that the DBAPI receives and returns all strings as Python Unicode objects directly - SQLAlchemy passes these values through without change. Psycopg2 here will encode/decode string values based on the current "client encoding" setting; by default this is the value in the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``. Typically, this can be changed to ``utf8``, as a more useful default:: # postgresql.conf file # client_encoding = sql_ascii # actually, defaults to database # encoding client_encoding = utf8 A second way to affect the client encoding is to set it within Psycopg2 locally. SQLAlchemy will call psycopg2's :meth:`psycopg2:connection.set_client_encoding` method on all new connections based on the value passed to :func:`_sa.create_engine` using the ``client_encoding`` parameter:: # set_client_encoding() setting; # works for *all* PostgreSQL versions engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8') This overrides the encoding specified in the PostgreSQL client configuration. When using the parameter in this way, the psycopg2 driver emits ``SET client_encoding TO 'utf8'`` on the connection explicitly, and works in all PostgreSQL versions. 
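The effective setting can be confirmed on a live connection by asking the server directly (a sketch, assuming an ``engine`` created as above)::

    from sqlalchemy import text

    with engine.connect() as conn:
        print(conn.scalar(text("SHOW client_encoding")))  # e.g. UTF8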
Note that the ``client_encoding`` setting as passed to :func:`_sa.create_engine` is **not the same** as the more recently added ``client_encoding`` parameter now supported by libpq directly. This is enabled when ``client_encoding`` is passed directly to ``psycopg2.connect()``, and from SQLAlchemy is passed using the :paramref:`_sa.create_engine.connect_args` parameter:: engine = create_engine( "postgresql://user:pass@host/dbname", connect_args={'client_encoding': 'utf8'}) # using the query string is equivalent engine = create_engine("postgresql://user:pass@host/dbname?client_encoding=utf8") The above parameter was only added to libpq as of version 9.1 of PostgreSQL, so using the previous method is better for cross-version support. .. _psycopg2_disable_native_unicode: Disabling Native Unicode ^^^^^^^^^^^^^^^^^^^^^^^^ SQLAlchemy can also be instructed to skip the usage of the psycopg2 ``UNICODE`` extension and to instead utilize its own unicode encode/decode services, which are normally reserved only for those DBAPIs that don't fully support unicode directly. Passing ``use_native_unicode=False`` to :func:`_sa.create_engine` will disable usage of ``psycopg2.extensions. UNICODE``. SQLAlchemy will instead encode data itself into Python bytestrings on the way in and coerce from bytes on the way back, using the value of the :func:`_sa.create_engine` ``encoding`` parameter, which defaults to ``utf-8``. SQLAlchemy's own unicode encode/decode functionality is steadily becoming obsolete as most DBAPIs now support unicode fully. Bound Parameter Styles ---------------------- The default parameter style for the psycopg2 dialect is "pyformat", where SQL is rendered using ``%(paramname)s`` style. This format has the limitation that it does not accommodate the unusual case of parameter names that actually contain percent or parenthesis symbols; as SQLAlchemy in many cases generates bound parameter names based on the name of a column, the presence of these characters in a column name can lead to problems. There are two solutions to the issue of a :class:`_schema.Column` that contains one of these characters in its name. One is to specify the :paramref:`.schema.Column.key` for columns that have such names:: measurement = Table('measurement', metadata, Column('Size (meters)', Integer, key='size_meters') ) Above, an INSERT statement such as ``measurement.insert()`` will use ``size_meters`` as the parameter name, and a SQL expression such as ``measurement.c.size_meters > 10`` will derive the bound parameter name from the ``size_meters`` key as well. .. versionchanged:: 1.0.0 - SQL expressions will use :attr:`_schema.Column.key` as the source of naming when anonymous bound parameters are created in SQL expressions; previously, this behavior only applied to :meth:`_schema.Table.insert` and :meth:`_schema.Table.update` parameter names. The other solution is to use a positional format; psycopg2 allows use of the "format" paramstyle, which can be passed to :paramref:`_sa.create_engine.paramstyle`:: engine = create_engine( 'postgresql://scott:tiger@localhost:5432/test', paramstyle='format') With the above engine, instead of a statement like:: INSERT INTO measurement ("Size (meters)") VALUES (%(Size (meters))s) {'Size (meters)': 1} we instead see:: INSERT INTO measurement ("Size (meters)") VALUES (%s) (1, ) Where above, the dictionary style is converted into a tuple with positional style. Transactions ------------ The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations. .. 
_psycopg2_isolation_level: Psycopg2 Transaction Isolation Level ------------------------------------- As discussed in :ref:`postgresql_isolation_level`, all PostgreSQL dialects support setting of transaction isolation level both via the ``isolation_level`` parameter passed to :func:`_sa.create_engine` , as well as the ``isolation_level`` argument used by :meth:`_engine.Connection.execution_options`. When using the psycopg2 dialect , these options make use of psycopg2's ``set_isolation_level()`` connection method, rather than emitting a PostgreSQL directive; this is because psycopg2's API-level setting is always emitted at the start of each transaction in any case. The psycopg2 dialect supports these constants for isolation level: * ``READ COMMITTED`` * ``READ UNCOMMITTED`` * ``REPEATABLE READ`` * ``SERIALIZABLE`` * ``AUTOCOMMIT`` .. seealso:: :ref:`postgresql_isolation_level` :ref:`pg8000_isolation_level` NOTICE logging --------------- The psycopg2 dialect will log PostgreSQL NOTICE messages via the ``sqlalchemy.dialects.postgresql`` logger. When this logger is set to the ``logging.INFO`` level, notice messages will be logged:: import logging logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO) Above, it is assumed that logging is configured externally. If this is not the case, configuration such as ``logging.basicConfig()`` must be utilized:: import logging logging.basicConfig() # log messages to stdout logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO) .. seealso:: `Logging HOWTO <https://docs.python.org/3/howto/logging.html>`_ - on the python.org website .. _psycopg2_hstore: HSTORE type ------------ The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of the HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension by default when psycopg2 version 2.4 or greater is used, and it is detected that the target database has the HSTORE type set up for use. In other words, when the dialect makes the first connection, a sequence like the following is performed: 1. Request the available HSTORE oids using ``psycopg2.extras.HstoreAdapter.get_oids()``. If this function returns a list of HSTORE identifiers, we then determine that the ``HSTORE`` extension is present. This function is **skipped** if the version of psycopg2 installed is less than version 2.4. 2. If the ``use_native_hstore`` flag is at its default of ``True``, and we've detected that ``HSTORE`` oids are available, the ``psycopg2.extensions.register_hstore()`` extension is invoked for all connections. The ``register_hstore()`` extension has the effect of **all Python dictionaries being accepted as parameters regardless of the type of target column in SQL**. The dictionaries are converted by this extension into a textual HSTORE expression. If this behavior is not desired, disable the use of the hstore extension by setting ``use_native_hstore`` to ``False`` as follows:: engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test", use_native_hstore=False) The ``HSTORE`` type is **still supported** when the ``psycopg2.extensions.register_hstore()`` extension is not used. It merely means that the coercion between Python dictionaries and the HSTORE string format, on both the parameter side and the result side, will take place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2`` which may be more performant. 
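As a hedged sketch of the round trip described above, with a hypothetical table and an ``engine`` assumed to exist (and the ``hstore`` extension installed in the target database)::

    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.dialects.postgresql import HSTORE

    metadata = MetaData()
    kv = Table(
        "kv",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("data", HSTORE),
    )
    metadata.create_all(engine)

    # With register_hstore() active, the plain dict is adapted natively;
    # otherwise the _PGHStore fallback below marshals it in Python.
    engine.execute(kv.insert(), {"id": 1, "data": {"key": "value"}})
    print(engine.execute(kv.select()).fetchall())  # e.g. [(1, {'key': 'value'})]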
""" # noqa from __future__ import absolute_import import decimal import logging import re from .base import _DECIMAL_TYPES from .base import _FLOAT_TYPES from .base import _INT_TYPES from .base import ENUM from .base import PGCompiler from .base import PGDialect from .base import PGExecutionContext from .base import PGIdentifierPreparer from .base import UUID from .hstore import HSTORE from .json import JSON from .json import JSONB from ... import exc from ... import processors from ... import types as sqltypes from ... import util from ...engine import result as _result from ...util import collections_abc try: from uuid import UUID as _python_UUID # noqa except ImportError: _python_UUID = None logger = logging.getLogger("sqlalchemy.dialects.postgresql") class _PGNumeric(sqltypes.Numeric): def bind_processor(self, dialect): return None def result_processor(self, dialect, coltype): if self.asdecimal: if coltype in _FLOAT_TYPES: return processors.to_decimal_processor_factory( decimal.Decimal, self._effective_decimal_return_scale ) elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: # pg8000 returns Decimal natively for 1700 return None else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype ) else: if coltype in _FLOAT_TYPES: # pg8000 returns float natively for 701 return None elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: return processors.to_float else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype ) class _PGEnum(ENUM): def result_processor(self, dialect, coltype): if util.py2k and self._expect_unicode is True: # for py2k, if the enum type needs unicode data (which is set up as # part of the Enum() constructor based on values passed as py2k # unicode objects) we have to use our own converters since # psycopg2's don't work, a rare exception to the "modern DBAPIs # support unicode everywhere" theme of deprecating # convert_unicode=True. Use the special "force_nocheck" directive # which forces unicode conversion to happen on the Python side # without an isinstance() check. in py3k psycopg2 does the right # thing automatically. 
self._expect_unicode = "force_nocheck" return super(_PGEnum, self).result_processor(dialect, coltype) class _PGHStore(HSTORE): def bind_processor(self, dialect): if dialect._has_native_hstore: return None else: return super(_PGHStore, self).bind_processor(dialect) def result_processor(self, dialect, coltype): if dialect._has_native_hstore: return None else: return super(_PGHStore, self).result_processor(dialect, coltype) class _PGJSON(JSON): def result_processor(self, dialect, coltype): if dialect._has_native_json: return None else: return super(_PGJSON, self).result_processor(dialect, coltype) class _PGJSONB(JSONB): def result_processor(self, dialect, coltype): if dialect._has_native_jsonb: return None else: return super(_PGJSONB, self).result_processor(dialect, coltype) class _PGUUID(UUID): def bind_processor(self, dialect): if not self.as_uuid and dialect.use_native_uuid: def process(value): if value is not None: value = _python_UUID(value) return value return process def result_processor(self, dialect, coltype): if not self.as_uuid and dialect.use_native_uuid: def process(value): if value is not None: value = str(value) return value return process _server_side_id = util.counter() class PGExecutionContext_psycopg2(PGExecutionContext): def create_server_side_cursor(self): # use server-side cursors: # http://lists.initd.org/pipermail/psycopg/2007-January/005251.html ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:]) return self._dbapi_connection.cursor(ident) def get_result_proxy(self): self._log_notices(self.cursor) if self._is_server_side: return _result.BufferedRowResultProxy(self) else: return _result.ResultProxy(self) def _log_notices(self, cursor): # check also that notices is an iterable, after it's already # established that we will be iterating through it. 
This is to get # around test suites such as SQLAlchemy's using a Mock object for # cursor if not cursor.connection.notices or not isinstance( cursor.connection.notices, collections_abc.Iterable ): return for notice in cursor.connection.notices: # NOTICE messages have a # newline character at the end logger.info(notice.rstrip()) cursor.connection.notices[:] = [] class PGCompiler_psycopg2(PGCompiler): pass class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer): pass EXECUTEMANY_DEFAULT = util.symbol("executemany_default") EXECUTEMANY_BATCH = util.symbol("executemany_batch") EXECUTEMANY_VALUES = util.symbol("executemany_values") class PGDialect_psycopg2(PGDialect): driver = "psycopg2" if util.py2k: supports_unicode_statements = False supports_server_side_cursors = True default_paramstyle = "pyformat" # set to true based on psycopg2 version supports_sane_multi_rowcount = False execution_ctx_cls = PGExecutionContext_psycopg2 statement_compiler = PGCompiler_psycopg2 preparer = PGIdentifierPreparer_psycopg2 psycopg2_version = (0, 0) FEATURE_VERSION_MAP = dict( native_json=(2, 5), native_jsonb=(2, 5, 4), sane_multi_rowcount=(2, 0, 9), array_oid=(2, 4, 3), hstore_adapter=(2, 4), ) _has_native_hstore = False _has_native_json = False _has_native_jsonb = False engine_config_types = PGDialect.engine_config_types.union( [("use_native_unicode", util.asbool)] ) colspecs = util.update_copy( PGDialect.colspecs, { sqltypes.Numeric: _PGNumeric, ENUM: _PGEnum, # needs force_unicode sqltypes.Enum: _PGEnum, # needs force_unicode HSTORE: _PGHStore, JSON: _PGJSON, sqltypes.JSON: _PGJSON, JSONB: _PGJSONB, UUID: _PGUUID, }, ) @util.deprecated_params( use_batch_mode=( "1.3.7", "The psycopg2 use_batch_mode flag is superseded by " "executemany_mode='batch'", ) ) def __init__( self, server_side_cursors=False, use_native_unicode=True, client_encoding=None, use_native_hstore=True, use_native_uuid=True, executemany_mode=None, executemany_batch_page_size=None, executemany_values_page_size=None, use_batch_mode=None, **kwargs ): PGDialect.__init__(self, **kwargs) self.server_side_cursors = server_side_cursors self.use_native_unicode = use_native_unicode self.use_native_hstore = use_native_hstore self.use_native_uuid = use_native_uuid self.supports_unicode_binds = use_native_unicode self.client_encoding = client_encoding # Parse executemany_mode argument, allowing it to be only one of the # symbol names self.executemany_mode = util.symbol.parse_user_argument( executemany_mode, { EXECUTEMANY_DEFAULT: [None], EXECUTEMANY_BATCH: ["batch"], EXECUTEMANY_VALUES: ["values"], }, "executemany_mode", ) if use_batch_mode: self.executemany_mode = EXECUTEMANY_BATCH self.executemany_batch_page_size = executemany_batch_page_size self.executemany_values_page_size = executemany_values_page_size if self.dbapi and hasattr(self.dbapi, "__version__"): m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", self.dbapi.__version__) if m: self.psycopg2_version = tuple( int(x) for x in m.group(1, 2, 3) if x is not None ) def initialize(self, connection): super(PGDialect_psycopg2, self).initialize(connection) self._has_native_hstore = ( self.use_native_hstore and self._hstore_oids(connection.connection) is not None ) self._has_native_json = ( self.psycopg2_version >= self.FEATURE_VERSION_MAP["native_json"] ) self._has_native_jsonb = ( self.psycopg2_version >= self.FEATURE_VERSION_MAP["native_jsonb"] ) # http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9 self.supports_sane_multi_rowcount = ( self.psycopg2_version >= 
self.FEATURE_VERSION_MAP["sane_multi_rowcount"] and self.executemany_mode is EXECUTEMANY_DEFAULT ) @classmethod def dbapi(cls): import psycopg2 return psycopg2 @classmethod def _psycopg2_extensions(cls): from psycopg2 import extensions return extensions @classmethod def _psycopg2_extras(cls): from psycopg2 import extras return extras @util.memoized_property def _isolation_lookup(self): extensions = self._psycopg2_extensions() return { "AUTOCOMMIT": extensions.ISOLATION_LEVEL_AUTOCOMMIT, "READ COMMITTED": extensions.ISOLATION_LEVEL_READ_COMMITTED, "READ UNCOMMITTED": extensions.ISOLATION_LEVEL_READ_UNCOMMITTED, "REPEATABLE READ": extensions.ISOLATION_LEVEL_REPEATABLE_READ, "SERIALIZABLE": extensions.ISOLATION_LEVEL_SERIALIZABLE, } def set_isolation_level(self, connection, level): try: level = self._isolation_lookup[level.replace("_", " ")] except KeyError as err: util.raise_( exc.ArgumentError( "Invalid value '%s' for isolation_level. " "Valid isolation levels for %s are %s" % (level, self.name, ", ".join(self._isolation_lookup)) ), replace_context=err, ) connection.set_isolation_level(level) def on_connect(self): extras = self._psycopg2_extras() extensions = self._psycopg2_extensions() fns = [] if self.client_encoding is not None: def on_connect(conn): conn.set_client_encoding(self.client_encoding) fns.append(on_connect) if self.isolation_level is not None: def on_connect(conn): self.set_isolation_level(conn, self.isolation_level) fns.append(on_connect) if self.dbapi and self.use_native_uuid: def on_connect(conn): extras.register_uuid(None, conn) fns.append(on_connect) if self.dbapi and self.use_native_unicode: def on_connect(conn): extensions.register_type(extensions.UNICODE, conn) extensions.register_type(extensions.UNICODEARRAY, conn) fns.append(on_connect) if self.dbapi and self.use_native_hstore: def on_connect(conn): hstore_oids = self._hstore_oids(conn) if hstore_oids is not None: oid, array_oid = hstore_oids kw = {"oid": oid} if util.py2k: kw["unicode"] = True if ( self.psycopg2_version >= self.FEATURE_VERSION_MAP["array_oid"] ): kw["array_oid"] = array_oid extras.register_hstore(conn, **kw) fns.append(on_connect) if self.dbapi and self._json_deserializer: def on_connect(conn): if self._has_native_json: extras.register_default_json( conn, loads=self._json_deserializer ) if self._has_native_jsonb: extras.register_default_jsonb( conn, loads=self._json_deserializer ) fns.append(on_connect) if fns: def on_connect(conn): for fn in fns: fn(conn) return on_connect else: return None def do_executemany(self, cursor, statement, parameters, context=None): if self.executemany_mode is EXECUTEMANY_DEFAULT: cursor.executemany(statement, parameters) return if ( self.executemany_mode is EXECUTEMANY_VALUES and context and context.isinsert and context.compiled.insert_single_values_expr ): executemany_values = ( "(%s)" % context.compiled.insert_single_values_expr ) # guard for statement that was altered via event hook or similar if executemany_values not in statement: executemany_values = None else: executemany_values = None if executemany_values: # Currently, SQLAlchemy does not pass "RETURNING" statements # into executemany(), since no DBAPI has ever supported that # until the introduction of psycopg2's executemany_values, so # we are not yet using the fetch=True flag. 
statement = statement.replace(executemany_values, "%s") if self.executemany_values_page_size: kwargs = {"page_size": self.executemany_values_page_size} else: kwargs = {} self._psycopg2_extras().execute_values( cursor, statement, parameters, template=executemany_values, **kwargs ) else: if self.executemany_batch_page_size: kwargs = {"page_size": self.executemany_batch_page_size} else: kwargs = {} self._psycopg2_extras().execute_batch( cursor, statement, parameters, **kwargs ) @util.memoized_instancemethod def _hstore_oids(self, conn): if self.psycopg2_version >= self.FEATURE_VERSION_MAP["hstore_adapter"]: extras = self._psycopg2_extras() oids = extras.HstoreAdapter.get_oids(conn) if oids is not None and oids[0]: return oids[0:2] return None def create_connect_args(self, url): opts = url.translate_connect_args(username="user") if opts: if "port" in opts: opts["port"] = int(opts["port"]) opts.update(url.query) # send individual dbname, user, password, host, port # parameters to psycopg2.connect() return ([], opts) elif url.query: # any other connection arguments, pass directly opts.update(url.query) return ([], opts) else: # no connection arguments whatsoever; psycopg2.connect() # requires that "dsn" be present as a blank string. return ([""], opts) def is_disconnect(self, e, connection, cursor): if isinstance(e, self.dbapi.Error): # check the "closed" flag. this might not be # present on old psycopg2 versions. Also, # this flag doesn't actually help in a lot of disconnect # situations, so don't rely on it. if getattr(connection, "closed", False): return True # checks based on strings. in the case that .closed # didn't cut it, fall back onto these. str_e = str(e).partition("\n")[0] for msg in [ # these error messages from libpq: interfaces/libpq/fe-misc.c # and interfaces/libpq/fe-secure.c. "terminating connection", "closed the connection", "connection not open", "could not receive data from server", "could not send data to server", # psycopg2 client errors, psycopg2/conenction.h, # psycopg2/cursor.h "connection already closed", "cursor already closed", # not sure where this path is originally from, it may # be obsolete. It really says "losed", not "closed". "losed the connection unexpectedly", # these can occur in newer SSL "connection has been closed unexpectedly", "SSL SYSCALL error: Bad file descriptor", "SSL SYSCALL error: EOF detected", "SSL error: decryption failed or bad record mac", "SSL SYSCALL error: Operation timed out", ]: idx = str_e.find(msg) if idx >= 0 and '"' not in str_e[:idx]: return True return False dialect = PGDialect_psycopg2
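# A brief usage sketch for the executemany helpers implemented above; the
# URL and table are hypothetical.  With executemany_mode='values',
# qualifying multi-row INSERT statements are routed through psycopg2's
# extras.execute_values() helper; 'batch' routes all executemany() calls
# through extras.execute_batch() instead.
#
#     from sqlalchemy import create_engine
#
#     engine = create_engine(
#         "postgresql+psycopg2://scott:tiger@localhost/test",
#         executemany_mode='values',
#         executemany_values_page_size=10000,
#     )
#     with engine.connect() as conn:
#         conn.execute(
#             my_table.insert(),
#             [{"data": "d1"}, {"data": "d2"}, {"data": "d3"}],
#         )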
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/provision.py
import time

from ... import exc
from ... import text
from ...testing.provision import create_db
from ...testing.provision import drop_db
from ...testing.provision import log
from ...testing.provision import temp_table_keyword_args


@create_db.for_db("postgresql")
def _pg_create_db(cfg, eng, ident):
    template_db = cfg.options.postgresql_templatedb
    with eng.connect().execution_options(
        isolation_level="AUTOCOMMIT"
    ) as conn:
        try:
            _pg_drop_db(cfg, conn, ident)
        except Exception:
            pass
        if not template_db:
            template_db = conn.scalar("select current_database()")
        attempt = 0
        while True:
            try:
                conn.execute(
                    "CREATE DATABASE %s TEMPLATE %s" % (ident, template_db)
                )
            except exc.OperationalError as err:
                attempt += 1
                if attempt >= 3:
                    raise
                if "accessed by other users" in str(err):
                    log.info(
                        "Waiting to create %s, URI %r, "
                        "template DB %s is in use, sleeping for .5",
                        ident,
                        eng.url,
                        template_db,
                    )
                    time.sleep(0.5)
            except:
                raise
            else:
                break


@drop_db.for_db("postgresql")
def _pg_drop_db(cfg, eng, ident):
    with eng.connect().execution_options(
        isolation_level="AUTOCOMMIT"
    ) as conn:
        conn.execute(
            text(
                "select pg_terminate_backend(pid) from pg_stat_activity "
                "where usename=current_user and pid != pg_backend_pid() "
                "and datname=:dname"
            ),
            dname=ident,
        )
        conn.execute("DROP DATABASE %s" % ident)


@temp_table_keyword_args.for_db("postgresql")
def _postgresql_temp_table_keyword_args(cfg, eng):
    return {"prefixes": ["TEMPORARY"]}
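# The hooks above are consumed by SQLAlchemy's test-suite provisioning
# system.  The essential pattern they demonstrate, sketched below with a
# hypothetical URL and database name, is that CREATE DATABASE / DROP
# DATABASE must run outside of a transaction, hence the AUTOCOMMIT
# execution option:
#
#     from sqlalchemy import create_engine
#
#     eng = create_engine("postgresql://scott:tiger@localhost/postgres")
#     with eng.connect().execution_options(
#         isolation_level="AUTOCOMMIT"
#     ) as conn:
#         conn.execute("CREATE DATABASE scratch_db TEMPLATE template1")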
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/pg8000.py
# postgresql/pg8000.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors <see AUTHORS # file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php r""" .. dialect:: postgresql+pg8000 :name: pg8000 :dbapi: pg8000 :connectstring: postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...] :url: https://pythonhosted.org/pg8000/ .. note:: The pg8000 dialect is **not tested as part of SQLAlchemy's continuous integration** and may have unresolved issues. The recommended PostgreSQL dialect is psycopg2. .. _pg8000_unicode: Unicode ------- pg8000 will encode / decode string values between it and the server using the PostgreSQL ``client_encoding`` parameter; by default this is the value in the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``. Typically, this can be changed to ``utf-8``, as a more useful default:: #client_encoding = sql_ascii # actually, defaults to database # encoding client_encoding = utf8 The ``client_encoding`` can be overridden for a session by executing the SQL: SET CLIENT_ENCODING TO 'utf8'; SQLAlchemy will execute this SQL on all new connections based on the value passed to :func:`_sa.create_engine` using the ``client_encoding`` parameter:: engine = create_engine( "postgresql+pg8000://user:pass@host/dbname", client_encoding='utf8') .. _pg8000_isolation_level: pg8000 Transaction Isolation Level ------------------------------------- The pg8000 dialect offers the same isolation level settings as that of the :ref:`psycopg2 <psycopg2_isolation_level>` dialect: * ``READ COMMITTED`` * ``READ UNCOMMITTED`` * ``REPEATABLE READ`` * ``SERIALIZABLE`` * ``AUTOCOMMIT`` .. versionadded:: 0.9.5 support for AUTOCOMMIT isolation level when using pg8000. .. seealso:: :ref:`postgresql_isolation_level` :ref:`psycopg2_isolation_level` """ # noqa import decimal import re from .base import _DECIMAL_TYPES from .base import _FLOAT_TYPES from .base import _INT_TYPES from .base import PGCompiler from .base import PGDialect from .base import PGExecutionContext from .base import PGIdentifierPreparer from .base import UUID from .json import JSON from ... import exc from ... import processors from ... import types as sqltypes from ... 
import util from ...sql.elements import quoted_name try: from uuid import UUID as _python_UUID # noqa except ImportError: _python_UUID = None class _PGNumeric(sqltypes.Numeric): def result_processor(self, dialect, coltype): if self.asdecimal: if coltype in _FLOAT_TYPES: return processors.to_decimal_processor_factory( decimal.Decimal, self._effective_decimal_return_scale ) elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: # pg8000 returns Decimal natively for 1700 return None else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype ) else: if coltype in _FLOAT_TYPES: # pg8000 returns float natively for 701 return None elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: return processors.to_float else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype ) class _PGNumericNoBind(_PGNumeric): def bind_processor(self, dialect): return None class _PGJSON(JSON): def result_processor(self, dialect, coltype): if dialect._dbapi_version > (1, 10, 1): return None # Has native JSON else: return super(_PGJSON, self).result_processor(dialect, coltype) class _PGUUID(UUID): def bind_processor(self, dialect): if not self.as_uuid: def process(value): if value is not None: value = _python_UUID(value) return value return process def result_processor(self, dialect, coltype): if not self.as_uuid: def process(value): if value is not None: value = str(value) return value return process class PGExecutionContext_pg8000(PGExecutionContext): pass class PGCompiler_pg8000(PGCompiler): def visit_mod_binary(self, binary, operator, **kw): return ( self.process(binary.left, **kw) + " %% " + self.process(binary.right, **kw) ) def post_process_text(self, text): if "%%" in text: util.warn( "The SQLAlchemy postgresql dialect " "now automatically escapes '%' in text() " "expressions to '%%'." 
) return text.replace("%", "%%") class PGIdentifierPreparer_pg8000(PGIdentifierPreparer): def _escape_identifier(self, value): value = value.replace(self.escape_quote, self.escape_to_quote) return value.replace("%", "%%") class PGDialect_pg8000(PGDialect): driver = "pg8000" supports_unicode_statements = True supports_unicode_binds = True default_paramstyle = "format" supports_sane_multi_rowcount = True execution_ctx_cls = PGExecutionContext_pg8000 statement_compiler = PGCompiler_pg8000 preparer = PGIdentifierPreparer_pg8000 description_encoding = "use_encoding" colspecs = util.update_copy( PGDialect.colspecs, { sqltypes.Numeric: _PGNumericNoBind, sqltypes.Float: _PGNumeric, JSON: _PGJSON, sqltypes.JSON: _PGJSON, UUID: _PGUUID, }, ) def __init__(self, client_encoding=None, **kwargs): PGDialect.__init__(self, **kwargs) self.client_encoding = client_encoding def initialize(self, connection): self.supports_sane_multi_rowcount = self._dbapi_version >= (1, 9, 14) super(PGDialect_pg8000, self).initialize(connection) @util.memoized_property def _dbapi_version(self): if self.dbapi and hasattr(self.dbapi, "__version__"): return tuple( [ int(x) for x in re.findall( r"(\d+)(?:[-\.]?|$)", self.dbapi.__version__ ) ] ) else: return (99, 99, 99) @classmethod def dbapi(cls): return __import__("pg8000") def create_connect_args(self, url): opts = url.translate_connect_args(username="user") if "port" in opts: opts["port"] = int(opts["port"]) opts.update(url.query) return ([], opts) def is_disconnect(self, e, connection, cursor): return "connection is closed" in str(e) def set_isolation_level(self, connection, level): level = level.replace("_", " ") # adjust for ConnectionFairy possibly being present if hasattr(connection, "connection"): connection = connection.connection if level == "AUTOCOMMIT": connection.autocommit = True elif level in self._isolation_lookup: connection.autocommit = False cursor = connection.cursor() cursor.execute( "SET SESSION CHARACTERISTICS AS TRANSACTION " "ISOLATION LEVEL %s" % level ) cursor.execute("COMMIT") cursor.close() else: raise exc.ArgumentError( "Invalid value '%s' for isolation_level. 
" "Valid isolation levels for %s are %s or AUTOCOMMIT" % (level, self.name, ", ".join(self._isolation_lookup)) ) def set_client_encoding(self, connection, client_encoding): # adjust for ConnectionFairy possibly being present if hasattr(connection, "connection"): connection = connection.connection cursor = connection.cursor() cursor.execute("SET CLIENT_ENCODING TO '" + client_encoding + "'") cursor.execute("COMMIT") cursor.close() def do_begin_twophase(self, connection, xid): connection.connection.tpc_begin((0, xid, "")) def do_prepare_twophase(self, connection, xid): connection.connection.tpc_prepare() def do_rollback_twophase( self, connection, xid, is_prepared=True, recover=False ): connection.connection.tpc_rollback((0, xid, "")) def do_commit_twophase( self, connection, xid, is_prepared=True, recover=False ): connection.connection.tpc_commit((0, xid, "")) def do_recover_twophase(self, connection): return [row[1] for row in connection.connection.tpc_recover()] def on_connect(self): fns = [] def on_connect(conn): conn.py_types[quoted_name] = conn.py_types[util.text_type] fns.append(on_connect) if self.client_encoding is not None: def on_connect(conn): self.set_client_encoding(conn, self.client_encoding) fns.append(on_connect) if self.isolation_level is not None: def on_connect(conn): self.set_isolation_level(conn, self.isolation_level) fns.append(on_connect) if len(fns) > 0: def on_connect(conn): for fn in fns: fn(conn) return on_connect else: return None dialect = PGDialect_pg8000
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/array.py
# postgresql/array.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import re from ... import types as sqltypes from ... import util from ...sql import expression from ...sql import operators def Any(other, arrexpr, operator=operators.eq): """A synonym for the :meth:`.ARRAY.Comparator.any` method. This method is legacy and is here for backwards-compatibility. .. seealso:: :func:`_expression.any_` """ return arrexpr.any(other, operator) def All(other, arrexpr, operator=operators.eq): """A synonym for the :meth:`.ARRAY.Comparator.all` method. This method is legacy and is here for backwards-compatibility. .. seealso:: :func:`_expression.all_` """ return arrexpr.all(other, operator) class array(expression.Tuple): """A PostgreSQL ARRAY literal. This is used to produce ARRAY literals in SQL expressions, e.g.:: from sqlalchemy.dialects.postgresql import array from sqlalchemy.dialects import postgresql from sqlalchemy import select, func stmt = select([ array([1,2]) + array([3,4,5]) ]) print(stmt.compile(dialect=postgresql.dialect())) Produces the SQL:: SELECT ARRAY[%(param_1)s, %(param_2)s] || ARRAY[%(param_3)s, %(param_4)s, %(param_5)s]) AS anon_1 An instance of :class:`.array` will always have the datatype :class:`_types.ARRAY`. The "inner" type of the array is inferred from the values present, unless the ``type_`` keyword argument is passed:: array(['foo', 'bar'], type_=CHAR) Multidimensional arrays are produced by nesting :class:`.array` constructs. The dimensionality of the final :class:`_types.ARRAY` type is calculated by recursively adding the dimensions of the inner :class:`_types.ARRAY` type:: stmt = select([ array([ array([1, 2]), array([3, 4]), array([column('q'), column('x')]) ]) ]) print(stmt.compile(dialect=postgresql.dialect())) Produces:: SELECT ARRAY[ARRAY[%(param_1)s, %(param_2)s], ARRAY[%(param_3)s, %(param_4)s], ARRAY[q, x]] AS anon_1 .. versionadded:: 1.3.6 added support for multidimensional array literals .. seealso:: :class:`_postgresql.ARRAY` """ __visit_name__ = "array" def __init__(self, clauses, **kw): super(array, self).__init__(*clauses, **kw) if isinstance(self.type, ARRAY): self.type = ARRAY( self.type.item_type, dimensions=self.type.dimensions + 1 if self.type.dimensions is not None else 2, ) else: self.type = ARRAY(self.type) def _bind_param(self, operator, obj, _assume_scalar=False, type_=None): if _assume_scalar or operator is operators.getitem: return expression.BindParameter( None, obj, _compared_to_operator=operator, type_=type_, _compared_to_type=self.type, unique=True, ) else: return array( [ self._bind_param( operator, o, _assume_scalar=True, type_=type_ ) for o in obj ] ) def self_group(self, against=None): if against in (operators.any_op, operators.all_op, operators.getitem): return expression.Grouping(self) else: return self CONTAINS = operators.custom_op("@>", precedence=5) CONTAINED_BY = operators.custom_op("<@", precedence=5) OVERLAP = operators.custom_op("&&", precedence=5) class ARRAY(sqltypes.ARRAY): """PostgreSQL ARRAY type. .. versionchanged:: 1.1 The :class:`_postgresql.ARRAY` type is now a subclass of the core :class:`_types.ARRAY` type. 
The :class:`_postgresql.ARRAY` type is constructed in the same way as the core :class:`_types.ARRAY` type; a member type is required, and a number of dimensions is recommended if the type is to be used for more than one dimension:: from sqlalchemy.dialects import postgresql mytable = Table("mytable", metadata, Column("data", postgresql.ARRAY(Integer, dimensions=2)) ) The :class:`_postgresql.ARRAY` type provides all operations defined on the core :class:`_types.ARRAY` type, including support for "dimensions", indexed access, and simple matching such as :meth:`.types.ARRAY.Comparator.any` and :meth:`.types.ARRAY.Comparator.all`. :class:`_postgresql.ARRAY` class also provides PostgreSQL-specific methods for containment operations, including :meth:`.postgresql.ARRAY.Comparator.contains` :meth:`.postgresql.ARRAY.Comparator.contained_by`, and :meth:`.postgresql.ARRAY.Comparator.overlap`, e.g.:: mytable.c.data.contains([1, 2]) The :class:`_postgresql.ARRAY` type may not be supported on all PostgreSQL DBAPIs; it is currently known to work on psycopg2 only. Additionally, the :class:`_postgresql.ARRAY` type does not work directly in conjunction with the :class:`.ENUM` type. For a workaround, see the special type at :ref:`postgresql_array_of_enum`. .. seealso:: :class:`_types.ARRAY` - base array type :class:`_postgresql.array` - produces a literal array value. """ class Comparator(sqltypes.ARRAY.Comparator): """Define comparison operations for :class:`_types.ARRAY`. Note that these operations are in addition to those provided by the base :class:`.types.ARRAY.Comparator` class, including :meth:`.types.ARRAY.Comparator.any` and :meth:`.types.ARRAY.Comparator.all`. """ def contains(self, other, **kwargs): """Boolean expression. Test if elements are a superset of the elements of the argument array expression. """ return self.operate(CONTAINS, other, result_type=sqltypes.Boolean) def contained_by(self, other): """Boolean expression. Test if elements are a proper subset of the elements of the argument array expression. """ return self.operate( CONTAINED_BY, other, result_type=sqltypes.Boolean ) def overlap(self, other): """Boolean expression. Test if array has elements in common with an argument array expression. """ return self.operate(OVERLAP, other, result_type=sqltypes.Boolean) comparator_factory = Comparator def __init__( self, item_type, as_tuple=False, dimensions=None, zero_indexes=False ): """Construct an ARRAY. E.g.:: Column('myarray', ARRAY(Integer)) Arguments are: :param item_type: The data type of items of this array. Note that dimensionality is irrelevant here, so multi-dimensional arrays like ``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as ``ARRAY(ARRAY(Integer))`` or such. :param as_tuple=False: Specify whether return results should be converted to tuples from lists. DBAPIs such as psycopg2 return lists by default. When tuples are returned, the results are hashable. :param dimensions: if non-None, the ARRAY will assume a fixed number of dimensions. This will cause the DDL emitted for this ARRAY to include the exact number of bracket clauses ``[]``, and will also optimize the performance of the type overall. Note that PG arrays are always implicitly "non-dimensioned", meaning they can store any number of dimensions no matter how they were declared. :param zero_indexes=False: when True, index values will be converted between Python zero-based and PostgreSQL one-based indexes, e.g. a value of one will be added to all index values before passing to the database. .. 
versionadded:: 0.9.5 """ if isinstance(item_type, ARRAY): raise ValueError( "Do not nest ARRAY types; ARRAY(basetype) " "handles multi-dimensional arrays of basetype" ) if isinstance(item_type, type): item_type = item_type() self.item_type = item_type self.as_tuple = as_tuple self.dimensions = dimensions self.zero_indexes = zero_indexes @property def hashable(self): return self.as_tuple @property def python_type(self): return list def compare_values(self, x, y): return x == y def _proc_array(self, arr, itemproc, dim, collection): if dim is None: arr = list(arr) if ( dim == 1 or dim is None and ( # this has to be (list, tuple), or at least # not hasattr('__iter__'), since Py3K strings # etc. have __iter__ not arr or not isinstance(arr[0], (list, tuple)) ) ): if itemproc: return collection(itemproc(x) for x in arr) else: return collection(arr) else: return collection( self._proc_array( x, itemproc, dim - 1 if dim is not None else None, collection, ) for x in arr ) @util.memoized_property def _require_cast(self): return self._against_native_enum or isinstance( self.item_type, sqltypes.JSON ) @util.memoized_property def _against_native_enum(self): return ( isinstance(self.item_type, sqltypes.Enum) and self.item_type.native_enum ) def bind_expression(self, bindvalue): if self._require_cast: return expression.cast(bindvalue, self) else: return bindvalue def bind_processor(self, dialect): item_proc = self.item_type.dialect_impl(dialect).bind_processor( dialect ) def process(value): if value is None: return value else: return self._proc_array( value, item_proc, self.dimensions, list ) return process def result_processor(self, dialect, coltype): item_proc = self.item_type.dialect_impl(dialect).result_processor( dialect, coltype ) def process(value): if value is None: return value else: return self._proc_array( value, item_proc, self.dimensions, tuple if self.as_tuple else list, ) if self._against_native_enum: super_rp = process def handle_raw_string(value): inner = re.match(r"^{(.*)}$", value).group(1) return inner.split(",") if inner else [] def process(value): if value is None: return value # isinstance(value, util.string_types) is required to handle # the # case where a TypeDecorator for and Array of Enum is # used like was required in sa < 1.3.17 return super_rp( handle_raw_string(value) if isinstance(value, util.string_types) else value ) return process
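# A minimal sketch of the containment comparators defined above; the table
# and column names are invented for illustration:
#
#     from sqlalchemy import Column, Integer, MetaData, Table, select
#     from sqlalchemy.dialects.postgresql import ARRAY
#
#     metadata = MetaData()
#     t = Table(
#         'array_example', metadata,
#         Column('id', Integer, primary_key=True),
#         Column('data', ARRAY(Integer, dimensions=1)),
#     )
#
#     # rows whose array contains both 1 and 2:  data @> ARRAY[1, 2]
#     stmt = select([t]).where(t.c.data.contains([1, 2]))
#
#     # rows whose array shares any element with [5, 6]:  data && ARRAY[5, 6]
#     stmt = select([t]).where(t.c.data.overlap([5, 6]))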
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/dml.py
# postgresql/on_conflict.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from . import ext from ... import util from ...sql import schema from ...sql.base import _generative from ...sql.dml import Insert as StandardInsert from ...sql.elements import ClauseElement from ...sql.expression import alias from ...util.langhelpers import public_factory __all__ = ("Insert", "insert") class Insert(StandardInsert): """PostgreSQL-specific implementation of INSERT. Adds methods for PG-specific syntaxes such as ON CONFLICT. The :class:`_postgresql.Insert` object is created using the :func:`sqlalchemy.dialects.postgresql.insert` function. .. versionadded:: 1.1 """ @util.memoized_property def excluded(self): """Provide the ``excluded`` namespace for an ON CONFLICT statement PG's ON CONFLICT clause allows reference to the row that would be inserted, known as ``excluded``. This attribute provides all columns in this row to be referenceable. .. seealso:: :ref:`postgresql_insert_on_conflict` - example of how to use :attr:`_expression.Insert.excluded` """ return alias(self.table, name="excluded").columns @_generative def on_conflict_do_update( self, constraint=None, index_elements=None, index_where=None, set_=None, where=None, ): r""" Specifies a DO UPDATE SET action for ON CONFLICT clause. Either the ``constraint`` or ``index_elements`` argument is required, but only one of these can be specified. :param constraint: The name of a unique or exclusion constraint on the table, or the constraint object itself if it has a .name attribute. :param index_elements: A sequence consisting of string column names, :class:`_schema.Column` objects, or other column expression objects that will be used to infer a target index. :param index_where: Additional WHERE criterion that can be used to infer a conditional target index. :param set\_: Required argument. A dictionary or other mapping object with column names as keys and expressions or literals as values, specifying the ``SET`` actions to take. If the target :class:`_schema.Column` specifies a ". key" attribute distinct from the column name, that key should be used. .. warning:: This dictionary does **not** take into account Python-specified default UPDATE values or generation functions, e.g. those specified using :paramref:`_schema.Column.onupdate`. These values will not be exercised for an ON CONFLICT style of UPDATE, unless they are manually specified in the :paramref:`.Insert.on_conflict_do_update.set_` dictionary. :param where: Optional argument. If present, can be a literal SQL string or an acceptable expression for a ``WHERE`` clause that restricts the rows affected by ``DO UPDATE SET``. Rows not meeting the ``WHERE`` condition will not be updated (effectively a ``DO NOTHING`` for those rows). .. versionadded:: 1.1 .. seealso:: :ref:`postgresql_insert_on_conflict` """ self._post_values_clause = OnConflictDoUpdate( constraint, index_elements, index_where, set_, where ) return self @_generative def on_conflict_do_nothing( self, constraint=None, index_elements=None, index_where=None ): """ Specifies a DO NOTHING action for ON CONFLICT clause. The ``constraint`` and ``index_elements`` arguments are optional, but only one of these can be specified. :param constraint: The name of a unique or exclusion constraint on the table, or the constraint object itself if it has a .name attribute. 
:param index_elements: A sequence consisting of string column names, :class:`_schema.Column` objects, or other column expression objects that will be used to infer a target index. :param index_where: Additional WHERE criterion that can be used to infer a conditional target index. .. versionadded:: 1.1 .. seealso:: :ref:`postgresql_insert_on_conflict` """ self._post_values_clause = OnConflictDoNothing( constraint, index_elements, index_where ) return self insert = public_factory( Insert, ".dialects.postgresql.insert", ".dialects.postgresql.Insert" ) class OnConflictClause(ClauseElement): def __init__(self, constraint=None, index_elements=None, index_where=None): if constraint is not None: if not isinstance(constraint, util.string_types) and isinstance( constraint, (schema.Index, schema.Constraint, ext.ExcludeConstraint), ): constraint = getattr(constraint, "name") or constraint if constraint is not None: if index_elements is not None: raise ValueError( "'constraint' and 'index_elements' are mutually exclusive" ) if isinstance(constraint, util.string_types): self.constraint_target = constraint self.inferred_target_elements = None self.inferred_target_whereclause = None elif isinstance(constraint, schema.Index): index_elements = constraint.expressions index_where = constraint.dialect_options["postgresql"].get( "where" ) elif isinstance(constraint, ext.ExcludeConstraint): index_elements = constraint.columns index_where = constraint.where else: index_elements = constraint.columns index_where = constraint.dialect_options["postgresql"].get( "where" ) if index_elements is not None: self.constraint_target = None self.inferred_target_elements = index_elements self.inferred_target_whereclause = index_where elif constraint is None: self.constraint_target = ( self.inferred_target_elements ) = self.inferred_target_whereclause = None class OnConflictDoNothing(OnConflictClause): __visit_name__ = "on_conflict_do_nothing" class OnConflictDoUpdate(OnConflictClause): __visit_name__ = "on_conflict_do_update" def __init__( self, constraint=None, index_elements=None, index_where=None, set_=None, where=None, ): super(OnConflictDoUpdate, self).__init__( constraint=constraint, index_elements=index_elements, index_where=index_where, ) if ( self.inferred_target_elements is None and self.constraint_target is None ): raise ValueError( "Either constraint or index_elements, " "but not both, must be specified unless DO NOTHING" ) if not isinstance(set_, dict) or not set_: raise ValueError("set parameter must be a non-empty dictionary") self.update_values_to_set = [ (key, value) for key, value in set_.items() ] self.update_whereclause = where
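# A compact usage sketch for the constructs defined above; my_table and
# conn are hypothetical, and the full narrative documentation lives in the
# base.py module docstring:
#
#     from sqlalchemy.dialects.postgresql import insert
#
#     stmt = insert(my_table).values(id=1, data='inserted value')
#     upsert = stmt.on_conflict_do_update(
#         index_elements=['id'],
#         set_={'data': stmt.excluded.data},
#     )
#     conn.execute(upsert)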
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/json.py
# postgresql/json.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from __future__ import absolute_import from ... import types as sqltypes from ... import util from ...sql import operators __all__ = ("JSON", "JSONB") idx_precedence = operators._PRECEDENCE[operators.json_getitem_op] ASTEXT = operators.custom_op( "->>", precedence=idx_precedence, natural_self_precedent=True, eager_grouping=True, ) JSONPATH_ASTEXT = operators.custom_op( "#>>", precedence=idx_precedence, natural_self_precedent=True, eager_grouping=True, ) HAS_KEY = operators.custom_op( "?", precedence=idx_precedence, natural_self_precedent=True, eager_grouping=True, ) HAS_ALL = operators.custom_op( "?&", precedence=idx_precedence, natural_self_precedent=True, eager_grouping=True, ) HAS_ANY = operators.custom_op( "?|", precedence=idx_precedence, natural_self_precedent=True, eager_grouping=True, ) CONTAINS = operators.custom_op( "@>", precedence=idx_precedence, natural_self_precedent=True, eager_grouping=True, ) CONTAINED_BY = operators.custom_op( "<@", precedence=idx_precedence, natural_self_precedent=True, eager_grouping=True, ) class JSONPathType(sqltypes.JSON.JSONPathType): def bind_processor(self, dialect): super_proc = self.string_bind_processor(dialect) def process(value): assert isinstance(value, util.collections_abc.Sequence) tokens = [util.text_type(elem) for elem in value] value = "{%s}" % (", ".join(tokens)) if super_proc: value = super_proc(value) return value return process def literal_processor(self, dialect): super_proc = self.string_literal_processor(dialect) def process(value): assert isinstance(value, util.collections_abc.Sequence) tokens = [util.text_type(elem) for elem in value] value = "{%s}" % (", ".join(tokens)) if super_proc: value = super_proc(value) return value return process class JSON(sqltypes.JSON): """Represent the PostgreSQL JSON type. This type is a specialization of the Core-level :class:`_types.JSON` type. Be sure to read the documentation for :class:`_types.JSON` for important tips regarding treatment of NULL values and ORM use. .. versionchanged:: 1.1 :class:`_postgresql.JSON` is now a PostgreSQL- specific specialization of the new :class:`_types.JSON` type. The operators provided by the PostgreSQL version of :class:`_types.JSON` include: * Index operations (the ``->`` operator):: data_table.c.data['some key'] data_table.c.data[5] * Index operations returning text (the ``->>`` operator):: data_table.c.data['some key'].astext == 'some value' Note that equivalent functionality is available via the :attr:`.JSON.Comparator.as_string` accessor. * Index operations with CAST (equivalent to ``CAST(col ->> ['some key'] AS <type>)``):: data_table.c.data['some key'].astext.cast(Integer) == 5 Note that equivalent functionality is available via the :attr:`.JSON.Comparator.as_integer` and similar accessors. * Path index operations (the ``#>`` operator):: data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')] * Path index operations returning text (the ``#>>`` operator):: data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')].astext == 'some value' .. versionchanged:: 1.1 The :meth:`_expression.ColumnElement.cast` operator on JSON objects now requires that the :attr:`.JSON.Comparator.astext` modifier be called explicitly, if the cast works only from a textual string. 
Index operations return an expression object whose type defaults to :class:`_types.JSON` by default, so that further JSON-oriented instructions may be called upon the result type. Custom serializers and deserializers are specified at the dialect level, that is using :func:`_sa.create_engine`. The reason for this is that when using psycopg2, the DBAPI only allows serializers at the per-cursor or per-connection level. E.g.:: engine = create_engine("postgresql://scott:tiger@localhost/test", json_serializer=my_serialize_fn, json_deserializer=my_deserialize_fn ) When using the psycopg2 dialect, the json_deserializer is registered against the database using ``psycopg2.extras.register_default_json``. .. seealso:: :class:`_types.JSON` - Core level JSON type :class:`_postgresql.JSONB` """ # noqa astext_type = sqltypes.Text() def __init__(self, none_as_null=False, astext_type=None): """Construct a :class:`_types.JSON` type. :param none_as_null: if True, persist the value ``None`` as a SQL NULL value, not the JSON encoding of ``null``. Note that when this flag is False, the :func:`.null` construct can still be used to persist a NULL value:: from sqlalchemy import null conn.execute(table.insert(), data=null()) .. versionchanged:: 0.9.8 - Added ``none_as_null``, and :func:`.null` is now supported in order to persist a NULL value. .. seealso:: :attr:`_types.JSON.NULL` :param astext_type: the type to use for the :attr:`.JSON.Comparator.astext` accessor on indexed attributes. Defaults to :class:`_types.Text`. .. versionadded:: 1.1 """ super(JSON, self).__init__(none_as_null=none_as_null) if astext_type is not None: self.astext_type = astext_type class Comparator(sqltypes.JSON.Comparator): """Define comparison operations for :class:`_types.JSON`.""" @property def astext(self): """On an indexed expression, use the "astext" (e.g. "->>") conversion when rendered in SQL. E.g.:: select([data_table.c.data['some key'].astext]) .. seealso:: :meth:`_expression.ColumnElement.cast` """ if isinstance(self.expr.right.type, sqltypes.JSON.JSONPathType): return self.expr.left.operate( JSONPATH_ASTEXT, self.expr.right, result_type=self.type.astext_type, ) else: return self.expr.left.operate( ASTEXT, self.expr.right, result_type=self.type.astext_type ) comparator_factory = Comparator class JSONB(JSON): """Represent the PostgreSQL JSONB type. The :class:`_postgresql.JSONB` type stores arbitrary JSONB format data, e. g.:: data_table = Table('data_table', metadata, Column('id', Integer, primary_key=True), Column('data', JSONB) ) with engine.connect() as conn: conn.execute( data_table.insert(), data = {"key1": "value1", "key2": "value2"} ) The :class:`_postgresql.JSONB` type includes all operations provided by :class:`_types.JSON`, including the same behaviors for indexing operations . It also adds additional operators specific to JSONB, including :meth:`.JSONB.Comparator.has_key`, :meth:`.JSONB.Comparator.has_all`, :meth:`.JSONB.Comparator.has_any`, :meth:`.JSONB.Comparator.contains`, and :meth:`.JSONB.Comparator.contained_by`. Like the :class:`_types.JSON` type, the :class:`_postgresql.JSONB` type does not detect in-place changes when used with the ORM, unless the :mod:`sqlalchemy.ext.mutable` extension is used. Custom serializers and deserializers are shared with the :class:`_types.JSON` class, using the ``json_serializer`` and ``json_deserializer`` keyword arguments. These must be specified at the dialect level using :func:`_sa.create_engine`. 
When using psycopg2, the serializers are associated with the jsonb type using ``psycopg2.extras.register_default_jsonb`` on a per-connection basis, in the same way that ``psycopg2.extras.register_default_json`` is used to register these handlers with the json type. .. versionadded:: 0.9.7 .. seealso:: :class:`_types.JSON` """ __visit_name__ = "JSONB" class Comparator(JSON.Comparator): """Define comparison operations for :class:`_types.JSON`.""" def has_key(self, other): """Boolean expression. Test for presence of a key. Note that the key may be a SQLA expression. """ return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean) def has_all(self, other): """Boolean expression. Test for presence of all keys in jsonb """ return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean) def has_any(self, other): """Boolean expression. Test for presence of any key in jsonb """ return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean) def contains(self, other, **kwargs): """Boolean expression. Test if keys (or array) are a superset of/contained the keys of the argument jsonb expression. """ return self.operate(CONTAINS, other, result_type=sqltypes.Boolean) def contained_by(self, other): """Boolean expression. Test if keys are a proper subset of the keys of the argument jsonb expression. """ return self.operate( CONTAINED_BY, other, result_type=sqltypes.Boolean ) comparator_factory = Comparator
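# A minimal sketch of the JSONB comparators defined above, reusing the
# hypothetical data_table from the JSONB docstring:
#
#     from sqlalchemy import select
#
#     # data ? 'key1'
#     stmt = select([data_table]).where(data_table.c.data.has_key('key1'))
#
#     # data @> '{"key1": "value1"}'
#     stmt = select([data_table]).where(
#         data_table.c.data.contains({"key1": "value1"})
#     )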
0
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/base.py
# postgresql/base.py # Copyright (C) 2005-2020 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php r""" .. dialect:: postgresql :name: PostgreSQL .. _postgresql_sequences: Sequences/SERIAL/IDENTITY ------------------------- PostgreSQL supports sequences, and SQLAlchemy uses these as the default means of creating new primary key values for integer-based primary key columns. When creating tables, SQLAlchemy will issue the ``SERIAL`` datatype for integer-based primary key columns, which generates a sequence and server side default corresponding to the column. To specify a specific named sequence to be used for primary key generation, use the :func:`~sqlalchemy.schema.Sequence` construct:: Table('sometable', metadata, Column('id', Integer, Sequence('some_id_seq'), primary_key=True) ) When SQLAlchemy issues a single INSERT statement, to fulfill the contract of having the "last insert identifier" available, a RETURNING clause is added to the INSERT statement which specifies the primary key columns should be returned after the statement completes. The RETURNING functionality only takes place if PostgreSQL 8.2 or later is in use. As a fallback approach, the sequence, whether specified explicitly or implicitly via ``SERIAL``, is executed independently beforehand, the returned value to be used in the subsequent insert. Note that when an :func:`~sqlalchemy.sql.expression.insert()` construct is executed using "executemany" semantics, the "last inserted identifier" functionality does not apply; no RETURNING clause is emitted nor is the sequence pre-executed in this case. To force the usage of RETURNING by default off, specify the flag ``implicit_returning=False`` to :func:`_sa.create_engine`. PostgreSQL 10 IDENTITY columns ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ PostgreSQL 10 has a new IDENTITY feature that supersedes the use of SERIAL. Built-in support for rendering of IDENTITY is not available yet, however the following compilation hook may be used to replace occurrences of SERIAL with IDENTITY:: from sqlalchemy.schema import CreateColumn from sqlalchemy.ext.compiler import compiles @compiles(CreateColumn, 'postgresql') def use_identity(element, compiler, **kw): text = compiler.visit_create_column(element, **kw) text = text.replace("SERIAL", "INT GENERATED BY DEFAULT AS IDENTITY") return text Using the above, a table such as:: t = Table( 't', m, Column('id', Integer, primary_key=True), Column('data', String) ) Will generate on the backing database as:: CREATE TABLE t ( id INT GENERATED BY DEFAULT AS IDENTITY NOT NULL, data VARCHAR, PRIMARY KEY (id) ) .. _postgresql_isolation_level: Transaction Isolation Level --------------------------- All PostgreSQL dialects support setting of transaction isolation level both via a dialect-specific parameter :paramref:`_sa.create_engine.isolation_level` accepted by :func:`_sa.create_engine`, as well as the :paramref:`.Connection.execution_options.isolation_level` argument as passed to :meth:`_engine.Connection.execution_options`. When using a non-psycopg2 dialect, this feature works by issuing the command ``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL <level>`` for each new connection. For the special AUTOCOMMIT isolation level, DBAPI-specific techniques are used. 
To set isolation level using :func:`_sa.create_engine`:: engine = create_engine( "postgresql+pg8000://scott:tiger@localhost/test", isolation_level="READ UNCOMMITTED" ) To set using per-connection execution options:: connection = engine.connect() connection = connection.execution_options( isolation_level="READ COMMITTED" ) Valid values for ``isolation_level`` include: * ``READ COMMITTED`` * ``READ UNCOMMITTED`` * ``REPEATABLE READ`` * ``SERIALIZABLE`` * ``AUTOCOMMIT`` - on psycopg2 / pg8000 only .. seealso:: :ref:`psycopg2_isolation_level` :ref:`pg8000_isolation_level` .. _postgresql_schema_reflection: Remote-Schema Table Introspection and PostgreSQL search_path ------------------------------------------------------------ **TL;DR;**: keep the ``search_path`` variable set to its default of ``public``, name schemas **other** than ``public`` explicitly within ``Table`` definitions. The PostgreSQL dialect can reflect tables from any schema. The :paramref:`_schema.Table.schema` argument, or alternatively the :paramref:`.MetaData.reflect.schema` argument determines which schema will be searched for the table or tables. The reflected :class:`_schema.Table` objects will in all cases retain this ``.schema`` attribute as was specified. However, with regards to tables which these :class:`_schema.Table` objects refer to via foreign key constraint, a decision must be made as to how the ``.schema`` is represented in those remote tables, in the case where that remote schema name is also a member of the current `PostgreSQL search path <http://www.postgresql.org/docs/current/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_. By default, the PostgreSQL dialect mimics the behavior encouraged by PostgreSQL's own ``pg_get_constraintdef()`` builtin procedure. This function returns a sample definition for a particular foreign key constraint, omitting the referenced schema name from that definition when the name is also in the PostgreSQL schema search path. The interaction below illustrates this behavior:: test=> CREATE TABLE test_schema.referred(id INTEGER PRIMARY KEY); CREATE TABLE test=> CREATE TABLE referring( test(> id INTEGER PRIMARY KEY, test(> referred_id INTEGER REFERENCES test_schema.referred(id)); CREATE TABLE test=> SET search_path TO public, test_schema; test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n test-> ON n.oid = c.relnamespace test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid test-> WHERE c.relname='referring' AND r.contype = 'f' test-> ; pg_get_constraintdef --------------------------------------------------- FOREIGN KEY (referred_id) REFERENCES referred(id) (1 row) Above, we created a table ``referred`` as a member of the remote schema ``test_schema``, however when we added ``test_schema`` to the PG ``search_path`` and then asked ``pg_get_constraintdef()`` for the ``FOREIGN KEY`` syntax, ``test_schema`` was not included in the output of the function. 
On the other hand, if we set the search path back to the typical default of ``public``:: test=> SET search_path TO public; SET The same query against ``pg_get_constraintdef()`` now returns the fully schema-qualified name for us:: test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n test-> ON n.oid = c.relnamespace test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid test-> WHERE c.relname='referring' AND r.contype = 'f'; pg_get_constraintdef --------------------------------------------------------------- FOREIGN KEY (referred_id) REFERENCES test_schema.referred(id) (1 row) SQLAlchemy will by default use the return value of ``pg_get_constraintdef()`` in order to determine the remote schema name. That is, if our ``search_path`` were set to include ``test_schema``, and we invoked a table reflection process as follows:: >>> from sqlalchemy import Table, MetaData, create_engine >>> engine = create_engine("postgresql://scott:tiger@localhost/test") >>> with engine.connect() as conn: ... conn.execute("SET search_path TO test_schema, public") ... meta = MetaData() ... referring = Table('referring', meta, ... autoload=True, autoload_with=conn) ... <sqlalchemy.engine.result.ResultProxy object at 0x101612ed0> The above process would deliver to the :attr:`_schema.MetaData.tables` collection ``referred`` table named **without** the schema:: >>> meta.tables['referred'].schema is None True To alter the behavior of reflection such that the referred schema is maintained regardless of the ``search_path`` setting, use the ``postgresql_ignore_search_path`` option, which can be specified as a dialect-specific argument to both :class:`_schema.Table` as well as :meth:`_schema.MetaData.reflect`:: >>> with engine.connect() as conn: ... conn.execute("SET search_path TO test_schema, public") ... meta = MetaData() ... referring = Table('referring', meta, autoload=True, ... autoload_with=conn, ... postgresql_ignore_search_path=True) ... <sqlalchemy.engine.result.ResultProxy object at 0x1016126d0> We will now have ``test_schema.referred`` stored as schema-qualified:: >>> meta.tables['test_schema.referred'].schema 'test_schema' .. sidebar:: Best Practices for PostgreSQL Schema reflection The description of PostgreSQL schema reflection behavior is complex, and is the product of many years of dealing with widely varied use cases and user preferences. But in fact, there's no need to understand any of it if you just stick to the simplest use pattern: leave the ``search_path`` set to its default of ``public`` only, never refer to the name ``public`` as an explicit schema name otherwise, and refer to all other schema names explicitly when building up a :class:`_schema.Table` object. The options described here are only for those users who can't, or prefer not to, stay within these guidelines. Note that **in all cases**, the "default" schema is always reflected as ``None``. The "default" schema on PostgreSQL is that which is returned by the PostgreSQL ``current_schema()`` function. On a typical PostgreSQL installation, this is the name ``public``. So a table that refers to another which is in the ``public`` (i.e. default) schema will always have the ``.schema`` attribute set to ``None``. .. versionadded:: 0.9.2 Added the ``postgresql_ignore_search_path`` dialect-level option accepted by :class:`_schema.Table` and :meth:`_schema.MetaData.reflect`. .. 
seealso:: `The Schema Search Path <http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_ - on the PostgreSQL website. INSERT/UPDATE...RETURNING ------------------------- The dialect supports PG 8.2's ``INSERT..RETURNING``, ``UPDATE..RETURNING`` and ``DELETE..RETURNING`` syntaxes. ``INSERT..RETURNING`` is used by default for single-row INSERT statements in order to fetch newly generated primary key identifiers. To specify an explicit ``RETURNING`` clause, use the :meth:`._UpdateBase.returning` method on a per-statement basis:: # INSERT..RETURNING result = table.insert().returning(table.c.col1, table.c.col2).\ values(name='foo') print(result.fetchall()) # UPDATE..RETURNING result = table.update().returning(table.c.col1, table.c.col2).\ where(table.c.name=='foo').values(name='bar') print(result.fetchall()) # DELETE..RETURNING result = table.delete().returning(table.c.col1, table.c.col2).\ where(table.c.name=='foo') print(result.fetchall()) .. _postgresql_insert_on_conflict: INSERT...ON CONFLICT (Upsert) ------------------------------ Starting with version 9.5, PostgreSQL allows "upserts" (update or insert) of rows into a table via the ``ON CONFLICT`` clause of the ``INSERT`` statement. A candidate row will only be inserted if that row does not violate any unique constraints. In the case of a unique constraint violation, a secondary action can occur which can be either "DO UPDATE", indicating that the data in the target row should be updated, or "DO NOTHING", which indicates to silently skip this row. Conflicts are determined using existing unique constraints and indexes. These constraints may be identified either using their name as stated in DDL, or they may be *inferred* by stating the columns and conditions that comprise the indexes. 
SQLAlchemy provides ``ON CONFLICT`` support via the PostgreSQL-specific :func:`_postgresql.insert()` function, which provides the generative methods :meth:`~.postgresql.Insert.on_conflict_do_update` and :meth:`~.postgresql.Insert.on_conflict_do_nothing`:: from sqlalchemy.dialects.postgresql import insert insert_stmt = insert(my_table).values( id='some_existing_id', data='inserted value') do_nothing_stmt = insert_stmt.on_conflict_do_nothing( index_elements=['id'] ) conn.execute(do_nothing_stmt) do_update_stmt = insert_stmt.on_conflict_do_update( constraint='pk_my_table', set_=dict(data='updated value') ) conn.execute(do_update_stmt) Both methods supply the "target" of the conflict using either a named constraint or column inference: * The :paramref:`.Insert.on_conflict_do_update.index_elements` argument specifies a sequence containing string column names, :class:`_schema.Column` objects, and/or SQL expression elements, which would identify a unique index:: do_update_stmt = insert_stmt.on_conflict_do_update( index_elements=['id'], set_=dict(data='updated value') ) do_update_stmt = insert_stmt.on_conflict_do_update( index_elements=[my_table.c.id], set_=dict(data='updated value') ) * When using :paramref:`.Insert.on_conflict_do_update.index_elements` to infer an index, a partial index can be inferred by also specifying the :paramref:`.Insert.on_conflict_do_update.index_where` parameter:: from sqlalchemy.dialects.postgresql import insert stmt = insert(my_table).values(user_email='a@b.com', data='inserted data') stmt = stmt.on_conflict_do_update( index_elements=[my_table.c.user_email], index_where=my_table.c.user_email.like('%@gmail.com'), set_=dict(data=stmt.excluded.data) ) conn.execute(stmt) * The :paramref:`.Insert.on_conflict_do_update.constraint` argument is used to specify an index directly rather than inferring it. This can be the name of a UNIQUE constraint, a PRIMARY KEY constraint, or an INDEX:: do_update_stmt = insert_stmt.on_conflict_do_update( constraint='my_table_idx_1', set_=dict(data='updated value') ) do_update_stmt = insert_stmt.on_conflict_do_update( constraint='my_table_pk', set_=dict(data='updated value') ) * The :paramref:`.Insert.on_conflict_do_update.constraint` argument may also refer to a SQLAlchemy construct representing a constraint, e.g. :class:`.UniqueConstraint`, :class:`.PrimaryKeyConstraint`, :class:`.Index`, or :class:`.ExcludeConstraint`. In this use, if the constraint has a name, it is used directly. Otherwise, if the constraint is unnamed, then inference will be used, where the expressions and optional WHERE clause of the constraint will be spelled out in the construct. This use is especially convenient to refer to the named or unnamed primary key of a :class:`_schema.Table` using the :attr:`_schema.Table.primary_key` attribute:: do_update_stmt = insert_stmt.on_conflict_do_update( constraint=my_table.primary_key, set_=dict(data='updated value') ) ``ON CONFLICT...DO UPDATE`` is used to perform an update of the already existing row, using any combination of new values as well as values from the proposed insertion. These values are specified using the :paramref:`.Insert.on_conflict_do_update.set_` parameter. This parameter accepts a dictionary which consists of direct values for UPDATE:: from sqlalchemy.dialects.postgresql import insert stmt = insert(my_table).values(id='some_id', data='inserted value') do_update_stmt = stmt.on_conflict_do_update( index_elements=['id'], set_=dict(data='updated value') ) conn.execute(do_update_stmt) .. 
warning:: The :meth:`_expression.Insert.on_conflict_do_update` method does **not** take into account Python-side default UPDATE values or generation functions, e.g. those specified using :paramref:`_schema.Column.onupdate`. These values will not be exercised for an ON CONFLICT style of UPDATE, unless they are manually specified in the :paramref:`.Insert.on_conflict_do_update.set_` dictionary. In order to refer to the proposed insertion row, the special alias :attr:`~.postgresql.Insert.excluded` is available as an attribute on the :class:`_postgresql.Insert` object; this object is a :class:`_expression.ColumnCollection` alias which contains all columns of the target table:: from sqlalchemy.dialects.postgresql import insert stmt = insert(my_table).values( id='some_id', data='inserted value', author='jlh') do_update_stmt = stmt.on_conflict_do_update( index_elements=['id'], set_=dict(data='updated value', author=stmt.excluded.author) ) conn.execute(do_update_stmt) The :meth:`_expression.Insert.on_conflict_do_update` method also accepts a WHERE clause using the :paramref:`.Insert.on_conflict_do_update.where` parameter, which will limit those rows which receive an UPDATE:: from sqlalchemy.dialects.postgresql import insert stmt = insert(my_table).values( id='some_id', data='inserted value', author='jlh') on_update_stmt = stmt.on_conflict_do_update( index_elements=['id'], set_=dict(data='updated value', author=stmt.excluded.author), where=(my_table.c.status == 2) ) conn.execute(on_update_stmt) ``ON CONFLICT`` may also be used to skip inserting a row entirely if any conflict with a unique or exclusion constraint occurs; below this is illustrated using the :meth:`~.postgresql.Insert.on_conflict_do_nothing` method:: from sqlalchemy.dialects.postgresql import insert stmt = insert(my_table).values(id='some_id', data='inserted value') stmt = stmt.on_conflict_do_nothing(index_elements=['id']) conn.execute(stmt) If ``DO NOTHING`` is used without specifying any columns or constraint, it has the effect of skipping the INSERT for any unique or exclusion constraint violation which occurs:: from sqlalchemy.dialects.postgresql import insert stmt = insert(my_table).values(id='some_id', data='inserted value') stmt = stmt.on_conflict_do_nothing() conn.execute(stmt) .. versionadded:: 1.1 Added support for PostgreSQL ON CONFLICT clauses .. seealso:: `INSERT .. ON CONFLICT <http://www.postgresql.org/docs/current/static/sql-insert.html#SQL-ON-CONFLICT>`_ - in the PostgreSQL documentation. .. _postgresql_match: Full Text Search ---------------- SQLAlchemy makes available the PostgreSQL ``@@`` operator via the :meth:`_expression.ColumnElement.match` method on any textual column expression. On a PostgreSQL dialect, an expression like the following:: select([sometable.c.text.match("search string")]) will emit to the database:: SELECT text @@ to_tsquery('search string') FROM table The PostgreSQL text search functions such as ``to_tsquery()`` and ``to_tsvector()`` are available explicitly using the standard :data:`.func` construct. 
For example:: select([ func.to_tsvector('fat cats ate rats').match('cat & rat') ]) Emits the equivalent of:: SELECT to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat') The :class:`_postgresql.TSVECTOR` type can provide for explicit CAST:: from sqlalchemy.dialects.postgresql import TSVECTOR from sqlalchemy import select, cast select([cast("some text", TSVECTOR)]) produces a statement equivalent to:: SELECT CAST('some text' AS TSVECTOR) AS anon_1 Full Text Searches in PostgreSQL are influenced by a combination of: the PostgreSQL setting of ``default_text_search_config``, the ``regconfig`` used to build the GIN/GiST indexes, and the ``regconfig`` optionally passed in during a query. When performing a Full Text Search against a column that has a GIN or GiST index that is already pre-computed (which is common on full text searches), one may need to explicitly pass in a particular PostgreSQL ``regconfig`` value to ensure the query-planner utilizes the index and does not re-compute the column on demand. In order to provide for this explicit query planning, or to use different search strategies, the ``match`` method accepts a ``postgresql_regconfig`` keyword argument:: select([mytable.c.id]).where( mytable.c.title.match('somestring', postgresql_regconfig='english') ) Emits the equivalent of:: SELECT mytable.id FROM mytable WHERE mytable.title @@ to_tsquery('english', 'somestring') One can also specifically pass in a ``regconfig`` value to the ``to_tsvector()`` command as the initial argument:: select([mytable.c.id]).where( func.to_tsvector('english', mytable.c.title )\ .match('somestring', postgresql_regconfig='english') ) produces a statement equivalent to:: SELECT mytable.id FROM mytable WHERE to_tsvector('english', mytable.title) @@ to_tsquery('english', 'somestring') It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from PostgreSQL to ensure that you are generating queries with SQLAlchemy that take full advantage of any indexes you may have created for full text search. FROM ONLY ... ------------- The dialect supports PostgreSQL's ONLY keyword for targeting only a particular table in an inheritance hierarchy. This can be used to produce the ``SELECT ... FROM ONLY``, ``UPDATE ONLY ...``, and ``DELETE FROM ONLY ...`` syntaxes. It uses SQLAlchemy's hints mechanism:: # SELECT ... FROM ONLY ... result = table.select().with_hint(table, 'ONLY', 'postgresql') print(result.fetchall()) # UPDATE ONLY ... table.update(values=dict(foo='bar')).with_hint('ONLY', dialect_name='postgresql') # DELETE FROM ONLY ... table.delete().with_hint('ONLY', dialect_name='postgresql') .. _postgresql_indexes: PostgreSQL-Specific Index Options --------------------------------- Several extensions to the :class:`.Index` construct are available, specific to the PostgreSQL dialect. .. _postgresql_partial_indexes: Partial Indexes ^^^^^^^^^^^^^^^ Partial indexes add criteria to the index definition so that the index is applied to a subset of rows. These can be specified on :class:`.Index` using the ``postgresql_where`` keyword argument:: Index('my_index', my_table.c.id, postgresql_where=my_table.c.value > 10) Operator Classes ^^^^^^^^^^^^^^^^ PostgreSQL allows the specification of an *operator class* for each column of an index (see http://www.postgresql.org/docs/8.3/interactive/indexes-opclass.html). 
The :class:`.Index` construct allows these to be specified via the ``postgresql_ops`` keyword argument:: Index( 'my_index', my_table.c.id, my_table.c.data, postgresql_ops={ 'data': 'text_pattern_ops', 'id': 'int4_ops' }) Note that the keys in the ``postgresql_ops`` dictionary are the "key" name of the :class:`_schema.Column`, i.e. the name used to access it from the ``.c`` collection of :class:`_schema.Table`, which can be configured to be different from the actual name of the column as expressed in the database. If ``postgresql_ops`` is to be used against a complex SQL expression such as a function call, then the expression must be given a label that is identified in the dictionary by name, e.g.:: Index( 'my_index', my_table.c.id, func.lower(my_table.c.data).label('data_lower'), postgresql_ops={ 'data_lower': 'text_pattern_ops', 'id': 'int4_ops' }) Index Types ^^^^^^^^^^^ PostgreSQL provides several index types: B-Tree, Hash, GiST, and GIN, as well as the ability for users to create their own (see http://www.postgresql.org/docs/8.3/static/indexes-types.html). These can be specified on :class:`.Index` using the ``postgresql_using`` keyword argument:: Index('my_index', my_table.c.data, postgresql_using='gin') The value passed to the keyword argument will simply be passed through to the underlying CREATE INDEX command, so it *must* be a valid index type for your version of PostgreSQL. .. _postgresql_index_storage: Index Storage Parameters ^^^^^^^^^^^^^^^^^^^^^^^^ PostgreSQL allows storage parameters to be set on indexes. The storage parameters available depend on the index method used by the index. Storage parameters can be specified on :class:`.Index` using the ``postgresql_with`` keyword argument:: Index('my_index', my_table.c.data, postgresql_with={"fillfactor": 50}) .. versionadded:: 1.0.6 PostgreSQL allows the tablespace in which to create the index to be specified. The tablespace can be specified on :class:`.Index` using the ``postgresql_tablespace`` keyword argument:: Index('my_index', my_table.c.data, postgresql_tablespace='my_tablespace') .. versionadded:: 1.1 Note that the same option is available on :class:`_schema.Table` as well. .. _postgresql_index_concurrently: Indexes with CONCURRENTLY ^^^^^^^^^^^^^^^^^^^^^^^^^ The PostgreSQL index option CONCURRENTLY is supported by passing the flag ``postgresql_concurrently`` to the :class:`.Index` construct:: tbl = Table('testtbl', m, Column('data', Integer)) idx1 = Index('test_idx1', tbl.c.data, postgresql_concurrently=True) The above index construct will render DDL for CREATE INDEX, assuming PostgreSQL 8.2 or higher is detected or for a connection-less dialect, as:: CREATE INDEX CONCURRENTLY test_idx1 ON testtbl (data) For DROP INDEX, assuming PostgreSQL 9.2 or higher is detected or for a connection-less dialect, it will emit:: DROP INDEX CONCURRENTLY test_idx1 .. versionadded:: 1.1 support for CONCURRENTLY on DROP INDEX. The CONCURRENTLY keyword is now only emitted if a high enough version of PostgreSQL is detected on the connection (or for a connection-less dialect). When using CONCURRENTLY, the PostgreSQL database requires that the statement be invoked outside of a transaction block. 
The Python DBAPI enforces that even for a single statement, a transaction is present, so to use this construct, the DBAPI's "autocommit" mode must be used:: metadata = MetaData() table = Table( "foo", metadata, Column("id", String)) index = Index( "foo_idx", table.c.id, postgresql_concurrently=True) with engine.connect() as conn: with conn.execution_options(isolation_level='AUTOCOMMIT'): table.create(conn) .. seealso:: :ref:`postgresql_isolation_level` .. _postgresql_index_reflection: PostgreSQL Index Reflection --------------------------- The PostgreSQL database creates a UNIQUE INDEX implicitly whenever the UNIQUE CONSTRAINT construct is used. When inspecting a table using :class:`_reflection.Inspector`, the :meth:`_reflection.Inspector.get_indexes` and the :meth:`_reflection.Inspector.get_unique_constraints` methods will report on these two constructs distinctly; in the case of the index, the key ``duplicates_constraint`` will be present in the index entry if it is detected as mirroring a constraint. When performing reflection using ``Table(..., autoload=True)``, the UNIQUE INDEX is **not** returned in :attr:`_schema.Table.indexes` when it is detected as mirroring a :class:`.UniqueConstraint` in the :attr:`_schema.Table.constraints` collection. .. versionchanged:: 1.0.0 - :class:`_schema.Table` reflection now includes :class:`.UniqueConstraint` objects present in the :attr:`_schema.Table.constraints` collection; the PostgreSQL backend will no longer include a "mirrored" :class:`.Index` construct in :attr:`_schema.Table.indexes` if it is detected as corresponding to a unique constraint. Special Reflection Options -------------------------- The :class:`_reflection.Inspector` used for the PostgreSQL backend is an instance of :class:`.PGInspector`, which offers additional methods:: from sqlalchemy import create_engine, inspect engine = create_engine("postgresql+psycopg2://localhost/test") insp = inspect(engine) # will be a PGInspector print(insp.get_enums()) .. autoclass:: PGInspector :members: .. _postgresql_table_options: PostgreSQL Table Options ------------------------ Several options for CREATE TABLE are supported directly by the PostgreSQL dialect in conjunction with the :class:`_schema.Table` construct: * ``TABLESPACE``:: Table("some_table", metadata, ..., postgresql_tablespace='some_tablespace') The above option is also available on the :class:`.Index` construct. * ``ON COMMIT``:: Table("some_table", metadata, ..., postgresql_on_commit='PRESERVE ROWS') * ``WITH OIDS``:: Table("some_table", metadata, ..., postgresql_with_oids=True) * ``WITHOUT OIDS``:: Table("some_table", metadata, ..., postgresql_with_oids=False) * ``INHERITS``:: Table("some_table", metadata, ..., postgresql_inherits="some_supertable") Table("some_table", metadata, ..., postgresql_inherits=("t1", "t2", ...)) .. versionadded:: 1.0.0 * ``PARTITION BY``:: Table("some_table", metadata, ..., postgresql_partition_by='LIST (part_column)') .. versionadded:: 1.2.6 .. seealso:: `PostgreSQL CREATE TABLE options <http://www.postgresql.org/docs/current/static/sql-createtable.html>`_ Table values, Row and Tuple objects ----------------------------------- Row Types ^^^^^^^^^ Built-in support for rendering a ``ROW`` is not available yet; however, the :func:`_expression.tuple_` construct may be used in its place. 
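As a minimal sketch of that first alternative, assuming a ``table`` with ``id`` and ``fk`` columns::

    from sqlalchemy import tuple_

    table.select().where(tuple_(table.c.id, table.c.fk) > (1, 2))

renders the row-value comparison ``(id, fk) > (1, 2)`` in the WHERE clause.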
Another alternative is to use the :attr:`_sa.func` generator with ``func.ROW`` :: table.select().where( tuple_(table.c.id, table.c.fk) > (1,2) ).where(func.ROW(table.c.id, table.c.fk) < func.ROW(3, 7)) Will generate the row-wise comparison:: SELECT * FROM table WHERE (id, fk) > (1, 2) AND ROW(id, fk) < ROW(3, 7) .. seealso:: `PostgreSQL Row Constructors <https://www.postgresql.org/docs/current/sql-expressions.html#SQL-SYNTAX-ROW-CONSTRUCTORS>`_ `PostgreSQL Row Constructor Comparison <https://www.postgresql.org/docs/current/functions-comparisons.html#ROW-WISE-COMPARISON>`_ Table Types ^^^^^^^^^^^ PostgreSQL also supports passing a table as an argument to a function. This is not available yet in SQLAlchemy; however, the :func:`_expression.literal_column` function with the name of the table may be used in its place:: select(['*']).select_from(func.my_function(literal_column('my_table'))) Will generate the SQL:: SELECT * FROM my_function(my_table) ARRAY Types ----------- The PostgreSQL dialect supports arrays, both as multidimensional column types as well as array literals: * :class:`_postgresql.ARRAY` - ARRAY datatype * :class:`_postgresql.array` - array literal * :func:`_postgresql.array_agg` - ARRAY_AGG SQL function * :class:`_postgresql.aggregate_order_by` - helper for PG's ORDER BY aggregate function syntax. JSON Types ---------- The PostgreSQL dialect supports both JSON and JSONB datatypes, including psycopg2's native support and support for all of PostgreSQL's special operators: * :class:`_postgresql.JSON` * :class:`_postgresql.JSONB` HSTORE Type ----------- The PostgreSQL HSTORE type as well as hstore literals are supported: * :class:`_postgresql.HSTORE` - HSTORE datatype * :class:`_postgresql.hstore` - hstore literal ENUM Types ---------- PostgreSQL has an independently creatable TYPE structure which is used to implement an enumerated type. This approach introduces significant complexity on the SQLAlchemy side in terms of when this type should be CREATED and DROPPED. The type object is also an independently reflectable entity. The following sections should be consulted: * :class:`_postgresql.ENUM` - DDL and typing support for ENUM. * :meth:`.PGInspector.get_enums` - retrieve a listing of current ENUM types * :meth:`.postgresql.ENUM.create` , :meth:`.postgresql.ENUM.drop` - individual CREATE and DROP commands for ENUM. .. _postgresql_array_of_enum: Using ENUM with ARRAY ^^^^^^^^^^^^^^^^^^^^^ The combination of ENUM and ARRAY is not directly supported by backend DBAPIs at this time. Prior to SQLAlchemy 1.3.17, a special workaround was needed in order to allow this combination to work, described below. .. versionchanged:: 1.3.17 The combination of ENUM and ARRAY is now directly handled by SQLAlchemy's implementation without any workarounds needed. .. 
sourcecode:: python import re import sqlalchemy as sa from sqlalchemy import TypeDecorator from sqlalchemy.dialects.postgresql import ARRAY class ArrayOfEnum(TypeDecorator): impl = ARRAY def bind_expression(self, bindvalue): return sa.cast(bindvalue, self) def result_processor(self, dialect, coltype): super_rp = super(ArrayOfEnum, self).result_processor( dialect, coltype) def handle_raw_string(value): inner = re.match(r"^{(.*)}$", value).group(1) return inner.split(",") if inner else [] def process(value): if value is None: return None return super_rp(handle_raw_string(value)) return process E.g.:: Table( 'mydata', metadata, Column('id', Integer, primary_key=True), Column('data', ArrayOfEnum(ENUM('a', 'b', 'c', name='myenum'))) ) This type is not included as a built-in type as it would be incompatible with a DBAPI that suddenly decides to support ARRAY of ENUM directly in a new version. .. _postgresql_array_of_json: Using JSON/JSONB with ARRAY ^^^^^^^^^^^^^^^^^^^^^^^^^^^ Similar to using ENUM, prior to SQLAlchemy 1.3.17, for an ARRAY of JSON/JSONB it was necessary to render the appropriate CAST. Current psycopg2 drivers accommodate the result set correctly without any special steps. .. versionchanged:: 1.3.17 The combination of JSON/JSONB and ARRAY is now directly handled by SQLAlchemy's implementation without any workarounds needed. .. sourcecode:: python import sqlalchemy as sa from sqlalchemy.dialects.postgresql import ARRAY class CastingArray(ARRAY): def bind_expression(self, bindvalue): return sa.cast(bindvalue, self) E.g.:: Table( 'mydata', metadata, Column('id', Integer, primary_key=True), Column('data', CastingArray(JSONB)) ) """ from collections import defaultdict import datetime as dt import re from . import array as _array from . import hstore as _hstore from . import json as _json from . import ranges as _ranges from ... import exc from ... import schema from ... import sql from ... 
import util from ...engine import default from ...engine import reflection from ...sql import compiler from ...sql import elements from ...sql import expression from ...sql import sqltypes from ...sql import util as sql_util from ...types import BIGINT from ...types import BOOLEAN from ...types import CHAR from ...types import DATE from ...types import FLOAT from ...types import INTEGER from ...types import NUMERIC from ...types import REAL from ...types import SMALLINT from ...types import TEXT from ...types import VARCHAR try: from uuid import UUID as _python_UUID # noqa except ImportError: _python_UUID = None IDX_USING = re.compile(r"^(?:btree|hash|gist|gin|[\w_]+)$", re.I) AUTOCOMMIT_REGEXP = re.compile( r"\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|GRANT|REVOKE|" "IMPORT FOREIGN SCHEMA|REFRESH MATERIALIZED VIEW|TRUNCATE)", re.I | re.UNICODE, ) RESERVED_WORDS = set( [ "all", "analyse", "analyze", "and", "any", "array", "as", "asc", "asymmetric", "both", "case", "cast", "check", "collate", "column", "constraint", "create", "current_catalog", "current_date", "current_role", "current_time", "current_timestamp", "current_user", "default", "deferrable", "desc", "distinct", "do", "else", "end", "except", "false", "fetch", "for", "foreign", "from", "grant", "group", "having", "in", "initially", "intersect", "into", "leading", "limit", "localtime", "localtimestamp", "new", "not", "null", "of", "off", "offset", "old", "on", "only", "or", "order", "placing", "primary", "references", "returning", "select", "session_user", "some", "symmetric", "table", "then", "to", "trailing", "true", "union", "unique", "user", "using", "variadic", "when", "where", "window", "with", "authorization", "between", "binary", "cross", "current_schema", "freeze", "full", "ilike", "inner", "is", "isnull", "join", "left", "like", "natural", "notnull", "outer", "over", "overlaps", "right", "similar", "verbose", ] ) _DECIMAL_TYPES = (1231, 1700) _FLOAT_TYPES = (700, 701, 1021, 1022) _INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016) class BYTEA(sqltypes.LargeBinary): __visit_name__ = "BYTEA" class DOUBLE_PRECISION(sqltypes.Float): __visit_name__ = "DOUBLE_PRECISION" class INET(sqltypes.TypeEngine): __visit_name__ = "INET" PGInet = INET class CIDR(sqltypes.TypeEngine): __visit_name__ = "CIDR" PGCidr = CIDR class MACADDR(sqltypes.TypeEngine): __visit_name__ = "MACADDR" PGMacAddr = MACADDR class MONEY(sqltypes.TypeEngine): """Provide the PostgreSQL MONEY type. .. versionadded:: 1.2 """ __visit_name__ = "MONEY" class OID(sqltypes.TypeEngine): """Provide the PostgreSQL OID type. .. versionadded:: 0.9.5 """ __visit_name__ = "OID" class REGCLASS(sqltypes.TypeEngine): """Provide the PostgreSQL REGCLASS type. .. versionadded:: 1.2.7 """ __visit_name__ = "REGCLASS" class TIMESTAMP(sqltypes.TIMESTAMP): def __init__(self, timezone=False, precision=None): super(TIMESTAMP, self).__init__(timezone=timezone) self.precision = precision class TIME(sqltypes.TIME): def __init__(self, timezone=False, precision=None): super(TIME, self).__init__(timezone=timezone) self.precision = precision class INTERVAL(sqltypes.NativeForEmulated, sqltypes._AbstractInterval): """PostgreSQL INTERVAL type. The INTERVAL type may not be supported on all DBAPIs. It is known to work on psycopg2 and not pg8000 or zxjdbc. """ __visit_name__ = "INTERVAL" native = True def __init__(self, precision=None, fields=None): """Construct an INTERVAL. :param precision: optional integer precision value :param fields: string fields specifier. 
allows storage of fields to be limited, such as ``"YEAR"``, ``"MONTH"``, ``"DAY TO HOUR"``, etc. .. versionadded:: 1.2 """ self.precision = precision self.fields = fields @classmethod def adapt_emulated_to_native(cls, interval, **kw): return INTERVAL(precision=interval.second_precision) @property def _type_affinity(self): return sqltypes.Interval @property def python_type(self): return dt.timedelta PGInterval = INTERVAL class BIT(sqltypes.TypeEngine): __visit_name__ = "BIT" def __init__(self, length=None, varying=False): if not varying: # BIT without VARYING defaults to length 1 self.length = length or 1 else: # but BIT VARYING can be unlimited-length, so no default self.length = length self.varying = varying PGBit = BIT class UUID(sqltypes.TypeEngine): """PostgreSQL UUID type. Represents the UUID column type, interpreting data either as natively returned by the DBAPI or as Python uuid objects. The UUID type may not be supported on all DBAPIs. It is known to work on psycopg2 and not pg8000. """ __visit_name__ = "UUID" def __init__(self, as_uuid=False): """Construct a UUID type. :param as_uuid=False: if True, values will be interpreted as Python uuid objects, converting to/from string via the DBAPI. """ if as_uuid and _python_UUID is None: raise NotImplementedError( "This version of Python does not support " "the native UUID type." ) self.as_uuid = as_uuid def bind_processor(self, dialect): if self.as_uuid: def process(value): if value is not None: value = util.text_type(value) return value return process else: return None def result_processor(self, dialect, coltype): if self.as_uuid: def process(value): if value is not None: value = _python_UUID(value) return value return process else: return None PGUuid = UUID class TSVECTOR(sqltypes.TypeEngine): """The :class:`_postgresql.TSVECTOR` type implements the PostgreSQL text search type TSVECTOR. It can be used to do full text queries on natural language documents. .. versionadded:: 0.9.0 .. seealso:: :ref:`postgresql_match` """ __visit_name__ = "TSVECTOR" class ENUM(sqltypes.NativeForEmulated, sqltypes.Enum): """PostgreSQL ENUM type. This is a subclass of :class:`_types.Enum` which includes support for PG's ``CREATE TYPE`` and ``DROP TYPE``. When the builtin type :class:`_types.Enum` is used and the :paramref:`.Enum.native_enum` flag is left at its default of True, the PostgreSQL backend will use a :class:`_postgresql.ENUM` type as the implementation, so the special create/drop rules will be used. The create/drop behavior of ENUM is necessarily intricate, due to the awkward relationship the ENUM type has to the parent table, in that it may be "owned" by just a single table, or may be shared among many tables. 
When using :class:`_types.Enum` or :class:`_postgresql.ENUM` in an "inline" fashion, the ``CREATE TYPE`` and ``DROP TYPE`` are emitted corresponding to when the :meth:`_schema.Table.create` and :meth:`_schema.Table.drop` methods are called:: table = Table('sometable', metadata, Column('some_enum', ENUM('a', 'b', 'c', name='myenum')) ) table.create(engine) # will emit CREATE TYPE and CREATE TABLE table.drop(engine) # will emit DROP TABLE and DROP TYPE To use a common enumerated type between multiple tables, the best practice is to declare the :class:`_types.Enum` or :class:`_postgresql.ENUM` independently, and associate it with the :class:`_schema.MetaData` object itself:: my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata) t1 = Table('sometable_one', metadata, Column('some_enum', myenum) ) t2 = Table('sometable_two', metadata, Column('some_enum', myenum) ) When this pattern is used, care must still be taken at the level of individual table creates. Emitting CREATE TABLE without also specifying ``checkfirst=True`` will still cause issues:: t1.create(engine) # will fail: no such type 'myenum' If we specify ``checkfirst=True``, the individual table-level create operation will check for the ``ENUM`` and create it if not present:: # will check if enum exists, and emit CREATE TYPE if not t1.create(engine, checkfirst=True) When using a metadata-level ENUM type, the type will always be created and dropped when the metadata-wide create or drop is called:: metadata.create_all(engine) # will emit CREATE TYPE metadata.drop_all(engine) # will emit DROP TYPE The type can also be created and dropped directly:: my_enum.create(engine) my_enum.drop(engine) .. versionchanged:: 1.0.0 The PostgreSQL :class:`_postgresql.ENUM` type now behaves more strictly with regards to CREATE/DROP. A metadata-level ENUM type will only be created and dropped at the metadata level, not the table level, with the exception of ``table.create(checkfirst=True)``. The ``table.drop()`` call will now emit a DROP TYPE for a table-level enumerated type. """ native_enum = True def __init__(self, *enums, **kw): """Construct an :class:`_postgresql.ENUM`. Arguments are the same as those of :class:`_types.Enum`, but also including the following parameters. :param create_type: Defaults to True. Indicates that ``CREATE TYPE`` should be emitted, after optionally checking for the presence of the type, when the parent table is being created; and additionally that ``DROP TYPE`` is called when the table is dropped. When ``False``, no check will be performed and no ``CREATE TYPE`` or ``DROP TYPE`` is emitted, unless :meth:`~.postgresql.ENUM.create` or :meth:`~.postgresql.ENUM.drop` are called directly. Setting to ``False`` is helpful when emitting a creation scheme to a SQL file without access to the actual database - the :meth:`~.postgresql.ENUM.create` and :meth:`~.postgresql.ENUM.drop` methods can be used to emit SQL to a target bind. """ self.create_type = kw.pop("create_type", True) super(ENUM, self).__init__(*enums, **kw) @classmethod def adapt_emulated_to_native(cls, impl, **kw): """Produce a PostgreSQL native :class:`_postgresql.ENUM` from plain :class:`.Enum`. 
""" kw.setdefault("validate_strings", impl.validate_strings) kw.setdefault("name", impl.name) kw.setdefault("schema", impl.schema) kw.setdefault("inherit_schema", impl.inherit_schema) kw.setdefault("metadata", impl.metadata) kw.setdefault("_create_events", False) kw.setdefault("values_callable", impl.values_callable) return cls(**kw) def create(self, bind=None, checkfirst=True): """Emit ``CREATE TYPE`` for this :class:`_postgresql.ENUM`. If the underlying dialect does not support PostgreSQL CREATE TYPE, no action is taken. :param bind: a connectable :class:`_engine.Engine`, :class:`_engine.Connection`, or similar object to emit SQL. :param checkfirst: if ``True``, a query against the PG catalog will be first performed to see if the type does not exist already before creating. """ if not bind.dialect.supports_native_enum: return if not checkfirst or not bind.dialect.has_type( bind, self.name, schema=self.schema ): bind.execute(CreateEnumType(self)) def drop(self, bind=None, checkfirst=True): """Emit ``DROP TYPE`` for this :class:`_postgresql.ENUM`. If the underlying dialect does not support PostgreSQL DROP TYPE, no action is taken. :param bind: a connectable :class:`_engine.Engine`, :class:`_engine.Connection`, or similar object to emit SQL. :param checkfirst: if ``True``, a query against the PG catalog will be first performed to see if the type actually exists before dropping. """ if not bind.dialect.supports_native_enum: return if not checkfirst or bind.dialect.has_type( bind, self.name, schema=self.schema ): bind.execute(DropEnumType(self)) def _check_for_name_in_memos(self, checkfirst, kw): """Look in the 'ddl runner' for 'memos', then note our name in that collection. This to ensure a particular named enum is operated upon only once within any kind of create/drop sequence without relying upon "checkfirst". 
""" if not self.create_type: return True if "_ddl_runner" in kw: ddl_runner = kw["_ddl_runner"] if "_pg_enums" in ddl_runner.memo: pg_enums = ddl_runner.memo["_pg_enums"] else: pg_enums = ddl_runner.memo["_pg_enums"] = set() present = (self.schema, self.name) in pg_enums pg_enums.add((self.schema, self.name)) return present else: return False def _on_table_create(self, target, bind, checkfirst=False, **kw): if ( checkfirst or ( not self.metadata and not kw.get("_is_metadata_operation", False) ) and not self._check_for_name_in_memos(checkfirst, kw) ): self.create(bind=bind, checkfirst=checkfirst) def _on_table_drop(self, target, bind, checkfirst=False, **kw): if ( not self.metadata and not kw.get("_is_metadata_operation", False) and not self._check_for_name_in_memos(checkfirst, kw) ): self.drop(bind=bind, checkfirst=checkfirst) def _on_metadata_create(self, target, bind, checkfirst=False, **kw): if not self._check_for_name_in_memos(checkfirst, kw): self.create(bind=bind, checkfirst=checkfirst) def _on_metadata_drop(self, target, bind, checkfirst=False, **kw): if not self._check_for_name_in_memos(checkfirst, kw): self.drop(bind=bind, checkfirst=checkfirst) colspecs = { sqltypes.ARRAY: _array.ARRAY, sqltypes.Interval: INTERVAL, sqltypes.Enum: ENUM, sqltypes.JSON.JSONPathType: _json.JSONPathType, sqltypes.JSON: _json.JSON, } ischema_names = { "_array": _array.ARRAY, "hstore": _hstore.HSTORE, "json": _json.JSON, "jsonb": _json.JSONB, "int4range": _ranges.INT4RANGE, "int8range": _ranges.INT8RANGE, "numrange": _ranges.NUMRANGE, "daterange": _ranges.DATERANGE, "tsrange": _ranges.TSRANGE, "tstzrange": _ranges.TSTZRANGE, "integer": INTEGER, "bigint": BIGINT, "smallint": SMALLINT, "character varying": VARCHAR, "character": CHAR, '"char"': sqltypes.String, "name": sqltypes.String, "text": TEXT, "numeric": NUMERIC, "float": FLOAT, "real": REAL, "inet": INET, "cidr": CIDR, "uuid": UUID, "bit": BIT, "bit varying": BIT, "macaddr": MACADDR, "money": MONEY, "oid": OID, "regclass": REGCLASS, "double precision": DOUBLE_PRECISION, "timestamp": TIMESTAMP, "timestamp with time zone": TIMESTAMP, "timestamp without time zone": TIMESTAMP, "time with time zone": TIME, "time without time zone": TIME, "date": DATE, "time": TIME, "bytea": BYTEA, "boolean": BOOLEAN, "interval": INTERVAL, "tsvector": TSVECTOR, } class PGCompiler(compiler.SQLCompiler): def visit_array(self, element, **kw): return "ARRAY[%s]" % self.visit_clauselist(element, **kw) def visit_slice(self, element, **kw): return "%s:%s" % ( self.process(element.start, **kw), self.process(element.stop, **kw), ) def visit_json_getitem_op_binary( self, binary, operator, _cast_applied=False, **kw ): if ( not _cast_applied and binary.type._type_affinity is not sqltypes.JSON ): kw["_cast_applied"] = True return self.process(sql.cast(binary, binary.type), **kw) kw["eager_grouping"] = True return self._generate_generic_binary( binary, " -> " if not _cast_applied else " ->> ", **kw ) def visit_json_path_getitem_op_binary( self, binary, operator, _cast_applied=False, **kw ): if ( not _cast_applied and binary.type._type_affinity is not sqltypes.JSON ): kw["_cast_applied"] = True return self.process(sql.cast(binary, binary.type), **kw) kw["eager_grouping"] = True return self._generate_generic_binary( binary, " #> " if not _cast_applied else " #>> ", **kw ) def visit_getitem_binary(self, binary, operator, **kw): return "%s[%s]" % ( self.process(binary.left, **kw), self.process(binary.right, **kw), ) def visit_aggregate_order_by(self, element, **kw): return "%s ORDER BY %s" 
% ( self.process(element.target, **kw), self.process(element.order_by, **kw), ) def visit_match_op_binary(self, binary, operator, **kw): if "postgresql_regconfig" in binary.modifiers: regconfig = self.render_literal_value( binary.modifiers["postgresql_regconfig"], sqltypes.STRINGTYPE ) if regconfig: return "%s @@ to_tsquery(%s, %s)" % ( self.process(binary.left, **kw), regconfig, self.process(binary.right, **kw), ) return "%s @@ to_tsquery(%s)" % ( self.process(binary.left, **kw), self.process(binary.right, **kw), ) def visit_ilike_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return "%s ILIKE %s" % ( self.process(binary.left, **kw), self.process(binary.right, **kw), ) + ( " ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE) if escape else "" ) def visit_notilike_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return "%s NOT ILIKE %s" % ( self.process(binary.left, **kw), self.process(binary.right, **kw), ) + ( " ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE) if escape else "" ) def visit_empty_set_expr(self, element_types): # cast the empty set to the type we are comparing against. if # we are comparing against the null type, pick an arbitrary # datatype for the empty set return "SELECT %s WHERE 1!=1" % ( ", ".join( "CAST(NULL AS %s)" % self.dialect.type_compiler.process( INTEGER() if type_._isnull else type_ ) for type_ in element_types or [INTEGER()] ), ) def render_literal_value(self, value, type_): value = super(PGCompiler, self).render_literal_value(value, type_) if self.dialect._backslash_escapes: value = value.replace("\\", "\\\\") return value def visit_sequence(self, seq, **kw): return "nextval('%s')" % self.preparer.format_sequence(seq) def limit_clause(self, select, **kw): text = "" if select._limit_clause is not None: text += " \n LIMIT " + self.process(select._limit_clause, **kw) if select._offset_clause is not None: if select._limit_clause is None: text += " \n LIMIT ALL" text += " OFFSET " + self.process(select._offset_clause, **kw) return text def format_from_hint_text(self, sqltext, table, hint, iscrud): if hint.upper() != "ONLY": raise exc.CompileError("Unrecognized hint: %r" % hint) return "ONLY " + sqltext def get_select_precolumns(self, select, **kw): if select._distinct is not False: if select._distinct is True: return "DISTINCT " elif isinstance(select._distinct, (list, tuple)): return ( "DISTINCT ON (" + ", ".join( [self.process(col, **kw) for col in select._distinct] ) + ") " ) else: return ( "DISTINCT ON (" + self.process(select._distinct, **kw) + ") " ) else: return "" def for_update_clause(self, select, **kw): if select._for_update_arg.read: if select._for_update_arg.key_share: tmp = " FOR KEY SHARE" else: tmp = " FOR SHARE" elif select._for_update_arg.key_share: tmp = " FOR NO KEY UPDATE" else: tmp = " FOR UPDATE" if select._for_update_arg.of: tables = util.OrderedSet() for c in select._for_update_arg.of: tables.update(sql_util.surface_selectables_only(c)) tmp += " OF " + ", ".join( self.process(table, ashint=True, use_schema=False, **kw) for table in tables ) if select._for_update_arg.nowait: tmp += " NOWAIT" if select._for_update_arg.skip_locked: tmp += " SKIP LOCKED" return tmp def returning_clause(self, stmt, returning_cols): columns = [ self._label_select_column(None, c, True, False, {}) for c in expression._select_iterables(returning_cols) ] return "RETURNING " + ", ".join(columns) def visit_substring_func(self, func, **kw): s = 
self.process(func.clauses.clauses[0], **kw) start = self.process(func.clauses.clauses[1], **kw) if len(func.clauses.clauses) > 2: length = self.process(func.clauses.clauses[2], **kw) return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length) else: return "SUBSTRING(%s FROM %s)" % (s, start) def _on_conflict_target(self, clause, **kw): if clause.constraint_target is not None: target_text = "ON CONSTRAINT %s" % clause.constraint_target elif clause.inferred_target_elements is not None: target_text = "(%s)" % ", ".join( ( self.preparer.quote(c) if isinstance(c, util.string_types) else self.process(c, include_table=False, use_schema=False) ) for c in clause.inferred_target_elements ) if clause.inferred_target_whereclause is not None: target_text += " WHERE %s" % self.process( clause.inferred_target_whereclause, include_table=False, use_schema=False, ) else: target_text = "" return target_text def visit_on_conflict_do_nothing(self, on_conflict, **kw): target_text = self._on_conflict_target(on_conflict, **kw) if target_text: return "ON CONFLICT %s DO NOTHING" % target_text else: return "ON CONFLICT DO NOTHING" def visit_on_conflict_do_update(self, on_conflict, **kw): clause = on_conflict target_text = self._on_conflict_target(on_conflict, **kw) action_set_ops = [] set_parameters = dict(clause.update_values_to_set) # create a list of column assignment clauses as tuples insert_statement = self.stack[-1]["selectable"] cols = insert_statement.table.c for c in cols: col_key = c.key if col_key in set_parameters: value = set_parameters.pop(col_key) if elements._is_literal(value): value = elements.BindParameter(None, value, type_=c.type) else: if ( isinstance(value, elements.BindParameter) and value.type._isnull ): value = value._clone() value.type = c.type value_text = self.process(value.self_group(), use_schema=False) key_text = self.preparer.quote(col_key) action_set_ops.append("%s = %s" % (key_text, value_text)) # check for names that don't match columns if set_parameters: util.warn( "Additional column names not matching " "any column keys in table '%s': %s" % ( self.statement.table.name, (", ".join("'%s'" % c for c in set_parameters)), ) ) for k, v in set_parameters.items(): key_text = ( self.preparer.quote(k) if isinstance(k, util.string_types) else self.process(k, use_schema=False) ) value_text = self.process( elements._literal_as_binds(v), use_schema=False ) action_set_ops.append("%s = %s" % (key_text, value_text)) action_text = ", ".join(action_set_ops) if clause.update_whereclause is not None: action_text += " WHERE %s" % self.process( clause.update_whereclause, include_table=True, use_schema=False ) return "ON CONFLICT %s DO UPDATE SET %s" % (target_text, action_text) def update_from_clause( self, update_stmt, from_table, extra_froms, from_hints, **kw ): return "FROM " + ", ".join( t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw) for t in extra_froms ) def delete_extra_from_clause( self, delete_stmt, from_table, extra_froms, from_hints, **kw ): """Render the DELETE .. 
USING clause specific to PostgreSQL.""" return "USING " + ", ".join( t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw) for t in extra_froms ) class PGDDLCompiler(compiler.DDLCompiler): def get_column_specification(self, column, **kwargs): colspec = self.preparer.format_column(column) impl_type = column.type.dialect_impl(self.dialect) if isinstance(impl_type, sqltypes.TypeDecorator): impl_type = impl_type.impl if ( column.primary_key and column is column.table._autoincrement_column and ( self.dialect.supports_smallserial or not isinstance(impl_type, sqltypes.SmallInteger) ) and ( column.default is None or ( isinstance(column.default, schema.Sequence) and column.default.optional ) ) ): if isinstance(impl_type, sqltypes.BigInteger): colspec += " BIGSERIAL" elif isinstance(impl_type, sqltypes.SmallInteger): colspec += " SMALLSERIAL" else: colspec += " SERIAL" else: colspec += " " + self.dialect.type_compiler.process( column.type, type_expression=column, identifier_preparer=self.preparer, ) default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default if column.computed is not None: colspec += " " + self.process(column.computed) if not column.nullable: colspec += " NOT NULL" return colspec def visit_check_constraint(self, constraint): if constraint._type_bound: typ = list(constraint.columns)[0].type if ( isinstance(typ, sqltypes.ARRAY) and isinstance(typ.item_type, sqltypes.Enum) and not typ.item_type.native_enum ): raise exc.CompileError( "PostgreSQL dialect cannot produce the CHECK constraint " "for ARRAY of non-native ENUM; please specify " "create_constraint=False on this Enum datatype." ) return super(PGDDLCompiler, self).visit_check_constraint(constraint) def visit_drop_table_comment(self, drop): return "COMMENT ON TABLE %s IS NULL" % self.preparer.format_table( drop.element ) def visit_create_enum_type(self, create): type_ = create.element return "CREATE TYPE %s AS ENUM (%s)" % ( self.preparer.format_type(type_), ", ".join( self.sql_compiler.process(sql.literal(e), literal_binds=True) for e in type_.enums ), ) def visit_drop_enum_type(self, drop): type_ = drop.element return "DROP TYPE %s" % (self.preparer.format_type(type_)) def visit_create_index(self, create): preparer = self.preparer index = create.element self._verify_index_table(index) text = "CREATE " if index.unique: text += "UNIQUE " text += "INDEX " if self.dialect._supports_create_index_concurrently: concurrently = index.dialect_options["postgresql"]["concurrently"] if concurrently: text += "CONCURRENTLY " text += "%s ON %s " % ( self._prepared_index_name(index, include_schema=False), preparer.format_table(index.table), ) using = index.dialect_options["postgresql"]["using"] if using: text += ( "USING %s " % self.preparer.validate_sql_phrase(using, IDX_USING).lower() ) ops = index.dialect_options["postgresql"]["ops"] text += "(%s)" % ( ", ".join( [ self.sql_compiler.process( expr.self_group() if not isinstance(expr, expression.ColumnClause) else expr, include_table=False, literal_binds=True, ) + ( (" " + ops[expr.key]) if hasattr(expr, "key") and expr.key in ops else "" ) for expr in index.expressions ] ) ) withclause = index.dialect_options["postgresql"]["with"] if withclause: text += " WITH (%s)" % ( ", ".join( [ "%s = %s" % storage_parameter for storage_parameter in withclause.items() ] ) ) tablespace_name = index.dialect_options["postgresql"]["tablespace"] if tablespace_name: text += " TABLESPACE %s" % preparer.quote(tablespace_name) whereclause = 
index.dialect_options["postgresql"]["where"] if whereclause is not None: where_compiled = self.sql_compiler.process( whereclause, include_table=False, literal_binds=True ) text += " WHERE " + where_compiled return text def visit_drop_index(self, drop): index = drop.element text = "\nDROP INDEX " if self.dialect._supports_drop_index_concurrently: concurrently = index.dialect_options["postgresql"]["concurrently"] if concurrently: text += "CONCURRENTLY " text += self._prepared_index_name(index, include_schema=True) return text def visit_exclude_constraint(self, constraint, **kw): text = "" if constraint.name is not None: text += "CONSTRAINT %s " % self.preparer.format_constraint( constraint ) elements = [] for expr, name, op in constraint._render_exprs: kw["include_table"] = False elements.append( "%s WITH %s" % (self.sql_compiler.process(expr, **kw), op) ) text += "EXCLUDE USING %s (%s)" % ( self.preparer.validate_sql_phrase( constraint.using, IDX_USING ).lower(), ", ".join(elements), ) if constraint.where is not None: text += " WHERE (%s)" % self.sql_compiler.process( constraint.where, literal_binds=True ) text += self.define_constraint_deferrability(constraint) return text def post_create_table(self, table): table_opts = [] pg_opts = table.dialect_options["postgresql"] inherits = pg_opts.get("inherits") if inherits is not None: if not isinstance(inherits, (list, tuple)): inherits = (inherits,) table_opts.append( "\n INHERITS ( " + ", ".join(self.preparer.quote(name) for name in inherits) + " )" ) if pg_opts["partition_by"]: table_opts.append("\n PARTITION BY %s" % pg_opts["partition_by"]) if pg_opts["with_oids"] is True: table_opts.append("\n WITH OIDS") elif pg_opts["with_oids"] is False: table_opts.append("\n WITHOUT OIDS") if pg_opts["on_commit"]: on_commit_options = pg_opts["on_commit"].replace("_", " ").upper() table_opts.append("\n ON COMMIT %s" % on_commit_options) if pg_opts["tablespace"]: tablespace_name = pg_opts["tablespace"] table_opts.append( "\n TABLESPACE %s" % self.preparer.quote(tablespace_name) ) return "".join(table_opts) def visit_computed_column(self, generated): if generated.persisted is False: raise exc.CompileError( "PostgreSQL computed columns do not support 'virtual' " "persistence; set the 'persisted' flag to None or True for " "PostgreSQL support." 
) return "GENERATED ALWAYS AS (%s) STORED" % self.sql_compiler.process( generated.sqltext, include_table=False, literal_binds=True ) class PGTypeCompiler(compiler.GenericTypeCompiler): def visit_TSVECTOR(self, type_, **kw): return "TSVECTOR" def visit_INET(self, type_, **kw): return "INET" def visit_CIDR(self, type_, **kw): return "CIDR" def visit_MACADDR(self, type_, **kw): return "MACADDR" def visit_MONEY(self, type_, **kw): return "MONEY" def visit_OID(self, type_, **kw): return "OID" def visit_REGCLASS(self, type_, **kw): return "REGCLASS" def visit_FLOAT(self, type_, **kw): if not type_.precision: return "FLOAT" else: return "FLOAT(%(precision)s)" % {"precision": type_.precision} def visit_DOUBLE_PRECISION(self, type_, **kw): return "DOUBLE PRECISION" def visit_BIGINT(self, type_, **kw): return "BIGINT" def visit_HSTORE(self, type_, **kw): return "HSTORE" def visit_JSON(self, type_, **kw): return "JSON" def visit_JSONB(self, type_, **kw): return "JSONB" def visit_INT4RANGE(self, type_, **kw): return "INT4RANGE" def visit_INT8RANGE(self, type_, **kw): return "INT8RANGE" def visit_NUMRANGE(self, type_, **kw): return "NUMRANGE" def visit_DATERANGE(self, type_, **kw): return "DATERANGE" def visit_TSRANGE(self, type_, **kw): return "TSRANGE" def visit_TSTZRANGE(self, type_, **kw): return "TSTZRANGE" def visit_datetime(self, type_, **kw): return self.visit_TIMESTAMP(type_, **kw) def visit_enum(self, type_, **kw): if not type_.native_enum or not self.dialect.supports_native_enum: return super(PGTypeCompiler, self).visit_enum(type_, **kw) else: return self.visit_ENUM(type_, **kw) def visit_ENUM(self, type_, identifier_preparer=None, **kw): if identifier_preparer is None: identifier_preparer = self.dialect.identifier_preparer return identifier_preparer.format_type(type_) def visit_TIMESTAMP(self, type_, **kw): return "TIMESTAMP%s %s" % ( "(%d)" % type_.precision if getattr(type_, "precision", None) is not None else "", (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE", ) def visit_TIME(self, type_, **kw): return "TIME%s %s" % ( "(%d)" % type_.precision if getattr(type_, "precision", None) is not None else "", (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE", ) def visit_INTERVAL(self, type_, **kw): text = "INTERVAL" if type_.fields is not None: text += " " + type_.fields if type_.precision is not None: text += " (%d)" % type_.precision return text def visit_BIT(self, type_, **kw): if type_.varying: compiled = "BIT VARYING" if type_.length is not None: compiled += "(%d)" % type_.length else: compiled = "BIT(%d)" % type_.length return compiled def visit_UUID(self, type_, **kw): return "UUID" def visit_large_binary(self, type_, **kw): return self.visit_BYTEA(type_, **kw) def visit_BYTEA(self, type_, **kw): return "BYTEA" def visit_ARRAY(self, type_, **kw): # TODO: pass **kw? 
inner = self.process(type_.item_type) return re.sub( r"((?: COLLATE.*)?)$", ( r"%s\1" % ( "[]" * (type_.dimensions if type_.dimensions is not None else 1) ) ), inner, count=1, ) class PGIdentifierPreparer(compiler.IdentifierPreparer): reserved_words = RESERVED_WORDS def _unquote_identifier(self, value): if value[0] == self.initial_quote: value = value[1:-1].replace( self.escape_to_quote, self.escape_quote ) return value def format_type(self, type_, use_schema=True): if not type_.name: raise exc.CompileError("PostgreSQL ENUM type requires a name.") name = self.quote(type_.name) effective_schema = self.schema_for_object(type_) if ( not self.omit_schema and use_schema and effective_schema is not None ): name = self.quote_schema(effective_schema) + "." + name return name class PGInspector(reflection.Inspector): def __init__(self, conn): reflection.Inspector.__init__(self, conn) def get_table_oid(self, table_name, schema=None): """Return the OID for the given table name.""" return self.dialect.get_table_oid( self.bind, table_name, schema, info_cache=self.info_cache ) def get_enums(self, schema=None): """Return a list of ENUM objects. Each member is a dictionary containing these fields: * name - name of the enum * schema - the schema name for the enum. * visible - boolean, whether or not this enum is visible in the default search path. * labels - a list of string labels that apply to the enum. :param schema: schema name. If None, the default schema (typically 'public') is used. May also be set to '*' to indicate load enums for all schemas. .. versionadded:: 1.0.0 """ schema = schema or self.default_schema_name return self.dialect._load_enums(self.bind, schema) def get_foreign_table_names(self, schema=None): """Return a list of FOREIGN TABLE names. Behavior is similar to that of :meth:`_reflection.Inspector.get_table_names`, except that the list is limited to those tables that report a ``relkind`` value of ``f``. .. versionadded:: 1.0.0 """ schema = schema or self.default_schema_name return self.dialect._get_foreign_table_names(self.bind, schema) def get_view_names(self, schema=None, include=("plain", "materialized")): """Return all view names in `schema`. :param schema: Optional, retrieve names from a non-default schema. For special quoting, use :class:`.quoted_name`. :param include: specify which types of views to return. Passed as a string value (for a single type) or a tuple (for any number of types). Defaults to ``('plain', 'materialized')``. .. versionadded:: 1.1 """ return self.dialect.get_view_names( self.bind, schema, info_cache=self.info_cache, include=include ) class CreateEnumType(schema._CreateDropBase): __visit_name__ = "create_enum_type" class DropEnumType(schema._CreateDropBase): __visit_name__ = "drop_enum_type" class PGExecutionContext(default.DefaultExecutionContext): def fire_sequence(self, seq, type_): return self._execute_scalar( ( "select nextval('%s')" % self.dialect.identifier_preparer.format_sequence(seq) ), type_, ) def get_insert_default(self, column): if column.primary_key and column is column.table._autoincrement_column: if column.server_default and column.server_default.has_argument: # pre-execute passive defaults on primary key columns return self._execute_scalar( "select %s" % column.server_default.arg, column.type ) elif column.default is None or ( column.default.is_sequence and column.default.optional ): # execute the sequence associated with a SERIAL primary # key column. for non-primary-key SERIAL, the ID just # generates server side. 
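# Derive the implicit sequence name "<table>_<column>_seq" that
# PostgreSQL generates for SERIAL columns, truncating the table and
# column parts so the combined name stays within the 63-character
# identifier limit; the result is cached on the column for reuse.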
                try:
                    seq_name = column._postgresql_seq_name
                except AttributeError:
                    tab = column.table.name
                    col = column.name
                    tab = tab[0 : 29 + max(0, (29 - len(col)))]
                    col = col[0 : 29 + max(0, (29 - len(tab)))]
                    name = "%s_%s_seq" % (tab, col)
                    column._postgresql_seq_name = seq_name = name

                if column.table is not None:
                    effective_schema = self.connection.schema_for_object(
                        column.table
                    )
                else:
                    effective_schema = None

                if effective_schema is not None:
                    exc = 'select nextval(\'"%s"."%s"\')' % (
                        effective_schema,
                        seq_name,
                    )
                else:
                    exc = "select nextval('\"%s\"')" % (seq_name,)

                return self._execute_scalar(exc, column.type)

        return super(PGExecutionContext, self).get_insert_default(column)

    def should_autocommit_text(self, statement):
        return AUTOCOMMIT_REGEXP.match(statement)


class PGDialect(default.DefaultDialect):
    name = "postgresql"
    supports_alter = True
    max_identifier_length = 63
    supports_sane_rowcount = True

    supports_native_enum = True
    supports_native_boolean = True
    supports_smallserial = True

    supports_sequences = True
    sequences_optional = True
    preexecute_autoincrement_sequences = True
    postfetch_lastrowid = False

    supports_comments = True
    supports_default_values = True
    supports_empty_insert = False
    supports_multivalues_insert = True
    default_paramstyle = "pyformat"
    ischema_names = ischema_names
    colspecs = colspecs

    statement_compiler = PGCompiler
    ddl_compiler = PGDDLCompiler
    type_compiler = PGTypeCompiler
    preparer = PGIdentifierPreparer
    execution_ctx_cls = PGExecutionContext
    inspector = PGInspector
    isolation_level = None

    construct_arguments = [
        (
            schema.Index,
            {
                "using": False,
                "where": None,
                "ops": {},
                "concurrently": False,
                "with": {},
                "tablespace": None,
            },
        ),
        (
            schema.Table,
            {
                "ignore_search_path": False,
                "tablespace": None,
                "partition_by": None,
                "with_oids": None,
                "on_commit": None,
                "inherits": None,
            },
        ),
    ]

    reflection_options = ("postgresql_ignore_search_path",)

    _backslash_escapes = True
    _supports_create_index_concurrently = True
    _supports_drop_index_concurrently = True

    def __init__(
        self,
        isolation_level=None,
        json_serializer=None,
        json_deserializer=None,
        **kwargs
    ):
        default.DefaultDialect.__init__(self, **kwargs)
        self.isolation_level = isolation_level
        self._json_deserializer = json_deserializer
        self._json_serializer = json_serializer

    def initialize(self, connection):
        super(PGDialect, self).initialize(connection)
        self.implicit_returning = self.server_version_info > (
            8,
            2,
        ) and self.__dict__.get("implicit_returning", True)
        self.supports_native_enum = self.server_version_info >= (8, 3)
        if not self.supports_native_enum:
            self.colspecs = self.colspecs.copy()
            # pop base Enum type
            self.colspecs.pop(sqltypes.Enum, None)
            # psycopg2, others may have placed ENUM here as well
            self.colspecs.pop(ENUM, None)

        # http://www.postgresql.org/docs/9.3/static/release-9-2.html#AEN116689
        self.supports_smallserial = self.server_version_info >= (9, 2)

        self._backslash_escapes = (
            self.server_version_info < (8, 2)
            or connection.scalar("show standard_conforming_strings") == "off"
        )

        self._supports_create_index_concurrently = (
            self.server_version_info >= (8, 2)
        )
        self._supports_drop_index_concurrently = self.server_version_info >= (
            9,
            2,
        )

    def on_connect(self):
        if self.isolation_level is not None:

            def connect(conn):
                self.set_isolation_level(conn, self.isolation_level)

            return connect
        else:
            return None

    _isolation_lookup = set(
        [
            "SERIALIZABLE",
            "READ UNCOMMITTED",
            "READ COMMITTED",
            "REPEATABLE READ",
        ]
    )

    def set_isolation_level(self, connection, level):
        level = level.replace("_", " ")
        if level not in self._isolation_lookup:
            raise exc.ArgumentError(
                "Invalid value '%s' for isolation_level. "
                "Valid isolation levels for %s are %s"
                % (level, self.name, ", ".join(self._isolation_lookup))
            )
        cursor = connection.cursor()
        cursor.execute(
            "SET SESSION CHARACTERISTICS AS TRANSACTION "
            "ISOLATION LEVEL %s" % level
        )
        cursor.execute("COMMIT")
        cursor.close()

    def get_isolation_level(self, connection):
        cursor = connection.cursor()
        cursor.execute("show transaction isolation level")
        val = cursor.fetchone()[0]
        cursor.close()
        return val.upper()

    def do_begin_twophase(self, connection, xid):
        self.do_begin(connection.connection)

    def do_prepare_twophase(self, connection, xid):
        connection.execute("PREPARE TRANSACTION '%s'" % xid)

    def do_rollback_twophase(
        self, connection, xid, is_prepared=True, recover=False
    ):
        if is_prepared:
            if recover:
                # FIXME: ugly hack to get out of transaction
                # context when committing recoverable transactions
                # Must find out a way how to make the dbapi not
                # open a transaction.
                connection.execute("ROLLBACK")
            connection.execute("ROLLBACK PREPARED '%s'" % xid)
            connection.execute("BEGIN")
            self.do_rollback(connection.connection)
        else:
            self.do_rollback(connection.connection)

    def do_commit_twophase(
        self, connection, xid, is_prepared=True, recover=False
    ):
        if is_prepared:
            if recover:
                connection.execute("ROLLBACK")
            connection.execute("COMMIT PREPARED '%s'" % xid)
            connection.execute("BEGIN")
            self.do_rollback(connection.connection)
        else:
            self.do_commit(connection.connection)

    def do_recover_twophase(self, connection):
        resultset = connection.execute(
            sql.text("SELECT gid FROM pg_prepared_xacts")
        )
        return [row[0] for row in resultset]

    def _get_default_schema_name(self, connection):
        return connection.scalar("select current_schema()")

    def has_schema(self, connection, schema):
        query = (
            "select nspname from pg_namespace "
            "where lower(nspname)=:schema"
        )
        cursor = connection.execute(
            sql.text(query).bindparams(
                sql.bindparam(
                    "schema",
                    util.text_type(schema.lower()),
                    type_=sqltypes.Unicode,
                )
            )
        )

        return bool(cursor.first())

    def has_table(self, connection, table_name, schema=None):
        # seems like case gets folded in pg_class...
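        # Hedged usage sketch (added commentary; names are examples only):
        #
        #   from sqlalchemy import create_engine
        #   engine = create_engine("postgresql://scott:tiger@localhost/test")
        #   with engine.connect() as conn:
        #       engine.dialect.has_table(conn, "some_table", schema="public")
        #
        # returns True/False based on the pg_class queries below.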
        if schema is None:
            cursor = connection.execute(
                sql.text(
                    "select relname from pg_class c join pg_namespace n on "
                    "n.oid=c.relnamespace where "
                    "pg_catalog.pg_table_is_visible(c.oid) "
                    "and relname=:name"
                ).bindparams(
                    sql.bindparam(
                        "name",
                        util.text_type(table_name),
                        type_=sqltypes.Unicode,
                    )
                )
            )
        else:
            cursor = connection.execute(
                sql.text(
                    "select relname from pg_class c join pg_namespace n on "
                    "n.oid=c.relnamespace where n.nspname=:schema and "
                    "relname=:name"
                ).bindparams(
                    sql.bindparam(
                        "name",
                        util.text_type(table_name),
                        type_=sqltypes.Unicode,
                    ),
                    sql.bindparam(
                        "schema",
                        util.text_type(schema),
                        type_=sqltypes.Unicode,
                    ),
                )
            )
        return bool(cursor.first())

    def has_sequence(self, connection, sequence_name, schema=None):
        if schema is None:
            cursor = connection.execute(
                sql.text(
                    "SELECT relname FROM pg_class c join pg_namespace n on "
                    "n.oid=c.relnamespace where relkind='S' and "
                    "n.nspname=current_schema() "
                    "and relname=:name"
                ).bindparams(
                    sql.bindparam(
                        "name",
                        util.text_type(sequence_name),
                        type_=sqltypes.Unicode,
                    )
                )
            )
        else:
            cursor = connection.execute(
                sql.text(
                    "SELECT relname FROM pg_class c join pg_namespace n on "
                    "n.oid=c.relnamespace where relkind='S' and "
                    "n.nspname=:schema and relname=:name"
                ).bindparams(
                    sql.bindparam(
                        "name",
                        util.text_type(sequence_name),
                        type_=sqltypes.Unicode,
                    ),
                    sql.bindparam(
                        "schema",
                        util.text_type(schema),
                        type_=sqltypes.Unicode,
                    ),
                )
            )

        return bool(cursor.first())

    def has_type(self, connection, type_name, schema=None):
        if schema is not None:
            query = """
            SELECT EXISTS (
                SELECT * FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n
                WHERE t.typnamespace = n.oid
                AND t.typname = :typname
                AND n.nspname = :nspname
                )
                """
            query = sql.text(query)
        else:
            query = """
            SELECT EXISTS (
                SELECT * FROM pg_catalog.pg_type t
                WHERE t.typname = :typname
                AND pg_type_is_visible(t.oid)
                )
                """
            query = sql.text(query)
        query = query.bindparams(
            sql.bindparam(
                "typname", util.text_type(type_name), type_=sqltypes.Unicode
            )
        )
        if schema is not None:
            query = query.bindparams(
                sql.bindparam(
                    "nspname", util.text_type(schema), type_=sqltypes.Unicode
                )
            )
        cursor = connection.execute(query)
        return bool(cursor.scalar())

    def _get_server_version_info(self, connection):
        v = connection.execute("select version()").scalar()
        m = re.match(
            r".*(?:PostgreSQL|EnterpriseDB) "
            r"(\d+)\.?(\d+)?(?:\.(\d+))?(?:\.\d+)?(?:devel|beta)?",
            v,
        )
        if not m:
            raise AssertionError(
                "Could not determine version from string '%s'" % v
            )
        return tuple([int(x) for x in m.group(1, 2, 3) if x is not None])

    @reflection.cache
    def get_table_oid(self, connection, table_name, schema=None, **kw):
        """Fetch the oid for schema.table_name.

        Several reflection methods require the table oid.  The idea for using
        this method is that it can be fetched one time and cached for
        subsequent calls.

        """
        table_oid = None
        if schema is not None:
            schema_where_clause = "n.nspname = :schema"
        else:
            schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)"
        query = (
            """
            SELECT c.oid
            FROM pg_catalog.pg_class c
            LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
            WHERE (%s)
            AND c.relname = :table_name AND c.relkind in
            ('r', 'v', 'm', 'f', 'p')
        """
            % schema_where_clause
        )
        # Since we're binding to unicode, table_name and schema_name must be
        # unicode.
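        # Added commentary (not upstream code): util.text_type is ``unicode``
        # on Python 2 and ``str`` on Python 3, so e.g. a bytestring table
        # name still binds as the Unicode parameter declared on the text()
        # construct below.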
        table_name = util.text_type(table_name)
        if schema is not None:
            schema = util.text_type(schema)
        s = sql.text(query).bindparams(table_name=sqltypes.Unicode)
        s = s.columns(oid=sqltypes.Integer)
        if schema:
            s = s.bindparams(sql.bindparam("schema", type_=sqltypes.Unicode))
        c = connection.execute(s, table_name=table_name, schema=schema)
        table_oid = c.scalar()
        if table_oid is None:
            raise exc.NoSuchTableError(table_name)
        return table_oid

    @reflection.cache
    def get_schema_names(self, connection, **kw):
        result = connection.execute(
            sql.text(
                "SELECT nspname FROM pg_namespace "
                "WHERE nspname NOT LIKE 'pg_%' "
                "ORDER BY nspname"
            ).columns(nspname=sqltypes.Unicode)
        )
        return [name for name, in result]

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        result = connection.execute(
            sql.text(
                "SELECT c.relname FROM pg_class c "
                "JOIN pg_namespace n ON n.oid = c.relnamespace "
                "WHERE n.nspname = :schema AND c.relkind in ('r', 'p')"
            ).columns(relname=sqltypes.Unicode),
            schema=schema if schema is not None else self.default_schema_name,
        )
        return [name for name, in result]

    @reflection.cache
    def _get_foreign_table_names(self, connection, schema=None, **kw):
        result = connection.execute(
            sql.text(
                "SELECT c.relname FROM pg_class c "
                "JOIN pg_namespace n ON n.oid = c.relnamespace "
                "WHERE n.nspname = :schema AND c.relkind = 'f'"
            ).columns(relname=sqltypes.Unicode),
            schema=schema if schema is not None else self.default_schema_name,
        )
        return [name for name, in result]

    @reflection.cache
    def get_view_names(
        self, connection, schema=None, include=("plain", "materialized"), **kw
    ):

        include_kind = {"plain": "v", "materialized": "m"}
        try:
            kinds = [include_kind[i] for i in util.to_list(include)]
        except KeyError:
            raise ValueError(
                "include %r unknown, needs to be a sequence containing "
                "one or both of 'plain' and 'materialized'" % (include,)
            )
        if not kinds:
            raise ValueError(
                "empty include, needs to be a sequence containing "
                "one or both of 'plain' and 'materialized'"
            )

        result = connection.execute(
            sql.text(
                "SELECT c.relname FROM pg_class c "
                "JOIN pg_namespace n ON n.oid = c.relnamespace "
                "WHERE n.nspname = :schema AND c.relkind IN (%s)"
                % (", ".join("'%s'" % elem for elem in kinds))
            ).columns(relname=sqltypes.Unicode),
            schema=schema if schema is not None else self.default_schema_name,
        )
        return [name for name, in result]

    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        view_def = connection.scalar(
            sql.text(
                "SELECT pg_get_viewdef(c.oid) view_def FROM pg_class c "
                "JOIN pg_namespace n ON n.oid = c.relnamespace "
                "WHERE n.nspname = :schema AND c.relname = :view_name "
                "AND c.relkind IN ('v', 'm')"
            ).columns(view_def=sqltypes.Unicode),
            schema=schema if schema is not None else self.default_schema_name,
            view_name=view_name,
        )
        return view_def

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):

        table_oid = self.get_table_oid(
            connection, table_name, schema, info_cache=kw.get("info_cache")
        )

        generated = (
            "a.attgenerated as generated"
            if self.server_version_info >= (12,)
            else "NULL as generated"
        )
        SQL_COLS = (
            """
            SELECT a.attname,
              pg_catalog.format_type(a.atttypid, a.atttypmod),
              (SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid)
                FROM pg_catalog.pg_attrdef d
               WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum
               AND a.atthasdef)
              AS DEFAULT,
              a.attnotnull, a.attnum, a.attrelid as table_oid,
              pgd.description as comment,
              %s
            FROM pg_catalog.pg_attribute a
            LEFT JOIN pg_catalog.pg_description pgd ON (
                pgd.objoid = a.attrelid AND pgd.objsubid = a.attnum)
            WHERE a.attrelid = :table_oid
            AND a.attnum > 0 AND NOT a.attisdropped
            ORDER BY a.attnum
        """
            % generated
        )
        s = (
            sql.text(SQL_COLS)
            .bindparams(sql.bindparam("table_oid", type_=sqltypes.Integer))
            .columns(attname=sqltypes.Unicode, default=sqltypes.Unicode)
        )
        c = connection.execute(s, table_oid=table_oid)
        rows = c.fetchall()

        # dictionary with (name, ) if default search path or (schema, name)
        # as keys
        domains = self._load_domains(connection)

        # dictionary with (name, ) if default search path or (schema, name)
        # as keys
        enums = dict(
            ((rec["name"],), rec)
            if rec["visible"]
            else ((rec["schema"], rec["name"]), rec)
            for rec in self._load_enums(connection, schema="*")
        )

        # format columns
        columns = []

        for (
            name,
            format_type,
            default_,
            notnull,
            attnum,
            table_oid,
            comment,
            generated,
        ) in rows:
            column_info = self._get_column_info(
                name,
                format_type,
                default_,
                notnull,
                domains,
                enums,
                schema,
                comment,
                generated,
            )
            columns.append(column_info)
        return columns

    def _get_column_info(
        self,
        name,
        format_type,
        default,
        notnull,
        domains,
        enums,
        schema,
        comment,
        generated,
    ):
        def _handle_array_type(attype):
            return (
                # strip '[]' from integer[], etc.
                re.sub(r"\[\]$", "", attype),
                attype.endswith("[]"),
            )

        # strip (*) from character varying(5), timestamp(5)
        # with time zone, geometry(POLYGON), etc.
        attype = re.sub(r"\(.*\)", "", format_type)

        # strip '[]' from integer[], etc. and check if an array
        attype, is_array = _handle_array_type(attype)

        # strip quotes from case sensitive enum or domain names
        enum_or_domain_key = tuple(util.quoted_token_parser(attype))

        nullable = not notnull

        charlen = re.search(r"\(([\d,]+)\)", format_type)
        if charlen:
            charlen = charlen.group(1)
        args = re.search(r"\((.*)\)", format_type)
        if args and args.group(1):
            args = tuple(re.split(r"\s*,\s*", args.group(1)))
        else:
            args = ()
        kwargs = {}

        if attype == "numeric":
            if charlen:
                prec, scale = charlen.split(",")
                args = (int(prec), int(scale))
            else:
                args = ()
        elif attype == "double precision":
            args = (53,)
        elif attype == "integer":
            args = ()
        elif attype in ("timestamp with time zone", "time with time zone"):
            kwargs["timezone"] = True
            if charlen:
                kwargs["precision"] = int(charlen)
            args = ()
        elif attype in (
            "timestamp without time zone",
            "time without time zone",
            "time",
        ):
            kwargs["timezone"] = False
            if charlen:
                kwargs["precision"] = int(charlen)
            args = ()
        elif attype == "bit varying":
            kwargs["varying"] = True
            if charlen:
                args = (int(charlen),)
            else:
                args = ()
        elif attype.startswith("interval"):
            field_match = re.match(r"interval (.+)", attype, re.I)
            if charlen:
                kwargs["precision"] = int(charlen)
            if field_match:
                kwargs["fields"] = field_match.group(1)
            attype = "interval"
            args = ()
        elif charlen:
            args = (int(charlen),)

        while True:
            # looping here to suit nested domains
            if attype in self.ischema_names:
                coltype = self.ischema_names[attype]
                break
            elif enum_or_domain_key in enums:
                enum = enums[enum_or_domain_key]
                coltype = ENUM
                kwargs["name"] = enum["name"]
                if not enum["visible"]:
                    kwargs["schema"] = enum["schema"]
                args = tuple(enum["labels"])
                break
            elif enum_or_domain_key in domains:
                domain = domains[enum_or_domain_key]
                attype = domain["attype"]
                attype, is_array = _handle_array_type(attype)
                # strip quotes from case sensitive enum or domain names
                enum_or_domain_key = tuple(util.quoted_token_parser(attype))
                # A table can't override whether the domain is nullable.
                nullable = domain["nullable"]
                if domain["default"] and not default:
                    # It can, however, override the default
                    # value, but can't set it to null.
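                    # Worked example (added commentary): given
                    #   CREATE DOMAIN posint AS integer DEFAULT 5;
                    # a column declared as "posint" with no column-level
                    # default is reflected here with "5" as its server
                    # default.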
                    default = domain["default"]
                continue
            else:
                coltype = None
                break

        if coltype:
            coltype = coltype(*args, **kwargs)
            if is_array:
                coltype = self.ischema_names["_array"](coltype)
        else:
            util.warn(
                "Did not recognize type '%s' of column '%s'" % (attype, name)
            )
            coltype = sqltypes.NULLTYPE

        # If a zero byte (''), then not a generated column.
        # Otherwise, s = stored. (Other values might be added in the future.)
        if generated:
            computed = dict(sqltext=default, persisted=generated == "s")
            default = None
        else:
            computed = None

        # adjust the default value
        autoincrement = False
        if default is not None:
            match = re.search(r"""(nextval\(')([^']+)('.*$)""", default)
            if match is not None:
                if issubclass(coltype._type_affinity, sqltypes.Integer):
                    autoincrement = True
                # the default is related to a Sequence
                sch = schema
                if "." not in match.group(2) and sch is not None:
                    # unconditionally quote the schema name.  this could
                    # later be enhanced to obey quoting rules /
                    # "quote schema"
                    default = (
                        match.group(1)
                        + ('"%s"' % sch)
                        + "."
                        + match.group(2)
                        + match.group(3)
                    )

        column_info = dict(
            name=name,
            type=coltype,
            nullable=nullable,
            default=default,
            autoincrement=autoincrement,
            comment=comment,
        )
        if computed is not None:
            column_info["computed"] = computed
        return column_info

    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        table_oid = self.get_table_oid(
            connection, table_name, schema, info_cache=kw.get("info_cache")
        )

        if self.server_version_info < (8, 4):
            PK_SQL = """
                SELECT a.attname
                FROM
                    pg_class t
                    join pg_index ix on t.oid = ix.indrelid
                    join pg_attribute a
                        on t.oid=a.attrelid AND %s
                 WHERE
                  t.oid = :table_oid and ix.indisprimary = 't'
                ORDER BY a.attnum
            """ % self._pg_index_any(
                "a.attnum", "ix.indkey"
            )

        else:
            # unnest() and generate_subscripts() both introduced in
            # version 8.4
            PK_SQL = """
                SELECT a.attname
                FROM pg_attribute a JOIN (
                    SELECT unnest(ix.indkey) attnum,
                           generate_subscripts(ix.indkey, 1) ord
                    FROM pg_index ix
                    WHERE ix.indrelid = :table_oid AND ix.indisprimary
                    ) k ON a.attnum=k.attnum
                WHERE a.attrelid = :table_oid
                ORDER BY k.ord
            """
        t = sql.text(PK_SQL).columns(attname=sqltypes.Unicode)
        c = connection.execute(t, table_oid=table_oid)
        cols = [r[0] for r in c.fetchall()]

        PK_CONS_SQL = """
        SELECT conname
           FROM  pg_catalog.pg_constraint r
           WHERE r.conrelid = :table_oid AND r.contype = 'p'
           ORDER BY 1
        """
        t = sql.text(PK_CONS_SQL).columns(conname=sqltypes.Unicode)
        c = connection.execute(t, table_oid=table_oid)
        name = c.scalar()

        return {"constrained_columns": cols, "name": name}

    @reflection.cache
    def get_foreign_keys(
        self,
        connection,
        table_name,
        schema=None,
        postgresql_ignore_search_path=False,
        **kw
    ):
        preparer = self.identifier_preparer
        table_oid = self.get_table_oid(
            connection, table_name, schema, info_cache=kw.get("info_cache")
        )

        FK_SQL = """
          SELECT r.conname,
                pg_catalog.pg_get_constraintdef(r.oid, true) as condef,
                n.nspname as conschema
          FROM  pg_catalog.pg_constraint r,
                pg_namespace n,
                pg_class c
          WHERE r.conrelid = :table AND
                r.contype = 'f' AND
                c.oid = confrelid AND
                n.oid = c.relnamespace
          ORDER BY 1
        """
        # http://www.postgresql.org/docs/9.0/static/sql-createtable.html
        FK_REGEX = re.compile(
            r"FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)"
            r"[\s]?(MATCH (FULL|PARTIAL|SIMPLE)+)?"
            r"[\s]?(ON UPDATE "
            r"(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?"
            r"[\s]?(ON DELETE "
            r"(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?"
            r"[\s]?(DEFERRABLE|NOT DEFERRABLE)?"
            r"[\s]?(INITIALLY (DEFERRED|IMMEDIATE)+)?"
        )

        t = sql.text(FK_SQL).columns(
            conname=sqltypes.Unicode, condef=sqltypes.Unicode
        )
        c = connection.execute(t, table=table_oid)
        fkeys = []
        for conname, condef, conschema in c.fetchall():
            m = re.search(FK_REGEX, condef).groups()

            (
                constrained_columns,
                referred_schema,
                referred_table,
                referred_columns,
                _,
                match,
                _,
                onupdate,
                _,
                ondelete,
                deferrable,
                _,
                initially,
            ) = m

            if deferrable is not None:
                deferrable = True if deferrable == "DEFERRABLE" else False
            constrained_columns = [
                preparer._unquote_identifier(x)
                for x in re.split(r"\s*,\s*", constrained_columns)
            ]

            if postgresql_ignore_search_path:
                # when ignoring search path, we use the actual schema
                # provided it isn't the "default" schema
                if conschema != self.default_schema_name:
                    referred_schema = conschema
                else:
                    referred_schema = schema
            elif referred_schema:
                # referred_schema is the schema that we regexp'ed from
                # pg_get_constraintdef().  If the schema is in the search
                # path, pg_get_constraintdef() will give us None.
                referred_schema = preparer._unquote_identifier(
                    referred_schema
                )
            elif schema is not None and schema == conschema:
                # If the actual schema matches the schema of the table
                # we're reflecting, then we will use that.
                referred_schema = schema

            referred_table = preparer._unquote_identifier(referred_table)
            referred_columns = [
                preparer._unquote_identifier(x)
                for x in re.split(r"\s*,\s", referred_columns)
            ]
            fkey_d = {
                "name": conname,
                "constrained_columns": constrained_columns,
                "referred_schema": referred_schema,
                "referred_table": referred_table,
                "referred_columns": referred_columns,
                "options": {
                    "onupdate": onupdate,
                    "ondelete": ondelete,
                    "deferrable": deferrable,
                    "initially": initially,
                    "match": match,
                },
            }
            fkeys.append(fkey_d)
        return fkeys

    def _pg_index_any(self, col, compare_to):
        if self.server_version_info < (8, 1):
            # http://www.postgresql.org/message-id/10279.1124395722@sss.pgh.pa.us
            # "In CVS tip you could replace this with "attnum = ANY (indkey)".
            # Unfortunately, most array support doesn't work on int2vector in
            # pre-8.1 releases, so I think you're kinda stuck with the above
            # for now.
            # regards, tom lane"
            return "(%s)" % " OR ".join(
                "%s[%d] = %s" % (compare_to, ind, col) for ind in range(0, 10)
            )
        else:
            return "%s = ANY(%s)" % (col, compare_to)

    @reflection.cache
    def get_indexes(self, connection, table_name, schema, **kw):
        table_oid = self.get_table_oid(
            connection, table_name, schema, info_cache=kw.get("info_cache")
        )

        # cast indkey as varchar since it's an int2vector,
        # returned as a list by some drivers such as pypostgresql

        if self.server_version_info < (8, 5):
            IDX_SQL = """
              SELECT
                  i.relname as relname,
                  ix.indisunique, ix.indexprs, ix.indpred,
                  a.attname, a.attnum, NULL, ix.indkey%s,
                  %s, %s, am.amname,
                  NULL as indnkeyatts
              FROM
                  pg_class t
                        join pg_index ix on t.oid = ix.indrelid
                        join pg_class i on i.oid = ix.indexrelid
                        left outer join
                            pg_attribute a
                            on t.oid = a.attrelid and %s
                        left outer join
                            pg_am am
                            on i.relam = am.oid
              WHERE
                  t.relkind IN ('r', 'v', 'f', 'm')
                  and t.oid = :table_oid
                  and ix.indisprimary = 'f'
              ORDER BY
                  t.relname,
                  i.relname
            """ % (
                # version 8.3 here was based on observing the
                # cast does not work in PG 8.2.4, does work in 8.3.0.
                # nothing in PG changelogs regarding this.
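                # Added note (illustrative): ix.indkey is an int2vector
                # rendered like '1 3 2'; casting to varchar lets the driver
                # hand back a plain string that .split() can consume below.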
"::varchar" if self.server_version_info >= (8, 3) else "", "ix.indoption::varchar" if self.server_version_info >= (8, 3) else "NULL", "i.reloptions" if self.server_version_info >= (8, 2) else "NULL", self._pg_index_any("a.attnum", "ix.indkey"), ) else: IDX_SQL = """ SELECT i.relname as relname, ix.indisunique, ix.indexprs, ix.indpred, a.attname, a.attnum, c.conrelid, ix.indkey::varchar, ix.indoption::varchar, i.reloptions, am.amname, %s as indnkeyatts FROM pg_class t join pg_index ix on t.oid = ix.indrelid join pg_class i on i.oid = ix.indexrelid left outer join pg_attribute a on t.oid = a.attrelid and a.attnum = ANY(ix.indkey) left outer join pg_constraint c on (ix.indrelid = c.conrelid and ix.indexrelid = c.conindid and c.contype in ('p', 'u', 'x')) left outer join pg_am am on i.relam = am.oid WHERE t.relkind IN ('r', 'v', 'f', 'm', 'p') and t.oid = :table_oid and ix.indisprimary = 'f' ORDER BY t.relname, i.relname """ % ( "ix.indnkeyatts" if self.server_version_info >= (11, 0) else "NULL", ) t = sql.text(IDX_SQL).columns( relname=sqltypes.Unicode, attname=sqltypes.Unicode ) c = connection.execute(t, table_oid=table_oid) indexes = defaultdict(lambda: defaultdict(dict)) sv_idx_name = None for row in c.fetchall(): ( idx_name, unique, expr, prd, col, col_num, conrelid, idx_key, idx_option, options, amname, indnkeyatts, ) = row if expr: if idx_name != sv_idx_name: util.warn( "Skipped unsupported reflection of " "expression-based index %s" % idx_name ) sv_idx_name = idx_name continue if prd and not idx_name == sv_idx_name: util.warn( "Predicate of partial index %s ignored during reflection" % idx_name ) sv_idx_name = idx_name has_idx = idx_name in indexes index = indexes[idx_name] if col is not None: index["cols"][col_num] = col if not has_idx: idx_keys = idx_key.split() # "The number of key columns in the index, not counting any # included columns, which are merely stored and do not # participate in the index semantics" if indnkeyatts and idx_keys[indnkeyatts:]: util.warn( "INCLUDE columns for covering index %s " "ignored during reflection" % (idx_name,) ) idx_keys = idx_keys[:indnkeyatts] index["key"] = [int(k.strip()) for k in idx_keys] # (new in pg 8.3) # "pg_index.indoption" is list of ints, one per column/expr. # int acts as bitmask: 0x01=DESC, 0x02=NULLSFIRST sorting = {} for col_idx, col_flags in enumerate( (idx_option or "").split() ): col_flags = int(col_flags.strip()) col_sorting = () # try to set flags only if they differ from PG defaults... if col_flags & 0x01: col_sorting += ("desc",) if not (col_flags & 0x02): col_sorting += ("nullslast",) else: if col_flags & 0x02: col_sorting += ("nullsfirst",) if col_sorting: sorting[col_idx] = col_sorting if sorting: index["sorting"] = sorting index["unique"] = unique if conrelid is not None: index["duplicates_constraint"] = idx_name if options: index["options"] = dict( [option.split("=") for option in options] ) # it *might* be nice to include that this is 'btree' in the # reflection info. But we don't want an Index object # to have a ``postgresql_using`` in it that is just the # default, so for the moment leaving this out. 
                if amname and amname != "btree":
                    index["amname"] = amname

        result = []
        for name, idx in indexes.items():
            entry = {
                "name": name,
                "unique": idx["unique"],
                "column_names": [idx["cols"][i] for i in idx["key"]],
            }
            if "duplicates_constraint" in idx:
                entry["duplicates_constraint"] = idx["duplicates_constraint"]
            if "sorting" in idx:
                entry["column_sorting"] = dict(
                    (idx["cols"][idx["key"][i]], value)
                    for i, value in idx["sorting"].items()
                )
            if "options" in idx:
                entry.setdefault("dialect_options", {})[
                    "postgresql_with"
                ] = idx["options"]
            if "amname" in idx:
                entry.setdefault("dialect_options", {})[
                    "postgresql_using"
                ] = idx["amname"]
            result.append(entry)
        return result

    @reflection.cache
    def get_unique_constraints(
        self, connection, table_name, schema=None, **kw
    ):
        table_oid = self.get_table_oid(
            connection, table_name, schema, info_cache=kw.get("info_cache")
        )

        UNIQUE_SQL = """
            SELECT
                cons.conname as name,
                cons.conkey as key,
                a.attnum as col_num,
                a.attname as col_name
            FROM
                pg_catalog.pg_constraint cons
                join pg_attribute a
                  on cons.conrelid = a.attrelid AND
                    a.attnum = ANY(cons.conkey)
            WHERE
                cons.conrelid = :table_oid AND
                cons.contype = 'u'
        """

        t = sql.text(UNIQUE_SQL).columns(col_name=sqltypes.Unicode)
        c = connection.execute(t, table_oid=table_oid)

        uniques = defaultdict(lambda: defaultdict(dict))
        for row in c.fetchall():
            uc = uniques[row.name]
            uc["key"] = row.key
            uc["cols"][row.col_num] = row.col_name

        return [
            {"name": name, "column_names": [uc["cols"][i] for i in uc["key"]]}
            for name, uc in uniques.items()
        ]

    @reflection.cache
    def get_table_comment(self, connection, table_name, schema=None, **kw):
        table_oid = self.get_table_oid(
            connection, table_name, schema, info_cache=kw.get("info_cache")
        )

        COMMENT_SQL = """
            SELECT
                pgd.description as table_comment
            FROM
                pg_catalog.pg_description pgd
            WHERE
                pgd.objsubid = 0 AND
                pgd.objoid = :table_oid
        """

        c = connection.execute(sql.text(COMMENT_SQL), table_oid=table_oid)
        return {"text": c.scalar()}

    @reflection.cache
    def get_check_constraints(
        self, connection, table_name, schema=None, **kw
    ):
        table_oid = self.get_table_oid(
            connection, table_name, schema, info_cache=kw.get("info_cache")
        )

        CHECK_SQL = """
            SELECT
                cons.conname as name,
                pg_get_constraintdef(cons.oid) as src
            FROM
                pg_catalog.pg_constraint cons
            WHERE
                cons.conrelid = :table_oid AND
                cons.contype = 'c'
        """

        c = connection.execute(sql.text(CHECK_SQL), table_oid=table_oid)

        ret = []
        for name, src in c:
            # samples:
            # "CHECK (((a > 1) AND (a < 5)))"
            # "CHECK (((a = 1) OR ((a > 2) AND (a < 5))))"
            # "CHECK (((a > 1) AND (a < 5))) NOT VALID"
            # "CHECK (some_boolean_function(a))"
            # "CHECK (((a\n < 1)\n OR\n (a\n >= 5))\n)"

            m = re.match(
                r"^CHECK *\((.+)\)( NOT VALID)?$", src, flags=re.DOTALL
            )
            if not m:
                util.warn("Could not parse CHECK constraint text: %r" % src)
                sqltext = ""
            else:
                sqltext = re.compile(
                    r"^[\s\n]*\((.+)\)[\s\n]*$", flags=re.DOTALL
                ).sub(r"\1", m.group(1))
            entry = {"name": name, "sqltext": sqltext}
            if m and m.group(2):
                entry["dialect_options"] = {"not_valid": True}
            ret.append(entry)
        return ret

    def _load_enums(self, connection, schema=None):
        schema = schema or self.default_schema_name
        if not self.supports_native_enum:
            return {}

        # Load data types for enums:
        SQL_ENUMS = """
            SELECT t.typname as "name",
               -- no enum defaults in 8.4 at least
               -- t.typdefault as "default",
               pg_catalog.pg_type_is_visible(t.oid) as "visible",
               n.nspname as "schema",
               e.enumlabel as "label"
            FROM pg_catalog.pg_type t
                 LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
                 LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid
            WHERE
              t.typtype = 'e'
        """

        if schema != "*":
            SQL_ENUMS += "AND n.nspname = :schema "

        # e.oid gives us label order within an enum
        SQL_ENUMS += 'ORDER BY "schema", "name", e.oid'

        s = sql.text(SQL_ENUMS).columns(
            attname=sqltypes.Unicode, label=sqltypes.Unicode
        )

        if schema != "*":
            s = s.bindparams(schema=schema)

        c = connection.execute(s)

        enums = []
        enum_by_name = {}
        for enum in c.fetchall():
            key = (enum["schema"], enum["name"])
            if key in enum_by_name:
                enum_by_name[key]["labels"].append(enum["label"])
            else:
                enum_by_name[key] = enum_rec = {
                    "name": enum["name"],
                    "schema": enum["schema"],
                    "visible": enum["visible"],
                    "labels": [],
                }
                if enum["label"] is not None:
                    enum_rec["labels"].append(enum["label"])
                enums.append(enum_rec)
        return enums

    def _load_domains(self, connection):
        # Load data types for domains:
        SQL_DOMAINS = """
            SELECT t.typname as "name",
               pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype",
               not t.typnotnull as "nullable",
               t.typdefault as "default",
               pg_catalog.pg_type_is_visible(t.oid) as "visible",
               n.nspname as "schema"
            FROM pg_catalog.pg_type t
               LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
            WHERE t.typtype = 'd'
        """

        s = sql.text(SQL_DOMAINS).columns(attname=sqltypes.Unicode)
        c = connection.execute(s)

        domains = {}
        for domain in c.fetchall():
            # strip (30) from character varying(30)
            attype = re.search(r"([^\(]+)", domain["attype"]).group(1)
            # 'visible' just means whether or not the domain is in a
            # schema that's on the search path -- or not overridden by
            # a schema with higher precedence.  If it's not visible,
            # it will be prefixed with the schema-name when it's used.
            if domain["visible"]:
                key = (domain["name"],)
            else:
                key = (domain["schema"], domain["name"])

            domains[key] = {
                "attype": attype,
                "nullable": domain["nullable"],
                "default": domain["default"],
            }

        return domains
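
# Hedged usage sketch (added commentary, not part of the upstream module):
# the reflection queries above are normally reached through an Inspector::
#
#     from sqlalchemy import create_engine, inspect
#     engine = create_engine("postgresql://scott:tiger@localhost/test")
#     insp = inspect(engine)           # a PGInspector for this dialect
#     insp.get_enums(schema="public")  # -> list of enum dicts
#     insp.get_view_names(include=("plain", "materialized"))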
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/sqlalchemy/dialects/postgresql/ext.py
# postgresql/ext.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

from .array import ARRAY
from ...sql import elements
from ...sql import expression
from ...sql import functions
from ...sql.schema import ColumnCollectionConstraint


class aggregate_order_by(expression.ColumnElement):
    """Represent a PostgreSQL aggregate order by expression.

    E.g.::

        from sqlalchemy.dialects.postgresql import aggregate_order_by
        expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
        stmt = select([expr])

    would represent the expression::

        SELECT array_agg(a ORDER BY b DESC) FROM table;

    Similarly::

        expr = func.string_agg(
            table.c.a,
            aggregate_order_by(literal_column("','"), table.c.a)
        )
        stmt = select([expr])

    Would represent::

        SELECT string_agg(a, ',' ORDER BY a) FROM table;

    .. versionadded:: 1.1

    .. versionchanged:: 1.2.13 - the ORDER BY argument may be multiple terms

    .. seealso::

        :class:`_functions.array_agg`

    """

    __visit_name__ = "aggregate_order_by"

    def __init__(self, target, *order_by):
        self.target = elements._literal_as_binds(target)

        _lob = len(order_by)
        if _lob == 0:
            raise TypeError("at least one ORDER BY element is required")
        elif _lob == 1:
            self.order_by = elements._literal_as_binds(order_by[0])
        else:
            self.order_by = elements.ClauseList(
                *order_by, _literal_as_text=elements._literal_as_binds
            )

    def self_group(self, against=None):
        return self

    def get_children(self, **kwargs):
        return self.target, self.order_by

    def _copy_internals(self, clone=elements._clone, **kw):
        self.target = clone(self.target, **kw)
        self.order_by = clone(self.order_by, **kw)

    @property
    def _from_objects(self):
        return self.target._from_objects + self.order_by._from_objects


class ExcludeConstraint(ColumnCollectionConstraint):
    """A table-level EXCLUDE constraint.

    Defines an EXCLUDE constraint as described in the `postgres
    documentation`__.

    __ http://www.postgresql.org/docs/9.0/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE

    """  # noqa

    __visit_name__ = "exclude_constraint"

    where = None

    @elements._document_text_coercion(
        "where",
        ":class:`.ExcludeConstraint`",
        ":paramref:`.ExcludeConstraint.where`",
    )
    def __init__(self, *elements, **kw):
        r"""
        Create an :class:`.ExcludeConstraint` object.

        E.g.::

            const = ExcludeConstraint(
                (Column('period'), '&&'),
                (Column('group'), '='),
                where=(Column('group') != 'some group')
            )

        The constraint is normally embedded into the :class:`_schema.Table`
        construct directly, or added later using :meth:`.append_constraint`::

            some_table = Table(
                'some_table', metadata,
                Column('id', Integer, primary_key=True),
                Column('period', TSRANGE()),
                Column('group', String)
            )

            some_table.append_constraint(
                ExcludeConstraint(
                    (some_table.c.period, '&&'),
                    (some_table.c.group, '='),
                    where=some_table.c.group != 'some group',
                    name='some_table_excl_const'
                )
            )

        :param \*elements:

          A sequence of two tuples of the form ``(column, operator)`` where
          "column" is a SQL expression element or a raw SQL string, most
          typically a :class:`_schema.Column` object, and "operator" is a
          string containing the operator to use.

          In order to specify a column name when a :class:`_schema.Column`
          object is not available, while ensuring that any necessary quoting
          rules take effect, an ad-hoc :class:`_schema.Column` or
          :func:`_expression.column` object should be used.

        :param name:
          Optional, the in-database name of this constraint.

        :param deferrable:
          Optional bool.
          If set, emit DEFERRABLE or NOT DEFERRABLE when
          issuing DDL for this constraint.

        :param initially:
          Optional string. If set, emit INITIALLY <value> when issuing DDL
          for this constraint.

        :param using:
          Optional string.  If set, emit USING <index_method> when issuing
          DDL for this constraint. Defaults to 'gist'.

        :param where:
          Optional SQL expression construct or literal SQL string.
          If set, emit WHERE <predicate> when issuing DDL
          for this constraint.

        """
        columns = []
        render_exprs = []
        self.operators = {}

        expressions, operators = zip(*elements)

        for (expr, column, strname, add_element), operator in zip(
            self._extract_col_expression_collection(expressions), operators
        ):
            if add_element is not None:
                columns.append(add_element)

            name = column.name if column is not None else strname

            if name is not None:
                # backwards compat
                self.operators[name] = operator

            expr = expression._literal_as_column(expr)

            render_exprs.append((expr, name, operator))

        self._render_exprs = render_exprs

        ColumnCollectionConstraint.__init__(
            self,
            *columns,
            name=kw.get("name"),
            deferrable=kw.get("deferrable"),
            initially=kw.get("initially")
        )
        self.using = kw.get("using", "gist")
        where = kw.get("where")
        if where is not None:
            self.where = expression._literal_as_text(
                where, allow_coercion_to_text=True
            )

    def copy(self, **kw):
        elements = [(col, self.operators[col]) for col in self.columns.keys()]
        c = self.__class__(
            *elements,
            name=self.name,
            deferrable=self.deferrable,
            initially=self.initially,
            where=self.where,
            using=self.using
        )
        c.dispatch._update(self.dispatch)
        return c


def array_agg(*arg, **kw):
    """PostgreSQL-specific form of :class:`_functions.array_agg`, ensures
    return type is :class:`_postgresql.ARRAY` and not
    the plain :class:`_types.ARRAY`, unless an explicit ``type_``
    is passed.

    .. versionadded:: 1.1

    """
    kw["_default_array_type"] = ARRAY
    return functions.func.array_agg(*arg, **kw)
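
# Hedged usage sketch (added commentary, not part of the upstream module)::
#
#     from sqlalchemy import column, select, table
#     from sqlalchemy.dialects.postgresql import (
#         aggregate_order_by, array_agg,
#     )
#
#     t = table("t", column("a"), column("b"))
#     stmt = select([array_agg(aggregate_order_by(t.c.a, t.c.b.desc()))])
#     # renders: SELECT array_agg(t.a ORDER BY t.b DESC) AS array_agg_1 FROM t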