diff --git a/testbed/django__django/django/contrib/sessions/__init__.py b/testbed/django__django/django/contrib/sessions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/django__django/django/contrib/sessions/apps.py b/testbed/django__django/django/contrib/sessions/apps.py new file mode 100644 index 0000000000000000000000000000000000000000..83f0cefa2d9a5fc13125b8e68af89904e46c4d6b --- /dev/null +++ b/testbed/django__django/django/contrib/sessions/apps.py @@ -0,0 +1,7 @@ +from django.apps import AppConfig +from django.utils.translation import gettext_lazy as _ + + +class SessionsConfig(AppConfig): + name = "django.contrib.sessions" + verbose_name = _("Sessions") diff --git a/testbed/django__django/django/contrib/sessions/base_session.py b/testbed/django__django/django/contrib/sessions/base_session.py new file mode 100644 index 0000000000000000000000000000000000000000..603d2fe12cd4569049832ece1f298f05e4a45613 --- /dev/null +++ b/testbed/django__django/django/contrib/sessions/base_session.py @@ -0,0 +1,47 @@ +""" +This module allows importing AbstractBaseSession even +when django.contrib.sessions is not in INSTALLED_APPS. +""" +from django.db import models +from django.utils.translation import gettext_lazy as _ + + +class BaseSessionManager(models.Manager): + def encode(self, session_dict): + """ + Return the given session dictionary serialized and encoded as a string. + """ + session_store_class = self.model.get_session_store_class() + return session_store_class().encode(session_dict) + + def save(self, session_key, session_dict, expire_date): + s = self.model(session_key, self.encode(session_dict), expire_date) + if session_dict: + s.save() + else: + s.delete() # Clear sessions with no data. 
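+        # Either way the instance is handed back to the caller, whether
+        # it was persisted or deleted above.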
+ return s + + +class AbstractBaseSession(models.Model): + session_key = models.CharField(_("session key"), max_length=40, primary_key=True) + session_data = models.TextField(_("session data")) + expire_date = models.DateTimeField(_("expire date"), db_index=True) + + objects = BaseSessionManager() + + class Meta: + abstract = True + verbose_name = _("session") + verbose_name_plural = _("sessions") + + def __str__(self): + return self.session_key + + @classmethod + def get_session_store_class(cls): + raise NotImplementedError + + def get_decoded(self): + session_store_class = self.get_session_store_class() + return session_store_class().decode(self.session_data) diff --git a/testbed/django__django/django/contrib/sessions/exceptions.py b/testbed/django__django/django/contrib/sessions/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..8a4853c50249d9adfc3a7589394fa527642b4b8b --- /dev/null +++ b/testbed/django__django/django/contrib/sessions/exceptions.py @@ -0,0 +1,19 @@ +from django.core.exceptions import BadRequest, SuspiciousOperation + + +class InvalidSessionKey(SuspiciousOperation): + """Invalid characters in session key""" + + pass + + +class SuspiciousSession(SuspiciousOperation): + """The session may be tampered with""" + + pass + + +class SessionInterrupted(BadRequest): + """The session was interrupted.""" + + pass diff --git a/testbed/django__django/django/contrib/sessions/locale/ckb/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sessions/locale/ckb/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..24a4cc6c6dc4bc890c457ae40b2a1f9ad1469643 Binary files /dev/null and b/testbed/django__django/django/contrib/sessions/locale/ckb/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sessions/locale/ckb/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sessions/locale/ckb/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..fb8d23a58b5ec0d9a264b6f3471e47b7fa19aa24 --- /dev/null +++ b/testbed/django__django/django/contrib/sessions/locale/ckb/LC_MESSAGES/django.po @@ -0,0 +1,36 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# kosar tofiq , 2020 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2023-04-24 18:26+0000\n" +"Last-Translator: kosar tofiq , 2020\n" +"Language-Team: Central Kurdish (http://www.transifex.com/django/django/" +"language/ckb/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ckb\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sessions" +msgstr "کۆڕەکان" + +msgid "session key" +msgstr "کلیلی کۆڕ" + +msgid "session data" +msgstr "دراوەی کۆڕ" + +msgid "expire date" +msgstr "بەرواری بەسەرچوون" + +msgid "session" +msgstr "کۆڕ" + +msgid "sessions" +msgstr "کۆڕەکان" diff --git a/testbed/django__django/django/contrib/sessions/locale/tt/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sessions/locale/tt/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..d74b9f417302a601415f3f577b8709567f6e2a8e --- /dev/null +++ b/testbed/django__django/django/contrib/sessions/locale/tt/LC_MESSAGES/django.po @@ -0,0 +1,36 @@ +# This file is distributed under the same license as the Django package. 
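
Aside on the base classes above: AbstractBaseSession and BaseSessionManager are kept importable without django.contrib.sessions in INSTALLED_APPS precisely so projects can define custom session models. A minimal sketch of that pattern, assuming a hypothetical myapp with an extra account_id column; only the two base classes come from this patch:

    from django.contrib.sessions.backends.db import SessionStore as DBStore
    from django.contrib.sessions.base_session import (
        AbstractBaseSession,
        BaseSessionManager,
    )
    from django.db import models


    class CustomSessionManager(BaseSessionManager):
        use_in_migrations = True


    class CustomSession(AbstractBaseSession):
        # Hypothetical extra column, e.g. for per-account session lookups.
        account_id = models.IntegerField(null=True, db_index=True)

        objects = CustomSessionManager()

        @classmethod
        def get_session_store_class(cls):
            # Resolved at call time, so the subclass below is picked up.
            return SessionStore


    class SessionStore(DBStore):
        @classmethod
        def get_model_class(cls):
            return CustomSession
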
+# +# Translators: +# Azat Khasanshin , 2011 +# v_ildar , 2014 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Tatar (http://www.transifex.com/django/django/language/tt/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: tt\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +msgid "Sessions" +msgstr "Эш сеанслары" + +msgid "session key" +msgstr "эш сеансы ачкычы" + +msgid "session data" +msgstr "эш сеансы бирелмәләре" + +msgid "expire date" +msgstr "искерү көне" + +msgid "session" +msgstr "эш сеансы" + +msgid "sessions" +msgstr "эш сеанслары" diff --git a/testbed/django__django/django/contrib/sessions/locale/udm/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sessions/locale/udm/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..af7ab53bb6735a670114ecf738e973e4f5831251 Binary files /dev/null and b/testbed/django__django/django/contrib/sessions/locale/udm/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sessions/locale/uk/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sessions/locale/uk/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..befabf065399392f7994bdb8ea3bc3ef86b68db5 --- /dev/null +++ b/testbed/django__django/django/contrib/sessions/locale/uk/LC_MESSAGES/django.po @@ -0,0 +1,40 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Oleksandr Chernihov , 2014 +# Jannis Leidel , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-23 18:54+0000\n" +"Last-Translator: Mykola Zamkovoi \n" +"Language-Team: Ukrainian (http://www.transifex.com/django/django/language/" +"uk/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: uk\n" +"Plural-Forms: nplurals=4; plural=(n % 1 == 0 && n % 10 == 1 && n % 100 != " +"11 ? 0 : n % 1 == 0 && n % 10 >= 2 && n % 10 <= 4 && (n % 100 < 12 || n % " +"100 > 14) ? 1 : n % 1 == 0 && (n % 10 ==0 || (n % 10 >=5 && n % 10 <=9) || " +"(n % 100 >=11 && n % 100 <=14 )) ? 2: 3);\n" + +msgid "Sessions" +msgstr "Сесії" + +msgid "session key" +msgstr "ключ сесії" + +msgid "session data" +msgstr "дані сесії" + +msgid "expire date" +msgstr "термін придатності" + +msgid "session" +msgstr "сесія" + +msgid "sessions" +msgstr "сесії" diff --git a/testbed/django__django/django/contrib/sessions/locale/ur/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sessions/locale/ur/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..62f625561bd46adb5f45529f72267daa63938a2b --- /dev/null +++ b/testbed/django__django/django/contrib/sessions/locale/ur/LC_MESSAGES/django.po @@ -0,0 +1,35 @@ +# This file is distributed under the same license as the Django package. 
+# +# Translators: +# Mansoorulhaq Mansoor , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Urdu (http://www.transifex.com/django/django/language/ur/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ur\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sessions" +msgstr "" + +msgid "session key" +msgstr "کلید نشست" + +msgid "session data" +msgstr "نشست کا ڈیٹا" + +msgid "expire date" +msgstr "مدت ختم ھونے کی تاریخ" + +msgid "session" +msgstr "نشست" + +msgid "sessions" +msgstr "نشستیں" diff --git a/testbed/django__django/django/contrib/sessions/locale/uz/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sessions/locale/uz/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..9346a6c382af951824992dbee8d3482a1670aefe Binary files /dev/null and b/testbed/django__django/django/contrib/sessions/locale/uz/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sessions/locale/vi/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sessions/locale/vi/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..69b82fd33a4128ce356ed31738d577305ea28117 Binary files /dev/null and b/testbed/django__django/django/contrib/sessions/locale/vi/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sessions/locale/zh_Hant/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sessions/locale/zh_Hant/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..ad835229439f245b21807e1af47d5a89f469f3f9 Binary files /dev/null and b/testbed/django__django/django/contrib/sessions/locale/zh_Hant/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sessions/locale/zh_Hant/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sessions/locale/zh_Hant/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..ef202b9f41a76e399836b2188b2ed5c9a58f7172 --- /dev/null +++ b/testbed/django__django/django/contrib/sessions/locale/zh_Hant/LC_MESSAGES/django.po @@ -0,0 +1,37 @@ +# This file is distributed under the same license as the Django package. 
+# +# Translators: +# Chen Chun-Chia , 2015 +# Jannis Leidel , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Chen Chun-Chia \n" +"Language-Team: Chinese (Taiwan) (http://www.transifex.com/django/django/" +"language/zh_TW/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: zh_TW\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +msgid "Sessions" +msgstr "Sessions" + +msgid "session key" +msgstr "session 鍵值" + +msgid "session data" +msgstr "session 資料" + +msgid "expire date" +msgstr "到期日期" + +msgid "session" +msgstr "session" + +msgid "sessions" +msgstr "sessions" diff --git a/testbed/django__django/django/contrib/sessions/management/__init__.py b/testbed/django__django/django/contrib/sessions/management/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/django__django/django/contrib/sessions/management/commands/__init__.py b/testbed/django__django/django/contrib/sessions/management/commands/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/django__django/django/contrib/sessions/management/commands/clearsessions.py b/testbed/django__django/django/contrib/sessions/management/commands/clearsessions.py new file mode 100644 index 0000000000000000000000000000000000000000..899f537986b322af72f9cfebeed410edf17d191e --- /dev/null +++ b/testbed/django__django/django/contrib/sessions/management/commands/clearsessions.py @@ -0,0 +1,21 @@ +from importlib import import_module + +from django.conf import settings +from django.core.management.base import BaseCommand, CommandError + + +class Command(BaseCommand): + help = ( + "Can be run as a cronjob or directly to clean out expired sessions " + "when the backend supports it." + ) + + def handle(self, **options): + engine = import_module(settings.SESSION_ENGINE) + try: + engine.SessionStore.clear_expired() + except NotImplementedError: + raise CommandError( + "Session engine '%s' doesn't support clearing expired " + "sessions." 
% settings.SESSION_ENGINE + ) diff --git a/testbed/django__django/django/contrib/sessions/middleware.py b/testbed/django__django/django/contrib/sessions/middleware.py new file mode 100644 index 0000000000000000000000000000000000000000..9c934f9dddab659f383358021f256550affbe14f --- /dev/null +++ b/testbed/django__django/django/contrib/sessions/middleware.py @@ -0,0 +1,77 @@ +import time +from importlib import import_module + +from django.conf import settings +from django.contrib.sessions.backends.base import UpdateError +from django.contrib.sessions.exceptions import SessionInterrupted +from django.utils.cache import patch_vary_headers +from django.utils.deprecation import MiddlewareMixin +from django.utils.http import http_date + + +class SessionMiddleware(MiddlewareMixin): + def __init__(self, get_response): + super().__init__(get_response) + engine = import_module(settings.SESSION_ENGINE) + self.SessionStore = engine.SessionStore + + def process_request(self, request): + session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME) + request.session = self.SessionStore(session_key) + + def process_response(self, request, response): + """ + If request.session was modified, or if the configuration is to save the + session every time, save the changes and set a session cookie or delete + the session cookie if the session has been emptied. + """ + try: + accessed = request.session.accessed + modified = request.session.modified + empty = request.session.is_empty() + except AttributeError: + return response + # First check if we need to delete this cookie. + # The session should be deleted only if the session is entirely empty. + if settings.SESSION_COOKIE_NAME in request.COOKIES and empty: + response.delete_cookie( + settings.SESSION_COOKIE_NAME, + path=settings.SESSION_COOKIE_PATH, + domain=settings.SESSION_COOKIE_DOMAIN, + samesite=settings.SESSION_COOKIE_SAMESITE, + ) + patch_vary_headers(response, ("Cookie",)) + else: + if accessed: + patch_vary_headers(response, ("Cookie",)) + if (modified or settings.SESSION_SAVE_EVERY_REQUEST) and not empty: + if request.session.get_expire_at_browser_close(): + max_age = None + expires = None + else: + max_age = request.session.get_expiry_age() + expires_time = time.time() + max_age + expires = http_date(expires_time) + # Save the session data and refresh the client cookie. + # Skip session save for 5xx responses. + if response.status_code < 500: + try: + request.session.save() + except UpdateError: + raise SessionInterrupted( + "The request's session was deleted before the " + "request completed. The user may have logged " + "out in a concurrent request, for example." 
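
The clearsessions command shown above is meant to run periodically (e.g. from cron). It can also be driven from Python via the standard management API; a small sketch:

    from django.core.management import call_command

    # Delegates to SessionStore.clear_expired(); raises CommandError if
    # the configured SESSION_ENGINE does not implement it.
    call_command("clearsessions")
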
+ ) + response.set_cookie( + settings.SESSION_COOKIE_NAME, + request.session.session_key, + max_age=max_age, + expires=expires, + domain=settings.SESSION_COOKIE_DOMAIN, + path=settings.SESSION_COOKIE_PATH, + secure=settings.SESSION_COOKIE_SECURE or None, + httponly=settings.SESSION_COOKIE_HTTPONLY or None, + samesite=settings.SESSION_COOKIE_SAMESITE, + ) + return response diff --git a/testbed/django__django/django/contrib/sessions/migrations/0001_initial.py b/testbed/django__django/django/contrib/sessions/migrations/0001_initial.py new file mode 100644 index 0000000000000000000000000000000000000000..42e382aa3267f19bdf886f1d8062329c359cbd7d --- /dev/null +++ b/testbed/django__django/django/contrib/sessions/migrations/0001_initial.py @@ -0,0 +1,37 @@ +import django.contrib.sessions.models +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [] + + operations = [ + migrations.CreateModel( + name="Session", + fields=[ + ( + "session_key", + models.CharField( + max_length=40, + serialize=False, + verbose_name="session key", + primary_key=True, + ), + ), + ("session_data", models.TextField(verbose_name="session data")), + ( + "expire_date", + models.DateTimeField(verbose_name="expire date", db_index=True), + ), + ], + options={ + "abstract": False, + "db_table": "django_session", + "verbose_name": "session", + "verbose_name_plural": "sessions", + }, + managers=[ + ("objects", django.contrib.sessions.models.SessionManager()), + ], + ), + ] diff --git a/testbed/django__django/django/contrib/sessions/migrations/__init__.py b/testbed/django__django/django/contrib/sessions/migrations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/django__django/django/contrib/sessions/models.py b/testbed/django__django/django/contrib/sessions/models.py new file mode 100644 index 0000000000000000000000000000000000000000..e786ab4eac4262a5e4b628e3e39dad4e224a546a --- /dev/null +++ b/testbed/django__django/django/contrib/sessions/models.py @@ -0,0 +1,35 @@ +from django.contrib.sessions.base_session import AbstractBaseSession, BaseSessionManager + + +class SessionManager(BaseSessionManager): + use_in_migrations = True + + +class Session(AbstractBaseSession): + """ + Django provides full support for anonymous sessions. The session + framework lets you store and retrieve arbitrary data on a + per-site-visitor basis. It stores data on the server side and + abstracts the sending and receiving of cookies. Cookies contain a + session ID -- not the data itself. + + The Django sessions framework is entirely cookie-based. It does + not fall back to putting session IDs in URLs. This is an intentional + design decision. Not only does that behavior make URLs ugly, it makes + your site vulnerable to session-ID theft via the "Referer" header. + + For complete documentation on using Sessions in your code, consult + the sessions documentation that is shipped with Django (also available + on the Django web site). 
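
For orientation, these are the settings the middleware above consults; the values shown are Django's defaults, and the surrounding project is hypothetical:

    # settings.py (hypothetical project), defaults written out explicitly.
    INSTALLED_APPS = [
        # ...
        "django.contrib.sessions",
    ]
    MIDDLEWARE = [
        # ...
        "django.contrib.sessions.middleware.SessionMiddleware",
    ]
    # Module whose SessionStore the middleware imports at startup.
    SESSION_ENGINE = "django.contrib.sessions.backends.db"
    SESSION_COOKIE_NAME = "sessionid"
    SESSION_SAVE_EVERY_REQUEST = False  # save only when modified
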
+ """ + + objects = SessionManager() + + @classmethod + def get_session_store_class(cls): + from django.contrib.sessions.backends.db import SessionStore + + return SessionStore + + class Meta(AbstractBaseSession.Meta): + db_table = "django_session" diff --git a/testbed/django__django/django/contrib/sessions/serializers.py b/testbed/django__django/django/contrib/sessions/serializers.py new file mode 100644 index 0000000000000000000000000000000000000000..16a248cc65541324485df28be87571499f51d52c --- /dev/null +++ b/testbed/django__django/django/contrib/sessions/serializers.py @@ -0,0 +1,3 @@ +from django.core.signing import JSONSerializer as BaseJSONSerializer + +JSONSerializer = BaseJSONSerializer diff --git a/testbed/django__django/django/contrib/sitemaps/__init__.py b/testbed/django__django/django/contrib/sitemaps/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dd21d8829ddbcb95d307e5f0ae449a0120bf1cb1 --- /dev/null +++ b/testbed/django__django/django/contrib/sitemaps/__init__.py @@ -0,0 +1,200 @@ +from django.apps import apps as django_apps +from django.conf import settings +from django.core import paginator +from django.core.exceptions import ImproperlyConfigured +from django.utils import translation + + +class Sitemap: + # This limit is defined by Google. See the index documentation at + # https://www.sitemaps.org/protocol.html#index. + limit = 50000 + + # If protocol is None, the URLs in the sitemap will use the protocol + # with which the sitemap was requested. + protocol = None + + # Enables generating URLs for all languages. + i18n = False + + # Override list of languages to use. + languages = None + + # Enables generating alternate/hreflang links. + alternates = False + + # Add an alternate/hreflang link with value 'x-default'. + x_default = False + + def _get(self, name, item, default=None): + try: + attr = getattr(self, name) + except AttributeError: + return default + if callable(attr): + if self.i18n: + # Split the (item, lang_code) tuples again for the location, + # priority, lastmod and changefreq method calls. + item, lang_code = item + return attr(item) + return attr + + def get_languages_for_item(self, item): + """Languages for which this item is displayed.""" + return self._languages() + + def _languages(self): + if self.languages is not None: + return self.languages + return [lang_code for lang_code, _ in settings.LANGUAGES] + + def _items(self): + if self.i18n: + # Create (item, lang_code) tuples for all items and languages. + # This is necessary to paginate with all languages already considered. + items = [ + (item, lang_code) + for item in self.items() + for lang_code in self.get_languages_for_item(item) + ] + return items + return self.items() + + def _location(self, item, force_lang_code=None): + if self.i18n: + obj, lang_code = item + # Activate language from item-tuple or forced one before calling location. 
+ with translation.override(force_lang_code or lang_code): + return self._get("location", item) + return self._get("location", item) + + @property + def paginator(self): + return paginator.Paginator(self._items(), self.limit) + + def items(self): + return [] + + def location(self, item): + return item.get_absolute_url() + + def get_protocol(self, protocol=None): + # Determine protocol + return self.protocol or protocol or "https" + + def get_domain(self, site=None): + # Determine domain + if site is None: + if django_apps.is_installed("django.contrib.sites"): + Site = django_apps.get_model("sites.Site") + try: + site = Site.objects.get_current() + except Site.DoesNotExist: + pass + if site is None: + raise ImproperlyConfigured( + "To use sitemaps, either enable the sites framework or pass " + "a Site/RequestSite object in your view." + ) + return site.domain + + def get_urls(self, page=1, site=None, protocol=None): + protocol = self.get_protocol(protocol) + domain = self.get_domain(site) + return self._urls(page, protocol, domain) + + def get_latest_lastmod(self): + if not hasattr(self, "lastmod"): + return None + if callable(self.lastmod): + try: + return max([self.lastmod(item) for item in self.items()], default=None) + except TypeError: + return None + else: + return self.lastmod + + def _urls(self, page, protocol, domain): + urls = [] + latest_lastmod = None + all_items_lastmod = True # track if all items have a lastmod + + paginator_page = self.paginator.page(page) + for item in paginator_page.object_list: + loc = f"{protocol}://{domain}{self._location(item)}" + priority = self._get("priority", item) + lastmod = self._get("lastmod", item) + + if all_items_lastmod: + all_items_lastmod = lastmod is not None + if all_items_lastmod and ( + latest_lastmod is None or lastmod > latest_lastmod + ): + latest_lastmod = lastmod + + url_info = { + "item": item, + "location": loc, + "lastmod": lastmod, + "changefreq": self._get("changefreq", item), + "priority": str(priority if priority is not None else ""), + "alternates": [], + } + + if self.i18n and self.alternates: + item_languages = self.get_languages_for_item(item[0]) + for lang_code in item_languages: + loc = f"{protocol}://{domain}{self._location(item, lang_code)}" + url_info["alternates"].append( + { + "location": loc, + "lang_code": lang_code, + } + ) + if self.x_default and settings.LANGUAGE_CODE in item_languages: + lang_code = settings.LANGUAGE_CODE + loc = f"{protocol}://{domain}{self._location(item, lang_code)}" + loc = loc.replace(f"/{lang_code}/", "/", 1) + url_info["alternates"].append( + { + "location": loc, + "lang_code": "x-default", + } + ) + + urls.append(url_info) + + if all_items_lastmod and latest_lastmod: + self.latest_lastmod = latest_lastmod + + return urls + + +class GenericSitemap(Sitemap): + priority = None + changefreq = None + + def __init__(self, info_dict, priority=None, changefreq=None, protocol=None): + self.queryset = info_dict["queryset"] + self.date_field = info_dict.get("date_field") + self.priority = self.priority or priority + self.changefreq = self.changefreq or changefreq + self.protocol = self.protocol or protocol + + def items(self): + # Make sure to return a clone; we don't want premature evaluation. 
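
To make the hook methods above concrete (items() supplies the objects, location() defaults to get_absolute_url(), and _get() accepts either a plain attribute or a per-item callable), here is a minimal subclass sketch; BlogPost and its fields are assumptions:

    from django.contrib.sitemaps import Sitemap
    from myblog.models import BlogPost  # hypothetical model


    class BlogSitemap(Sitemap):
        changefreq = "weekly"  # plain attribute: _get() returns it as-is
        priority = 0.6

        def items(self):
            return BlogPost.objects.filter(is_published=True)

        def lastmod(self, item):
            # Callable: _get() invokes it once per item.
            return item.updated_at
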
+ return self.queryset.filter() + + def lastmod(self, item): + if self.date_field is not None: + return getattr(item, self.date_field) + return None + + def get_latest_lastmod(self): + if self.date_field is not None: + return ( + self.queryset.order_by("-" + self.date_field) + .values_list(self.date_field, flat=True) + .first() + ) + return None diff --git a/testbed/django__django/django/contrib/sitemaps/apps.py b/testbed/django__django/django/contrib/sitemaps/apps.py new file mode 100644 index 0000000000000000000000000000000000000000..70c200c63c0c34dac93db5527414307ffc41ec4f --- /dev/null +++ b/testbed/django__django/django/contrib/sitemaps/apps.py @@ -0,0 +1,8 @@ +from django.apps import AppConfig +from django.utils.translation import gettext_lazy as _ + + +class SiteMapsConfig(AppConfig): + default_auto_field = "django.db.models.AutoField" + name = "django.contrib.sitemaps" + verbose_name = _("Site Maps") diff --git a/testbed/django__django/django/contrib/sitemaps/templates/sitemap_index.xml b/testbed/django__django/django/contrib/sitemaps/templates/sitemap_index.xml new file mode 100644 index 0000000000000000000000000000000000000000..b7abd12b33cbaffb7d179b1c336f6aec2d387045 --- /dev/null +++ b/testbed/django__django/django/contrib/sitemaps/templates/sitemap_index.xml @@ -0,0 +1,13 @@ + + +{% spaceless %} +{% for site in sitemaps %} + + {{ site.location }} + {% if site.last_mod %} + {{ site.last_mod|date:"c" }} + {% endif %} + +{% endfor %} +{% endspaceless %} + diff --git a/testbed/django__django/django/contrib/sitemaps/views.py b/testbed/django__django/django/contrib/sitemaps/views.py new file mode 100644 index 0000000000000000000000000000000000000000..166563b200ab49bc68155fba80c18e1d3d9352c0 --- /dev/null +++ b/testbed/django__django/django/contrib/sitemaps/views.py @@ -0,0 +1,140 @@ +import datetime +from dataclasses import dataclass +from functools import wraps + +from django.contrib.sites.shortcuts import get_current_site +from django.core.paginator import EmptyPage, PageNotAnInteger +from django.http import Http404 +from django.template.response import TemplateResponse +from django.urls import reverse +from django.utils import timezone +from django.utils.http import http_date + + +@dataclass +class SitemapIndexItem: + location: str + last_mod: bool = None + + +def x_robots_tag(func): + @wraps(func) + def inner(request, *args, **kwargs): + response = func(request, *args, **kwargs) + response.headers["X-Robots-Tag"] = "noindex, noodp, noarchive" + return response + + return inner + + +def _get_latest_lastmod(current_lastmod, new_lastmod): + """ + Returns the latest `lastmod` where `lastmod` can be either a date or a + datetime. + """ + if not isinstance(new_lastmod, datetime.datetime): + new_lastmod = datetime.datetime.combine(new_lastmod, datetime.time.min) + if timezone.is_naive(new_lastmod): + new_lastmod = timezone.make_aware(new_lastmod, datetime.timezone.utc) + return new_lastmod if current_lastmod is None else max(current_lastmod, new_lastmod) + + +@x_robots_tag +def index( + request, + sitemaps, + template_name="sitemap_index.xml", + content_type="application/xml", + sitemap_url_name="django.contrib.sitemaps.views.sitemap", +): + req_protocol = request.scheme + req_site = get_current_site(request) + + sites = [] # all sections' sitemap URLs + all_indexes_lastmod = True + latest_lastmod = None + for section, site in sitemaps.items(): + # For each section label, add links of all pages of its sitemap + # (usually generated by the `sitemap` view). 
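
GenericSitemap above covers the common "one queryset, one date field" case without a subclass. A usage sketch reusing the hypothetical BlogPost model:

    from django.contrib.sitemaps import GenericSitemap
    from myblog.models import BlogPost  # hypothetical

    blog_sitemap = GenericSitemap(
        {"queryset": BlogPost.objects.all(), "date_field": "updated_at"},
        priority=0.5,
        changefreq="daily",
    )
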
+ if callable(site): + site = site() + protocol = req_protocol if site.protocol is None else site.protocol + sitemap_url = reverse(sitemap_url_name, kwargs={"section": section}) + absolute_url = "%s://%s%s" % (protocol, req_site.domain, sitemap_url) + site_lastmod = site.get_latest_lastmod() + if all_indexes_lastmod: + if site_lastmod is not None: + latest_lastmod = _get_latest_lastmod(latest_lastmod, site_lastmod) + else: + all_indexes_lastmod = False + sites.append(SitemapIndexItem(absolute_url, site_lastmod)) + # Add links to all pages of the sitemap. + for page in range(2, site.paginator.num_pages + 1): + sites.append( + SitemapIndexItem("%s?p=%s" % (absolute_url, page), site_lastmod) + ) + # If lastmod is defined for all sites, set header so as + # ConditionalGetMiddleware is able to send 304 NOT MODIFIED + if all_indexes_lastmod and latest_lastmod: + headers = {"Last-Modified": http_date(latest_lastmod.timestamp())} + else: + headers = None + return TemplateResponse( + request, + template_name, + {"sitemaps": sites}, + content_type=content_type, + headers=headers, + ) + + +@x_robots_tag +def sitemap( + request, + sitemaps, + section=None, + template_name="sitemap.xml", + content_type="application/xml", +): + req_protocol = request.scheme + req_site = get_current_site(request) + + if section is not None: + if section not in sitemaps: + raise Http404("No sitemap available for section: %r" % section) + maps = [sitemaps[section]] + else: + maps = sitemaps.values() + page = request.GET.get("p", 1) + + lastmod = None + all_sites_lastmod = True + urls = [] + for site in maps: + try: + if callable(site): + site = site() + urls.extend(site.get_urls(page=page, site=req_site, protocol=req_protocol)) + if all_sites_lastmod: + site_lastmod = getattr(site, "latest_lastmod", None) + if site_lastmod is not None: + lastmod = _get_latest_lastmod(lastmod, site_lastmod) + else: + all_sites_lastmod = False + except EmptyPage: + raise Http404("Page %s empty" % page) + except PageNotAnInteger: + raise Http404("No page '%s'" % page) + # If lastmod is defined for all sites, set header so as + # ConditionalGetMiddleware is able to send 304 NOT MODIFIED + if all_sites_lastmod: + headers = {"Last-Modified": http_date(lastmod.timestamp())} if lastmod else None + else: + headers = None + return TemplateResponse( + request, + template_name, + {"urlset": urls}, + content_type=content_type, + headers=headers, + ) diff --git a/testbed/django__django/django/contrib/sites/locale/af/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/af/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..d6132674e1b38f3c3930c814b51dfd4fabf5a8b4 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/af/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/af/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/af/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..e4742e2e8c85f53d4427b64dec203cbe87c54f0e --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/af/LC_MESSAGES/django.po @@ -0,0 +1,36 @@ +# This file is distributed under the same license as the Django package. 
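
The index and sitemap views above are conventionally wired up as follows; note the section URL's name must match the sitemap_url_name default that index() reverses. The sitemaps dict reuses the sketches above:

    # urls.py (hypothetical project)
    from django.contrib.sitemaps import views as sitemaps_views
    from django.urls import path

    sitemaps = {"blog": blog_sitemap}

    urlpatterns = [
        path("sitemap.xml", sitemaps_views.index, {"sitemaps": sitemaps}),
        path(
            "sitemap-<section>.xml",
            sitemaps_views.sitemap,
            {"sitemaps": sitemaps},
            name="django.contrib.sitemaps.views.sitemap",
        ),
    ]
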
+# +# Translators: +# F Wolff , 2019 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2019-01-04 18:46+0000\n" +"Last-Translator: F Wolff \n" +"Language-Team: Afrikaans (http://www.transifex.com/django/django/language/" +"af/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: af\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Werwe" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Die domeinnaam mag nie spasies of tab-karakters bevat nie." + +msgid "domain name" +msgstr "domeinnaam" + +msgid "display name" +msgstr "vertoonnaam" + +msgid "site" +msgstr "werf" + +msgid "sites" +msgstr "werwe" diff --git a/testbed/django__django/django/contrib/sites/locale/ar/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/ar/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..bc0ef772a5aa4d53cdd84b1ed43f02220fb21f34 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/ar/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/ar/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/ar/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..f04d717bc15535fc7537ba2cac08c3fbb4ac0f2f --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/ar/LC_MESSAGES/django.po @@ -0,0 +1,38 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Bashar Al-Abdulhadi, 2014 +# Eyad Toma , 2013 +# Jannis Leidel , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Arabic (http://www.transifex.com/django/django/language/ar/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ar\n" +"Plural-Forms: nplurals=6; plural=n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n%100>=3 " +"&& n%100<=10 ? 3 : n%100>=11 && n%100<=99 ? 4 : 5;\n" + +msgid "Sites" +msgstr "المواقع" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "اسم النطاق يجب أن لا يحتوي على فراغات أو فراغات طويلة tabs." + +msgid "domain name" +msgstr "اسم النطاق" + +msgid "display name" +msgstr "اسم العرض" + +msgid "site" +msgstr "موقع" + +msgid "sites" +msgstr "المواقع" diff --git a/testbed/django__django/django/contrib/sites/locale/ar_DZ/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/ar_DZ/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..35e42fa932de8c811891c89feca90443cffc0214 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/ar_DZ/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/ar_DZ/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/ar_DZ/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..815f97304b709631b0d906828a97c2384d10e64c --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/ar_DZ/LC_MESSAGES/django.po @@ -0,0 +1,37 @@ +# This file is distributed under the same license as the Django package. 
+# +# Translators: +# Riterix , 2019 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2019-12-14 22:52+0000\n" +"Last-Translator: Riterix \n" +"Language-Team: Arabic (Algeria) (http://www.transifex.com/django/django/" +"language/ar_DZ/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ar_DZ\n" +"Plural-Forms: nplurals=6; plural=n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n%100>=3 " +"&& n%100<=10 ? 3 : n%100>=11 && n%100<=99 ? 4 : 5;\n" + +msgid "Sites" +msgstr "المواقع" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "اسم النطاق يجب أن لا يحتوي على فراغات أو فراغات طويلة tabs." + +msgid "domain name" +msgstr "اسم النطاق" + +msgid "display name" +msgstr "اسم العرض" + +msgid "site" +msgstr "موقع" + +msgid "sites" +msgstr "المواقع" diff --git a/testbed/django__django/django/contrib/sites/locale/ast/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/ast/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..0f14b988a27d8347b6988fc8a374b5af35331f1a Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/ast/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/ast/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/ast/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..4fff0b44d5445e2d418b29e7b1ee9894423111a3 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/ast/LC_MESSAGES/django.po @@ -0,0 +1,36 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Ḷḷumex03 , 2014 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-23 19:51+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Asturian (http://www.transifex.com/django/django/language/" +"ast/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ast\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "El nome de dominiu nun pue contener dengún espaciu o tabulación." + +msgid "domain name" +msgstr "nome de dominiu" + +msgid "display name" +msgstr "amosar nome" + +msgid "site" +msgstr "sitiu" + +msgid "sites" +msgstr "sitios" diff --git a/testbed/django__django/django/contrib/sites/locale/az/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/az/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..0ec03f9843b0cffcae4726f7835e1c4f5bae2d0d Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/az/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/az/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/az/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..cd3d29bc3aa8ea2e0e0870894d876012d8cfef44 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/az/LC_MESSAGES/django.po @@ -0,0 +1,37 @@ +# This file is distributed under the same license as the Django package. 
+# +# Translators: +# Ali Ismayilov , 2011 +# Emin Mastizada , 2018 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2018-04-27 17:01+0000\n" +"Last-Translator: Emin Mastizada \n" +"Language-Team: Azerbaijani (http://www.transifex.com/django/django/language/" +"az/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: az\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Saytlar" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Domen adında boşluq və tab boşluğu olmamalıdır." + +msgid "domain name" +msgstr "domen" + +msgid "display name" +msgstr "adı" + +msgid "site" +msgstr "sayt" + +msgid "sites" +msgstr "saytlar" diff --git a/testbed/django__django/django/contrib/sites/locale/be/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/be/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..ec75cef7a7a71d99bf360bbaddb0ed3dcc0d0005 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/be/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/be/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/be/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..8b44db3d9e69e54f0233d6141deeba7996cf477c --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/be/LC_MESSAGES/django.po @@ -0,0 +1,38 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Viktar Palstsiuk , 2014 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Viktar Palstsiuk \n" +"Language-Team: Belarusian (http://www.transifex.com/django/django/language/" +"be/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: be\n" +"Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" +"%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" +"%100>=11 && n%100<=14)? 2 : 3);\n" + +msgid "Sites" +msgstr "Сайты" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Даменнае імя ня можа ўтрымліваць прабелы або сымбалі табуляцыі." + +msgid "domain name" +msgstr "назва дамэна" + +msgid "display name" +msgstr "бачная назва" + +msgid "site" +msgstr "сайт" + +msgid "sites" +msgstr "сайты" diff --git a/testbed/django__django/django/contrib/sites/locale/bg/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/bg/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..b337e3d46bbdfecd2045d8d9f826f7a3083937eb Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/bg/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/bg/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/bg/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..4ebb4ba03b241a603402dd0bff70212c29b389fa --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/bg/LC_MESSAGES/django.po @@ -0,0 +1,38 @@ +# This file is distributed under the same license as the Django package. 
+# +# Translators: +# Jannis Leidel , 2011 +# Lyuboslav Petrov , 2014 +# vestimir , 2014 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Bulgarian (http://www.transifex.com/django/django/language/" +"bg/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: bg\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Сайтове" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Името на домейна не може да съдържа никакви интервали или табулации." + +msgid "domain name" +msgstr "име на домейна" + +msgid "display name" +msgstr "наименование" + +msgid "site" +msgstr "сайт" + +msgid "sites" +msgstr "сайтове" diff --git a/testbed/django__django/django/contrib/sites/locale/bn/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/bn/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..11265f6b1803a7eb5bfd553b62351104229fee66 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/bn/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/bn/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/bn/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..2325fe35fae14324393d6084771dcfa8e7a12ab4 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/bn/LC_MESSAGES/django.po @@ -0,0 +1,37 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Jannis Leidel , 2011 +# Tahmid Rafi , 2014 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Tahmid Rafi \n" +"Language-Team: Bengali (http://www.transifex.com/django/django/language/" +"bn/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: bn\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "সাইট" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "ডোমেইন নাম এ কোন স্পেস বা ট্যাব থাকতে পারবে না।" + +msgid "domain name" +msgstr "ডোমেইন নাম" + +msgid "display name" +msgstr "ডিসপ্লে নাম" + +msgid "site" +msgstr "সাইট" + +msgid "sites" +msgstr "সাইটসমূহ" diff --git a/testbed/django__django/django/contrib/sites/locale/br/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/br/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..e8547baee69b59d7759b07147b5f2924dbd974f8 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/br/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/br/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/br/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..4294e2b36b5983a9aac402b71a7f4b07579d753e --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/br/LC_MESSAGES/django.po @@ -0,0 +1,40 @@ +# This file is distributed under the same license as the Django package. 
+# +# Translators: +# Fulup , 2012 +# Irriep Nala Novram , 2018 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2018-10-19 23:07+0000\n" +"Last-Translator: Irriep Nala Novram \n" +"Language-Team: Breton (http://www.transifex.com/django/django/language/br/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: br\n" +"Plural-Forms: nplurals=5; plural=((n%10 == 1) && (n%100 != 11) && (n%100 !" +"=71) && (n%100 !=91) ? 0 :(n%10 == 2) && (n%100 != 12) && (n%100 !=72) && (n" +"%100 !=92) ? 1 :(n%10 ==3 || n%10==4 || n%10==9) && (n%100 < 10 || n% 100 > " +"19) && (n%100 < 70 || n%100 > 79) && (n%100 < 90 || n%100 > 99) ? 2 :(n != 0 " +"&& n % 1000000 == 0) ? 3 : 4);\n" + +msgid "Sites" +msgstr "Lec'hiennoù" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "An anv domani n'hall na enderc'hel spasoù pe taolennadurioù." + +msgid "domain name" +msgstr "anv domani" + +msgid "display name" +msgstr "anv da ziskouez" + +msgid "site" +msgstr "lec'hienn" + +msgid "sites" +msgstr "lec'hiennoù" diff --git a/testbed/django__django/django/contrib/sites/locale/bs/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/bs/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..1b76e2fcb12a4cc4c62634a5e20db1136760eb0d Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/bs/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/bs/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/bs/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..7a10cea2b3fe6231a9b8e4c2bb314a7e0e24ddd5 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/bs/LC_MESSAGES/django.po @@ -0,0 +1,37 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Jannis Leidel , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Bosnian (http://www.transifex.com/django/django/language/" +"bs/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: bs\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" +"%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" + +msgid "Sites" +msgstr "" + +msgid "The domain name cannot contain any spaces or tabs." 
+msgstr "" + +msgid "domain name" +msgstr "ime domena" + +msgid "display name" +msgstr "prikazano ime" + +msgid "site" +msgstr "sajt" + +msgid "sites" +msgstr "sajtovi" diff --git a/testbed/django__django/django/contrib/sites/locale/ca/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/ca/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..36107e04e36baa817bf05bba19af0e9a8fee559b Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/ca/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/ca/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/ca/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..22f2c13ad3488d59d736ced7f157e737e33c4e79 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/ca/LC_MESSAGES/django.po @@ -0,0 +1,38 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Antoni Aloy , 2013 +# Carles Barrobés , 2014 +# Jannis Leidel , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Catalan (http://www.transifex.com/django/django/language/" +"ca/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ca\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Llocs" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "El nom de domini no pot contenir cap espai ni tabuladors." + +msgid "domain name" +msgstr "nom del domini" + +msgid "display name" +msgstr "nom per mostrar" + +msgid "site" +msgstr "lloc" + +msgid "sites" +msgstr "llocs" diff --git a/testbed/django__django/django/contrib/sites/locale/cs/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/cs/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..26ea69183b868354bbbb51f25ca2d5400a520a68 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/cs/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/cs/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/cs/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..3fb2c3a08fa487cab08c51bfbf4d1643a198c075 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/cs/LC_MESSAGES/django.po @@ -0,0 +1,37 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Jannis Leidel , 2011 +# Vláďa Macek , 2013-2014 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Czech (http://www.transifex.com/django/django/language/cs/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: cs\n" +"Plural-Forms: nplurals=4; plural=(n == 1 && n % 1 == 0) ? 0 : (n >= 2 && n " +"<= 4 && n % 1 == 0) ? 1: (n % 1 != 0 ) ? 2 : 3;\n" + +msgid "Sites" +msgstr "Weby" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Název domény nemůže obsahovat mezery ani tabulátory." 
+ +msgid "domain name" +msgstr "název domény" + +msgid "display name" +msgstr "zobrazený název" + +msgid "site" +msgstr "web" + +msgid "sites" +msgstr "weby" diff --git a/testbed/django__django/django/contrib/sites/locale/cy/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/cy/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..d4b4ecd5ccace550db978900903f2a95ea61137e Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/cy/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/cy/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/cy/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..5e37db2e7510db7599c302839b381dfdb4ba3207 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/cy/LC_MESSAGES/django.po @@ -0,0 +1,37 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Jannis Leidel , 2011 +# Maredudd ap Gwyndaf , 2013-2014 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-23 18:54+0000\n" +"Last-Translator: Maredudd ap Gwyndaf \n" +"Language-Team: Welsh (http://www.transifex.com/django/django/language/cy/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: cy\n" +"Plural-Forms: nplurals=4; plural=(n==1) ? 0 : (n==2) ? 1 : (n != 8 && n != " +"11) ? 2 : 3;\n" + +msgid "Sites" +msgstr "Safleoedd" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Ni all yr enw parth gynnwys bylchau neu dabiau." + +msgid "domain name" +msgstr "enw parth" + +msgid "display name" +msgstr "enw arddangos" + +msgid "site" +msgstr "safle" + +msgid "sites" +msgstr "safleoedd" diff --git a/testbed/django__django/django/contrib/sites/locale/da/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/da/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..2380ff3fc69f4fa5004461f97211e40c95cdee5e Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/da/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/da/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/da/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..762611976348c8539b9d7fef0db5f38bea96a7f1 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/da/LC_MESSAGES/django.po @@ -0,0 +1,36 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Erik Wognsen , 2013-2014 +# Jannis Leidel , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Danish (http://www.transifex.com/django/django/language/da/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: da\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Websider" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Domænenavnet må ikke indeholde mellemrum eller tabulatorer." 
+ +msgid "domain name" +msgstr "domænenavn" + +msgid "display name" +msgstr "vist navn" + +msgid "site" +msgstr "webside" + +msgid "sites" +msgstr "websider" diff --git a/testbed/django__django/django/contrib/sites/locale/de/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/de/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..74e16850621babd51524d2c46cefdd267737f75e Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/de/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/de/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/de/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..602912402f6ef6ab71d0c7447a345df1dcb009cd --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/de/LC_MESSAGES/django.po @@ -0,0 +1,36 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Jannis Leidel , 2011,2013-2014,2016 +# Markus Holtermann , 2013 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-23 18:54+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: German (http://www.transifex.com/django/django/language/de/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: de\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Websites" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Der Domainname darf keine Leerzeichen oder Tabs enthalten." + +msgid "domain name" +msgstr "Domainname" + +msgid "display name" +msgstr "Anzeigename" + +msgid "site" +msgstr "Website" + +msgid "sites" +msgstr "Websites" diff --git a/testbed/django__django/django/contrib/sites/locale/dsb/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/dsb/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..8179a5c186a4a4d78705281dab19c1ea53288313 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/dsb/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/dsb/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/dsb/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..b8706de2cb67e387325824878fb8b366612724e5 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/dsb/LC_MESSAGES/django.po @@ -0,0 +1,37 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Michael Wolf , 2016 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-23 00:02+0000\n" +"Last-Translator: Michael Wolf \n" +"Language-Team: Lower Sorbian (http://www.transifex.com/django/django/" +"language/dsb/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: dsb\n" +"Plural-Forms: nplurals=4; plural=(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n" +"%100==4 ? 2 : 3);\n" + +msgid "Sites" +msgstr "Sedła" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Domenowe mě njamóžo prozne znamjenja abo tabulatory wopśimowaś." 
+ +msgid "domain name" +msgstr "domenowe mě" + +msgid "display name" +msgstr "zwobraznjeńske mě" + +msgid "site" +msgstr "sedło" + +msgid "sites" +msgstr "sedła" diff --git a/testbed/django__django/django/contrib/sites/locale/el/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/el/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..10479c1374020b8ac21a3093ec6c5686f80ee04b Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/el/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/el/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/el/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..94d8e190c5169c767c2a191618f3fecf26518ae4 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/el/LC_MESSAGES/django.po @@ -0,0 +1,38 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Jannis Leidel , 2011 +# Nikolas Demiridis , 2014 +# Pãnoș , 2014 +# Pãnoș , 2016 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-23 18:54+0000\n" +"Last-Translator: Pãnoș \n" +"Language-Team: Greek (http://www.transifex.com/django/django/language/el/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: el\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Ιστότοποι" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Το όνομα τομέα δεν μπορεί να περιέχει κενά." + +msgid "domain name" +msgstr "όνομα τομέα" + +msgid "display name" +msgstr "εμφανιζόμενο όνομα" + +msgid "site" +msgstr "ιστότοπος" + +msgid "sites" +msgstr "ιστότοποι" diff --git a/testbed/django__django/django/contrib/sites/locale/en/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/en/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..08a7b68596a8a494a33644935e4ca6d40be6447f Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/en/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/en/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/en/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..3b1884c4af547dd1853fe11c7f85b66831e8c36b --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/en/LC_MESSAGES/django.po @@ -0,0 +1,38 @@ +# This file is distributed under the same license as the Django package. +# +msgid "" +msgstr "" +"Project-Id-Version: Django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2010-05-13 15:35+0200\n" +"Last-Translator: Django team\n" +"Language-Team: English \n" +"Language: en\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: contrib/sites/apps.py:11 +msgid "Sites" +msgstr "" + +#: contrib/sites/models.py:30 +msgid "The domain name cannot contain any spaces or tabs." 
+msgstr "" + +#: contrib/sites/models.py:81 +msgid "domain name" +msgstr "" + +#: contrib/sites/models.py:83 +msgid "display name" +msgstr "" + +#: contrib/sites/models.py:88 +msgid "site" +msgstr "" + +#: contrib/sites/models.py:89 +msgid "sites" +msgstr "" diff --git a/testbed/django__django/django/contrib/sites/locale/en_AU/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/en_AU/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..e6e4917ecfa5f36456e8b72792fcd69d187921fc Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/en_AU/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/en_AU/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/en_AU/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..0659ec5f0ccacef852001f460c70bcf862f6e05b --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/en_AU/LC_MESSAGES/django.po @@ -0,0 +1,36 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Tom Fifield , 2021 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2021-04-11 13:15+0000\n" +"Last-Translator: Tom Fifield \n" +"Language-Team: English (Australia) (http://www.transifex.com/django/django/" +"language/en_AU/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: en_AU\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Sites" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "The domain name cannot contain any spaces or tabs." + +msgid "domain name" +msgstr "domain name" + +msgid "display name" +msgstr "display name" + +msgid "site" +msgstr "site" + +msgid "sites" +msgstr "sites" diff --git a/testbed/django__django/django/contrib/sites/locale/en_GB/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/en_GB/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..772da82508d299330f6e1945db356175f12d284f Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/en_GB/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/en_GB/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/en_GB/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..b212e3493ddb474ca638adf40756e985f389af4e --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/en_GB/LC_MESSAGES/django.po @@ -0,0 +1,36 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Ross Poulton , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: English (United Kingdom) (http://www.transifex.com/django/" +"django/language/en_GB/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: en_GB\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "" + +msgid "The domain name cannot contain any spaces or tabs." 
+msgstr "" + +msgid "domain name" +msgstr "domain name" + +msgid "display name" +msgstr "display name" + +msgid "site" +msgstr "site" + +msgid "sites" +msgstr "sites" diff --git a/testbed/django__django/django/contrib/sites/locale/eo/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/eo/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..23a47003150ffb1da635e509b2021ce0bbcaf68a Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/eo/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/eo/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/eo/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..a65e597f04372c578c6e8279e04bd146a870df8d --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/eo/LC_MESSAGES/django.po @@ -0,0 +1,38 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Baptiste Darthenay , 2013 +# Baptiste Darthenay , 2014 +# kristjan , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Esperanto (http://www.transifex.com/django/django/language/" +"eo/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: eo\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Retejoj" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Tiu domajna nomo ne povas enteni nek spacojn nek tabojn." + +msgid "domain name" +msgstr "domajna nomo" + +msgid "display name" +msgstr "vidiga nomo" + +msgid "site" +msgstr "retejo" + +msgid "sites" +msgstr "retejoj" diff --git a/testbed/django__django/django/contrib/sites/locale/es/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/es/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..8e3f12b8b1828b5ba0fa6c0bd0b3c9ca3551bc44 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/es/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/es/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/es/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..89fbdbc8cf769e1a83eee4a4c3c26d5914c6d764 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/es/LC_MESSAGES/django.po @@ -0,0 +1,39 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# abraham.martin , 2014 +# Antoni Aloy , 2013 +# Ernesto Avilés Vázquez , 2014 +# Jannis Leidel , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Spanish (http://www.transifex.com/django/django/language/" +"es/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: es\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Sitios" + +msgid "The domain name cannot contain any spaces or tabs." 
+msgstr "El nombre de dominio no puede contener espacios ni tabulaciones" + +msgid "domain name" +msgstr "nombre de dominio" + +msgid "display name" +msgstr "nombre a mostrar" + +msgid "site" +msgstr "sitio" + +msgid "sites" +msgstr "sitios" diff --git a/testbed/django__django/django/contrib/sites/locale/es_AR/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/es_AR/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..cc883045155cfefa309e3605aabb9cd43b5c8515 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/es_AR/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/es_AR/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/es_AR/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..2ea43071a6e6b2047d0f2bf849bc6fc7b2abd6d8 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/es_AR/LC_MESSAGES/django.po @@ -0,0 +1,38 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Jannis Leidel , 2011 +# Ramiro Morales, 2013-2014 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-23 18:54+0000\n" +"Last-Translator: Ramiro Morales\n" +"Language-Team: Spanish (Argentina) (http://www.transifex.com/django/django/" +"language/es_AR/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: es_AR\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Sitios" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "" +"El nombre de dominio no puede incluir espacios ni marcas de tabulación." + +msgid "domain name" +msgstr "nombre de dominio" + +msgid "display name" +msgstr "nombre para visualizar" + +msgid "site" +msgstr "sitio" + +msgid "sites" +msgstr "sitios" diff --git a/testbed/django__django/django/contrib/sites/locale/es_CO/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/es_CO/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..1fff5d9b5ccb4648390c2362418e4ae0cf3de4f9 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/es_CO/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/es_CO/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/es_CO/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..9344265cc3dd8ffe1eef9bd515a8ceaf2b1f25ac --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/es_CO/LC_MESSAGES/django.po @@ -0,0 +1,36 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Carlos Muñoz , 2015 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 21:51+0000\n" +"Last-Translator: Carlos Muñoz \n" +"Language-Team: Spanish (Colombia) (http://www.transifex.com/django/django/" +"language/es_CO/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: es_CO\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Sitios" + +msgid "The domain name cannot contain any spaces or tabs." 
+msgstr "El nombre de dominio no puede contener espacios y/o tabulaciones" + +msgid "domain name" +msgstr "nombre de dominio" + +msgid "display name" +msgstr "nombre a mostrar" + +msgid "site" +msgstr "sitio" + +msgid "sites" +msgstr "sitios" diff --git a/testbed/django__django/django/contrib/sites/locale/es_MX/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/es_MX/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..02a2e5340ac34fb81277fd5d8771c0d59a0a402f Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/es_MX/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/es_MX/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/es_MX/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..29d44015c799e4d36deff3f01085fd4fd96cc937 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/es_MX/LC_MESSAGES/django.po @@ -0,0 +1,38 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Abraham Estrada, 2011 +# Carlos Castro , 2011 +# zodman , 2015 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: zodman \n" +"Language-Team: Spanish (Mexico) (http://www.transifex.com/django/django/" +"language/es_MX/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: es_MX\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Sitios" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "El nombre de dominio no puede contener espacios o tabulaciones." + +msgid "domain name" +msgstr "nombre del dominio" + +msgid "display name" +msgstr "nombre visible" + +msgid "site" +msgstr "sitio" + +msgid "sites" +msgstr "sitios" diff --git a/testbed/django__django/django/contrib/sites/locale/es_VE/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/es_VE/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..32f6e8fd6f68cc26000df003f93a77c87967a3b7 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/es_VE/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/es_VE/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/es_VE/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..a6b74fae997b0bf26b6f526fcc05527c29badd78 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/es_VE/LC_MESSAGES/django.po @@ -0,0 +1,35 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2014-10-05 20:12+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Spanish (Venezuela) (http://www.transifex.com/projects/p/" +"django/language/es_VE/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: es_VE\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "" + +msgid "The domain name cannot contain any spaces or tabs." 
+msgstr "" + +msgid "domain name" +msgstr "" + +msgid "display name" +msgstr "" + +msgid "site" +msgstr "" + +msgid "sites" +msgstr "" diff --git a/testbed/django__django/django/contrib/sites/locale/et/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/et/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..3e3e38acca07d5602e817d28d14685d0515e1b83 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/et/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/et/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/et/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..91ef8f3c0bb25d1122ce4ec2249e75026813e4df --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/et/LC_MESSAGES/django.po @@ -0,0 +1,38 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Jannis Leidel , 2011 +# Janno Liivak , 2013 +# Marti Raudsepp , 2014 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Estonian (http://www.transifex.com/django/django/language/" +"et/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: et\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Saidid" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Domeeni nimes ei tohi olla ei tühikuid ega tabeldusmärke." + +msgid "domain name" +msgstr "domeeninimi" + +msgid "display name" +msgstr "hüüdnimi" + +msgid "site" +msgstr "sait" + +msgid "sites" +msgstr "saidid" diff --git a/testbed/django__django/django/contrib/sites/locale/eu/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/eu/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..ca7a350ae193497bf5af1a67094a2a99413717dc Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/eu/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/eu/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/eu/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..d7d413859c1ecdce398d0771c8e18dd425a39d38 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/eu/LC_MESSAGES/django.po @@ -0,0 +1,37 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Ander Martínez , 2013 +# Eneko Illarramendi , 2017 +# Jannis Leidel , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-23 18:54+0000\n" +"Last-Translator: Eneko Illarramendi \n" +"Language-Team: Basque (http://www.transifex.com/django/django/language/eu/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: eu\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Webguneak" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Domeinu izenak ezin du zuriunerik edo tabuladorerik eduki." 
+ +msgid "domain name" +msgstr "domeinu-izena" + +msgid "display name" +msgstr "erakusteko izena" + +msgid "site" +msgstr "webgunea" + +msgid "sites" +msgstr "webguneak" diff --git a/testbed/django__django/django/contrib/sites/locale/fa/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/fa/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..7bc004f3b11ebf24857145896a26222199e161ed Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/fa/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/fa/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/fa/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..ba91492439ed2476a2663c5dd82dbf8031ae233c --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/fa/LC_MESSAGES/django.po @@ -0,0 +1,38 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Jannis Leidel , 2011 +# Mohammad Hossein Mojtahedi , 2013 +# Reza Mohammadi , 2014 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-23 18:54+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Persian (http://www.transifex.com/django/django/language/" +"fa/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: fa\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +msgid "Sites" +msgstr "وب‌گاه‌ها" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "نام دامنه نمی‌تواند حاوی فاصله یا پرش -tab- باشد." + +msgid "domain name" +msgstr "نام دامنه" + +msgid "display name" +msgstr "نام نمایش داده شده" + +msgid "site" +msgstr "وب‌گاه" + +msgid "sites" +msgstr "وب‌گاه‌ها" diff --git a/testbed/django__django/django/contrib/sites/locale/fi/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/fi/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..65ba6a036382fc3f7b2c932d4b1442ae04e09241 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/fi/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/fi/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/fi/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..13c88cef49c61656bf16b21f6c8aad46ee88f61f --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/fi/LC_MESSAGES/django.po @@ -0,0 +1,37 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Aarni Koskela, 2015 +# Jannis Leidel , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Aarni Koskela\n" +"Language-Team: Finnish (http://www.transifex.com/django/django/language/" +"fi/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: fi\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Sivustot" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Verkkotunnus ei voi sisältää välilyöntejä tai sarkainmerkkejä." 
+ +msgid "domain name" +msgstr "verkkotunnus" + +msgid "display name" +msgstr "näyttönimi" + +msgid "site" +msgstr "sivusto" + +msgid "sites" +msgstr "sivustot" diff --git a/testbed/django__django/django/contrib/sites/locale/fr/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/fr/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..54df18ddb8eb29b31629e73d48256c4f5b7576f6 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/fr/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/fr/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/fr/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..26f86dc09bf72cc7be9e8b8d8fd710a319a21e60 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/fr/LC_MESSAGES/django.po @@ -0,0 +1,37 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Claude Paroz , 2014 +# Claude Paroz , 2013 +# Jannis Leidel , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: French (http://www.transifex.com/django/django/language/fr/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: fr\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +msgid "Sites" +msgstr "Sites" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Le nom de domaine ne peut pas contenir d'espace ni de tabulation." + +msgid "domain name" +msgstr "nom de domaine" + +msgid "display name" +msgstr "nom à afficher" + +msgid "site" +msgstr "site" + +msgid "sites" +msgstr "sites" diff --git a/testbed/django__django/django/contrib/sites/locale/fy/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/fy/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..489bbab4f0f9b2ca1e5bb1fa90dbb3c412f9ae2d Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/fy/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/fy/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/fy/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..599ab98a04cac964aa560a24dcd9a931b2b86ef2 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/fy/LC_MESSAGES/django.po @@ -0,0 +1,35 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2014-10-05 20:13+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Western Frisian (http://www.transifex.com/projects/p/django/" +"language/fy/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: fy\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "" + +msgid "The domain name cannot contain any spaces or tabs." 
+msgstr "" + +msgid "domain name" +msgstr "" + +msgid "display name" +msgstr "" + +msgid "site" +msgstr "" + +msgid "sites" +msgstr "" diff --git a/testbed/django__django/django/contrib/sites/locale/ga/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/ga/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..c15ebefd56238c5ebf598c73db7df0c6d0ae6c9e Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/ga/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/ga/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/ga/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..095d08db92ed77c5754df710fc923f1d14229c5b --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/ga/LC_MESSAGES/django.po @@ -0,0 +1,37 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Jannis Leidel , 2011 +# Luke Blaney , 2019 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2019-06-22 21:46+0000\n" +"Last-Translator: Luke Blaney \n" +"Language-Team: Irish (http://www.transifex.com/django/django/language/ga/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ga\n" +"Plural-Forms: nplurals=5; plural=(n==1 ? 0 : n==2 ? 1 : n<7 ? 2 : n<11 ? 3 : " +"4);\n" + +msgid "Sites" +msgstr "Suíomhanna" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "" + +msgid "domain name" +msgstr "ainm fearainn" + +msgid "display name" +msgstr "ainm taispeáinta" + +msgid "site" +msgstr "suíomh" + +msgid "sites" +msgstr "suíomhanna" diff --git a/testbed/django__django/django/contrib/sites/locale/gd/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/gd/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..726462c853f9c57cf1574a589ed1b76132d35bb7 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/gd/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/gd/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/gd/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..642691878500dccfb069af60a7e1fd4b482dac0c --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/gd/LC_MESSAGES/django.po @@ -0,0 +1,37 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# GunChleoc, 2015 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-22 17:29+0000\n" +"Last-Translator: GunChleoc\n" +"Language-Team: Gaelic, Scottish (http://www.transifex.com/django/django/" +"language/gd/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: gd\n" +"Plural-Forms: nplurals=4; plural=(n==1 || n==11) ? 0 : (n==2 || n==12) ? 1 : " +"(n > 2 && n < 20) ? 2 : 3;\n" + +msgid "Sites" +msgstr "Làraichean" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Chan fhaod beàrnan no tabaichean a bhith ann an ainm àrainne." 
+ +msgid "domain name" +msgstr "ainm àrainne" + +msgid "display name" +msgstr "ainm taisbeanaidh" + +msgid "site" +msgstr "làrach" + +msgid "sites" +msgstr "làraichean" diff --git a/testbed/django__django/django/contrib/sites/locale/gl/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/gl/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..ddf5436e764263bb60b756757ea807ac08cfc03d --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/gl/LC_MESSAGES/django.po @@ -0,0 +1,38 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Jannis Leidel , 2011 +# Leandro Regueiro , 2013 +# X Bello , 2023 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2023-04-24 18:05+0000\n" +"Last-Translator: X Bello , 2023\n" +"Language-Team: Galician (http://www.transifex.com/django/django/language/" +"gl/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: gl\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Sitios" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "O dominio non pode conter espazos nin tabulacións." + +msgid "domain name" +msgstr "dominio" + +msgid "display name" +msgstr "nome" + +msgid "site" +msgstr "sitio" + +msgid "sites" +msgstr "sitios" diff --git a/testbed/django__django/django/contrib/sites/locale/he/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/he/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..62de695f3dc9ef89fe5920ae2e2875ca1780736b --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/he/LC_MESSAGES/django.po @@ -0,0 +1,37 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Jannis Leidel , 2011 +# Meir Kriheli , 2013-2014 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Hebrew (http://www.transifex.com/django/django/language/he/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: he\n" +"Plural-Forms: nplurals=4; plural=(n == 1 && n % 1 == 0) ? 0 : (n == 2 && n % " +"1 == 0) ? 1: (n % 10 == 0 && n % 1 == 0 && n > 10) ? 2 : 3;\n" + +msgid "Sites" +msgstr "אתרים" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "שם המתחם אינו יכול להכיל רווחים או טאבים." 
+ +msgid "domain name" +msgstr "שם מתחם" + +msgid "display name" +msgstr "שם לתצוגה" + +msgid "site" +msgstr "אתר" + +msgid "sites" +msgstr "אתרים" diff --git a/testbed/django__django/django/contrib/sites/locale/hi/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/hi/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..699024a0f25a4856e7ab8c178ece3c37eee961ce Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/hi/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/hi/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/hi/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..437c402caf621dc5b81a6731cdb441454a51eaae --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/hi/LC_MESSAGES/django.po @@ -0,0 +1,35 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Jannis Leidel , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Hindi (http://www.transifex.com/django/django/language/hi/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: hi\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "" + +msgid "domain name" +msgstr "डोमेन नाम" + +msgid "display name" +msgstr "प्रदर्शन नाम" + +msgid "site" +msgstr "साइट" + +msgid "sites" +msgstr "साइट" diff --git a/testbed/django__django/django/contrib/sites/locale/hr/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/hr/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..cd6a0dd92212c0fa36c815e3705678e94451338f --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/hr/LC_MESSAGES/django.po @@ -0,0 +1,38 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Jannis Leidel , 2011 +# Mislav Cimperšak , 2015 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Mislav Cimperšak \n" +"Language-Team: Croatian (http://www.transifex.com/django/django/language/" +"hr/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: hr\n" +"Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" +"%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;\n" + +msgid "Sites" +msgstr "Stranice" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Naziv domene ne može sadržavati razmake ili tabove." 
+ +msgid "domain name" +msgstr "ime domene" + +msgid "display name" +msgstr "ime za prikaz" + +msgid "site" +msgstr "stranica" + +msgid "sites" +msgstr "stranice" diff --git a/testbed/django__django/django/contrib/sites/locale/hu/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/hu/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..6a511f9f8964c33c7d83455e6e5b6af6a84e1295 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/hu/LC_MESSAGES/django.po @@ -0,0 +1,37 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# András Veres-Szentkirályi, 2016 +# Jannis Leidel , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-23 18:54+0000\n" +"Last-Translator: János R (Hangya)\n" +"Language-Team: Hungarian (http://www.transifex.com/django/django/language/" +"hu/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: hu\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Honlapok" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "A domainnév nem tartalmazhat szóközöket és tabulátorokat." + +msgid "domain name" +msgstr "tartománynév" + +msgid "display name" +msgstr "megjelenítendő név" + +msgid "site" +msgstr "honlap" + +msgid "sites" +msgstr "honlapok" diff --git a/testbed/django__django/django/contrib/sites/locale/pt_BR/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/pt_BR/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..81c63003828b402e662f431a666d913529fea74c Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/pt_BR/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/ro/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/ro/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..d5579e3f36cd54710699a26c2f7d222b77cb1c0d --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/ro/LC_MESSAGES/django.po @@ -0,0 +1,40 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Daniel Ursache-Dogariu, 2011 +# Denis Darii , 2014 +# Jannis Leidel , 2011 +# Razvan Stefanescu , 2015 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Razvan Stefanescu \n" +"Language-Team: Romanian (http://www.transifex.com/django/django/language/" +"ro/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ro\n" +"Plural-Forms: nplurals=3; plural=(n==1?0:(((n%100>19)||((n%100==0)&&(n!=0)))?" +"2:1));\n" + +msgid "Sites" +msgstr "Pagini web" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Numele de domeniu nu poate conține spații sau tab-uri." 
+ +msgid "domain name" +msgstr "nume domeniu" + +msgid "display name" +msgstr "nume afișat" + +msgid "site" +msgstr "pagină web" + +msgid "sites" +msgstr "pagini web" diff --git a/testbed/django__django/django/contrib/sites/locale/sq/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/sq/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..cbd1a6a6c51b067cd8675eefc572b6890024f5da Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/sq/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/sq/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/sq/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..3ce983073fb71822e2855c784bb5a7dd9d18ab44 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/sq/LC_MESSAGES/django.po @@ -0,0 +1,36 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Besnik , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-11-29 23:00+0000\n" +"Last-Translator: Besnik \n" +"Language-Team: Albanian (http://www.transifex.com/django/django/language/" +"sq/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: sq\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Sajte" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Emri i përkatësisë s’mund të përmbajë hapësira ose tabulacione." + +msgid "domain name" +msgstr "emër përkatësie" + +msgid "display name" +msgstr "emër shfaqjeje" + +msgid "site" +msgstr "sajt" + +msgid "sites" +msgstr "sajte" diff --git a/testbed/django__django/django/contrib/sites/locale/sr/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/sr/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..8ac57bb43961646cfe438ce8dd08b6619c68ec43 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/sr/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/sr_Latn/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/sr_Latn/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..deabe45b851bded3ebb3c61f7cbcd26bcb003b5c Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/sr_Latn/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/sv/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/sv/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..d727d90ed5f0868d99ec48a674a75410843d63c4 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/sv/LC_MESSAGES/django.po @@ -0,0 +1,39 @@ +# This file is distributed under the same license as the Django package. 
+# +# Translators: +# Anders Hovmöller , 2023 +# Jannis Leidel , 2011 +# Jonathan Lindén, 2014 +# Thomas Lundqvist, 2013 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2023-04-24 18:05+0000\n" +"Last-Translator: Anders Hovmöller , 2023\n" +"Language-Team: Swedish (http://www.transifex.com/django/django/language/" +"sv/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: sv\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Webbplatser" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Domännamnet kan inte innehålla mellanslag eller tab." + +msgid "domain name" +msgstr "domännamn" + +msgid "display name" +msgstr "visningsnamn" + +msgid "site" +msgstr "webbplats" + +msgid "sites" +msgstr "webbplatser" diff --git a/testbed/django__django/django/contrib/sites/locale/sw/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/sw/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..1447715c8170a59dc299829e66bcaeceff181939 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/sw/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/sw/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/sw/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..c3206e96076f47dd7bdf9bfaaaa40fccbb413f60 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/sw/LC_MESSAGES/django.po @@ -0,0 +1,36 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Machaku , 2013-2014 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-23 18:54+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Swahili (http://www.transifex.com/django/django/language/" +"sw/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: sw\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Tovuti" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Jina la kikoa haliwezi kuwa na nafasi yeyote kati yake." + +msgid "domain name" +msgstr "jina la kikoa" + +msgid "display name" +msgstr "jina" + +msgid "site" +msgstr "tovuti" + +msgid "sites" +msgstr "tovuti" diff --git a/testbed/django__django/django/contrib/sites/locale/ta/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/ta/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..9cdaa1e61499291b4abd0b89d000a27b97421518 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/ta/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/ta/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/ta/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..883c04a3788758d61358876cffbff56f737cce41 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/ta/LC_MESSAGES/django.po @@ -0,0 +1,35 @@ +# This file is distributed under the same license as the Django package. 
+# +# Translators: +# Jannis Leidel , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Tamil (http://www.transifex.com/django/django/language/ta/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ta\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "" + +msgid "domain name" +msgstr "களப் பெயர்" + +msgid "display name" +msgstr "காட்டும் பெயர்" + +msgid "site" +msgstr "வலைத்தளம்" + +msgid "sites" +msgstr "வலைத்தளங்கள்" diff --git a/testbed/django__django/django/contrib/sites/locale/te/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/te/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..3598aa634082819ef121087ee6a08bffb3704cb2 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/te/LC_MESSAGES/django.po @@ -0,0 +1,35 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Jannis Leidel , 2011 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Telugu (http://www.transifex.com/django/django/language/te/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: te\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "" + +msgid "domain name" +msgstr "డొమైన్ నామము" + +msgid "display name" +msgstr "కనిపిచ్చే పేరు" + +msgid "site" +msgstr "సైట్" + +msgid "sites" +msgstr "సైట్లు" diff --git a/testbed/django__django/django/contrib/sites/locale/tg/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/tg/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..e557293a562a9300402f2399b1c9d62ae08473d2 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/tg/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/tg/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/tg/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..cc7f86ec12359227b07d585f50196dafedc42ef3 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/tg/LC_MESSAGES/django.po @@ -0,0 +1,35 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Surush Sufiew , 2020 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2020-05-15 00:35+0000\n" +"Last-Translator: Surush Sufiew \n" +"Language-Team: Tajik (http://www.transifex.com/django/django/language/tg/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: tg\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Сомонаҳо" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Номи домен наметавонад аз фосилаҳо ва табулятсия иборат бошад." 
+ +msgid "domain name" +msgstr "номи доменӣ" + +msgid "display name" +msgstr "номи инъикосшуда" + +msgid "site" +msgstr "сомона" + +msgid "sites" +msgstr "сомонаҳо" diff --git a/testbed/django__django/django/contrib/sites/locale/th/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/th/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..bd461b668dca342ee05db498f43f27c480f420b4 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/th/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/tk/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/tk/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..a54affa5fa2e08391f7adf8ac9ebac2015974407 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/tk/LC_MESSAGES/django.po @@ -0,0 +1,36 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Resulkary , 2020 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2020-07-06 11:50+0000\n" +"Last-Translator: Resulkary \n" +"Language-Team: Turkmen (http://www.transifex.com/django/django/language/" +"tk/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: tk\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Sites" +msgstr "Sahypalar" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Domen adynda boşluklar ýa-da goýmalar bolup bilmez." + +msgid "domain name" +msgstr "domen ady" + +msgid "display name" +msgstr "görkezilýän ady" + +msgid "site" +msgstr "sahypa" + +msgid "sites" +msgstr "sahypalar" diff --git a/testbed/django__django/django/contrib/sites/locale/tt/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/tt/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..519ece305868d79819cdc02c7ad5caadbd95e9a3 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/tt/LC_MESSAGES/django.po @@ -0,0 +1,36 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Azat Khasanshin , 2011 +# v_ildar , 2014 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Jannis Leidel \n" +"Language-Team: Tatar (http://www.transifex.com/django/django/language/tt/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: tt\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +msgid "Sites" +msgstr "Сайтлар" + +msgid "The domain name cannot contain any spaces or tabs." 
+msgstr "" + +msgid "domain name" +msgstr "домен исеме" + +msgid "display name" +msgstr "чагылдырылган исем" + +msgid "site" +msgstr "сайт" + +msgid "sites" +msgstr "сайтлар" diff --git a/testbed/django__django/django/contrib/sites/locale/uk/LC_MESSAGES/django.mo b/testbed/django__django/django/contrib/sites/locale/uk/LC_MESSAGES/django.mo new file mode 100644 index 0000000000000000000000000000000000000000..8ffb1b948f77e5bb1c9d537bcd875c273abb6418 Binary files /dev/null and b/testbed/django__django/django/contrib/sites/locale/uk/LC_MESSAGES/django.mo differ diff --git a/testbed/django__django/django/contrib/sites/locale/uk/LC_MESSAGES/django.po b/testbed/django__django/django/contrib/sites/locale/uk/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..f98454c9d4cbbe2bc0a4e0ae7b35d29b040b37a6 --- /dev/null +++ b/testbed/django__django/django/contrib/sites/locale/uk/LC_MESSAGES/django.po @@ -0,0 +1,41 @@ +# This file is distributed under the same license as the Django package. +# +# Translators: +# Oleksandr Chernihov , 2014 +# Jannis Leidel , 2011 +# Alex Bolotov , 2013 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-23 18:54+0000\n" +"Last-Translator: Mykola Zamkovoi \n" +"Language-Team: Ukrainian (http://www.transifex.com/django/django/language/" +"uk/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: uk\n" +"Plural-Forms: nplurals=4; plural=(n % 1 == 0 && n % 10 == 1 && n % 100 != " +"11 ? 0 : n % 1 == 0 && n % 10 >= 2 && n % 10 <= 4 && (n % 100 < 12 || n % " +"100 > 14) ? 1 : n % 1 == 0 && (n % 10 ==0 || (n % 10 >=5 && n % 10 <=9) || " +"(n % 100 >=11 && n % 100 <=14 )) ? 2: 3);\n" + +msgid "Sites" +msgstr "Сайти" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "Доменне ім'я не може містити пробіли або символи табуляції." + +msgid "domain name" +msgstr "доменне ім'я" + +msgid "display name" +msgstr "відображуване ім'я" + +msgid "site" +msgstr "сайт" + +msgid "sites" +msgstr "сайти" diff --git a/testbed/django__django/django/db/__init__.py b/testbed/django__django/django/db/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f3cf4574a961ffc760d4291d08ab197818560ff5 --- /dev/null +++ b/testbed/django__django/django/db/__init__.py @@ -0,0 +1,61 @@ +from django.core import signals +from django.db.utils import ( + DEFAULT_DB_ALIAS, + DJANGO_VERSION_PICKLE_KEY, + ConnectionHandler, + ConnectionRouter, + DatabaseError, + DataError, + Error, + IntegrityError, + InterfaceError, + InternalError, + NotSupportedError, + OperationalError, + ProgrammingError, +) +from django.utils.connection import ConnectionProxy + +__all__ = [ + "connection", + "connections", + "router", + "DatabaseError", + "IntegrityError", + "InternalError", + "ProgrammingError", + "DataError", + "NotSupportedError", + "Error", + "InterfaceError", + "OperationalError", + "DEFAULT_DB_ALIAS", + "DJANGO_VERSION_PICKLE_KEY", +] + +connections = ConnectionHandler() + +router = ConnectionRouter() + +# For backwards compatibility. Prefer connections['default'] instead. +connection = ConnectionProxy(connections, DEFAULT_DB_ALIAS) + + +# Register an event to reset saved queries when a Django request is started. 
+def reset_queries(**kwargs):
+    for conn in connections.all(initialized_only=True):
+        conn.queries_log.clear()
+
+
+signals.request_started.connect(reset_queries)
+
+
+# Register an event to reset transaction state and close connections past
+# their lifetime.
+def close_old_connections(**kwargs):
+    for conn in connections.all(initialized_only=True):
+        conn.close_if_unusable_or_obsolete()
+
+
+signals.request_started.connect(close_old_connections)
+signals.request_finished.connect(close_old_connections)
diff --git a/testbed/django__django/django/db/backends/__init__.py b/testbed/django__django/django/db/backends/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/testbed/django__django/django/db/backends/ddl_references.py b/testbed/django__django/django/db/backends/ddl_references.py
new file mode 100644
index 0000000000000000000000000000000000000000..412d07a993db9b4e7d12330c5a6dc303c90a3c31
--- /dev/null
+++ b/testbed/django__django/django/db/backends/ddl_references.py
@@ -0,0 +1,254 @@
+"""
+Helpers to manipulate deferred DDL statements that might need to be adjusted or
+discarded when executing a migration.
+"""
+from copy import deepcopy
+
+
+class Reference:
+    """Base class that defines the reference interface."""
+
+    def references_table(self, table):
+        """
+        Return whether or not this instance references the specified table.
+        """
+        return False
+
+    def references_column(self, table, column):
+        """
+        Return whether or not this instance references the specified column.
+        """
+        return False
+
+    def rename_table_references(self, old_table, new_table):
+        """
+        Rename all references to the old_table to the new_table.
+        """
+        pass
+
+    def rename_column_references(self, table, old_column, new_column):
+        """
+        Rename all references to the old_column to the new_column.
+        """
+        pass
+
+    def __repr__(self):
+        return "<%s %r>" % (self.__class__.__name__, str(self))
+
+    def __str__(self):
+        raise NotImplementedError(
+            "Subclasses must define how they should be converted to string."
+ ) + + +class Table(Reference): + """Hold a reference to a table.""" + + def __init__(self, table, quote_name): + self.table = table + self.quote_name = quote_name + + def references_table(self, table): + return self.table == table + + def rename_table_references(self, old_table, new_table): + if self.table == old_table: + self.table = new_table + + def __str__(self): + return self.quote_name(self.table) + + +class TableColumns(Table): + """Base class for references to multiple columns of a table.""" + + def __init__(self, table, columns): + self.table = table + self.columns = columns + + def references_column(self, table, column): + return self.table == table and column in self.columns + + def rename_column_references(self, table, old_column, new_column): + if self.table == table: + for index, column in enumerate(self.columns): + if column == old_column: + self.columns[index] = new_column + + +class Columns(TableColumns): + """Hold a reference to one or many columns.""" + + def __init__(self, table, columns, quote_name, col_suffixes=()): + self.quote_name = quote_name + self.col_suffixes = col_suffixes + super().__init__(table, columns) + + def __str__(self): + def col_str(column, idx): + col = self.quote_name(column) + try: + suffix = self.col_suffixes[idx] + if suffix: + col = "{} {}".format(col, suffix) + except IndexError: + pass + return col + + return ", ".join( + col_str(column, idx) for idx, column in enumerate(self.columns) + ) + + +class IndexName(TableColumns): + """Hold a reference to an index name.""" + + def __init__(self, table, columns, suffix, create_index_name): + self.suffix = suffix + self.create_index_name = create_index_name + super().__init__(table, columns) + + def __str__(self): + return self.create_index_name(self.table, self.columns, self.suffix) + + +class IndexColumns(Columns): + def __init__(self, table, columns, quote_name, col_suffixes=(), opclasses=()): + self.opclasses = opclasses + super().__init__(table, columns, quote_name, col_suffixes) + + def __str__(self): + def col_str(column, idx): + # Index.__init__() guarantees that self.opclasses is the same + # length as self.columns. 
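+            # col_suffixes, by contrast, may be shorter than self.columns,
+            # hence the IndexError guard below.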
+ col = "{} {}".format(self.quote_name(column), self.opclasses[idx]) + try: + suffix = self.col_suffixes[idx] + if suffix: + col = "{} {}".format(col, suffix) + except IndexError: + pass + return col + + return ", ".join( + col_str(column, idx) for idx, column in enumerate(self.columns) + ) + + +class ForeignKeyName(TableColumns): + """Hold a reference to a foreign key name.""" + + def __init__( + self, + from_table, + from_columns, + to_table, + to_columns, + suffix_template, + create_fk_name, + ): + self.to_reference = TableColumns(to_table, to_columns) + self.suffix_template = suffix_template + self.create_fk_name = create_fk_name + super().__init__( + from_table, + from_columns, + ) + + def references_table(self, table): + return super().references_table(table) or self.to_reference.references_table( + table + ) + + def references_column(self, table, column): + return super().references_column( + table, column + ) or self.to_reference.references_column(table, column) + + def rename_table_references(self, old_table, new_table): + super().rename_table_references(old_table, new_table) + self.to_reference.rename_table_references(old_table, new_table) + + def rename_column_references(self, table, old_column, new_column): + super().rename_column_references(table, old_column, new_column) + self.to_reference.rename_column_references(table, old_column, new_column) + + def __str__(self): + suffix = self.suffix_template % { + "to_table": self.to_reference.table, + "to_column": self.to_reference.columns[0], + } + return self.create_fk_name(self.table, self.columns, suffix) + + +class Statement(Reference): + """ + Statement template and formatting parameters container. + + Allows keeping a reference to a statement without interpolating identifiers + that might have to be adjusted if they're referencing a table or column + that is removed + """ + + def __init__(self, template, **parts): + self.template = template + self.parts = parts + + def references_table(self, table): + return any( + hasattr(part, "references_table") and part.references_table(table) + for part in self.parts.values() + ) + + def references_column(self, table, column): + return any( + hasattr(part, "references_column") and part.references_column(table, column) + for part in self.parts.values() + ) + + def rename_table_references(self, old_table, new_table): + for part in self.parts.values(): + if hasattr(part, "rename_table_references"): + part.rename_table_references(old_table, new_table) + + def rename_column_references(self, table, old_column, new_column): + for part in self.parts.values(): + if hasattr(part, "rename_column_references"): + part.rename_column_references(table, old_column, new_column) + + def __str__(self): + return self.template % self.parts + + +class Expressions(TableColumns): + def __init__(self, table, expressions, compiler, quote_value): + self.compiler = compiler + self.expressions = expressions + self.quote_value = quote_value + columns = [ + col.target.column + for col in self.compiler.query._gen_cols([self.expressions]) + ] + super().__init__(table, columns) + + def rename_table_references(self, old_table, new_table): + if self.table != old_table: + return + self.expressions = self.expressions.relabeled_clone({old_table: new_table}) + super().rename_table_references(old_table, new_table) + + def rename_column_references(self, table, old_column, new_column): + if self.table != table: + return + expressions = deepcopy(self.expressions) + self.columns = [] + for col in 
self.compiler.query._gen_cols([expressions]): + if col.target.column == old_column: + col.target.column = new_column + self.columns.append(col.target.column) + self.expressions = expressions + + def __str__(self): + sql, params = self.compiler.compile(self.expressions) + params = map(self.quote_value, params) + return sql % tuple(params) diff --git a/testbed/django__django/django/db/backends/postgresql/features.py b/testbed/django__django/django/db/backends/postgresql/features.py new file mode 100644 index 0000000000000000000000000000000000000000..29b6a4f6c583f2e4ffbde870f76a90e7c264e687 --- /dev/null +++ b/testbed/django__django/django/db/backends/postgresql/features.py @@ -0,0 +1,137 @@ +import operator + +from django.db import DataError, InterfaceError +from django.db.backends.base.features import BaseDatabaseFeatures +from django.db.backends.postgresql.psycopg_any import is_psycopg3 +from django.utils.functional import cached_property + + +class DatabaseFeatures(BaseDatabaseFeatures): + minimum_database_version = (12,) + allows_group_by_selected_pks = True + can_return_columns_from_insert = True + can_return_rows_from_bulk_insert = True + has_real_datatype = True + has_native_uuid_field = True + has_native_duration_field = True + has_native_json_field = True + can_defer_constraint_checks = True + has_select_for_update = True + has_select_for_update_nowait = True + has_select_for_update_of = True + has_select_for_update_skip_locked = True + has_select_for_no_key_update = True + can_release_savepoints = True + supports_comments = True + supports_tablespaces = True + supports_transactions = True + can_introspect_materialized_views = True + can_distinct_on_fields = True + can_rollback_ddl = True + schema_editor_uses_clientside_param_binding = True + supports_combined_alters = True + nulls_order_largest = True + closed_cursor_error_class = InterfaceError + greatest_least_ignores_nulls = True + can_clone_databases = True + supports_temporal_subtraction = True + supports_slicing_ordering_in_compound = True + create_test_procedure_without_params_sql = """ + CREATE FUNCTION test_procedure () RETURNS void AS $$ + DECLARE + V_I INTEGER; + BEGIN + V_I := 1; + END; + $$ LANGUAGE plpgsql;""" + create_test_procedure_with_int_param_sql = """ + CREATE FUNCTION test_procedure (P_I INTEGER) RETURNS void AS $$ + DECLARE + V_I INTEGER; + BEGIN + V_I := P_I; + END; + $$ LANGUAGE plpgsql;""" + create_test_table_with_composite_primary_key = """ + CREATE TABLE test_table_composite_pk ( + column_1 INTEGER NOT NULL, + column_2 INTEGER NOT NULL, + PRIMARY KEY(column_1, column_2) + ) + """ + requires_casted_case_in_updates = True + supports_over_clause = True + only_supports_unbounded_with_preceding_and_following = True + supports_aggregate_filter_clause = True + supported_explain_formats = {"JSON", "TEXT", "XML", "YAML"} + supports_deferrable_unique_constraints = True + has_json_operators = True + json_key_contains_list_matching_requires_list = True + supports_update_conflicts = True + supports_update_conflicts_with_target = True + supports_covering_indexes = True + can_rename_index = True + test_collations = { + "non_default": "sv-x-icu", + "swedish_ci": "sv-x-icu", + } + test_now_utc_template = "STATEMENT_TIMESTAMP() AT TIME ZONE 'UTC'" + insert_test_table_with_defaults = "INSERT INTO {} DEFAULT VALUES" + + django_test_skips = { + "opclasses are PostgreSQL only.": { + "indexes.tests.SchemaIndexesNotPostgreSQLTests." 
+ "test_create_index_ignores_opclasses", + }, + "PostgreSQL requires casting to text.": { + "lookup.tests.LookupTests.test_textfield_exact_null", + }, + } + + @cached_property + def django_test_expected_failures(self): + expected_failures = set() + if self.uses_server_side_binding: + expected_failures.update( + { + # Parameters passed to expressions in SELECT and GROUP BY + # clauses are not recognized as the same values when using + # server-side binding cursors (#34255). + "aggregation.tests.AggregateTestCase." + "test_group_by_nested_expression_with_params", + } + ) + return expected_failures + + @cached_property + def uses_server_side_binding(self): + options = self.connection.settings_dict["OPTIONS"] + return is_psycopg3 and options.get("server_side_binding") is True + + @cached_property + def prohibits_null_characters_in_text_exception(self): + if is_psycopg3: + return DataError, "PostgreSQL text fields cannot contain NUL (0x00) bytes" + else: + return ValueError, "A string literal cannot contain NUL (0x00) characters." + + @cached_property + def introspected_field_types(self): + return { + **super().introspected_field_types, + "PositiveBigIntegerField": "BigIntegerField", + "PositiveIntegerField": "IntegerField", + "PositiveSmallIntegerField": "SmallIntegerField", + } + + @cached_property + def is_postgresql_13(self): + return self.connection.pg_version >= 130000 + + @cached_property + def is_postgresql_14(self): + return self.connection.pg_version >= 140000 + + has_bit_xor = property(operator.attrgetter("is_postgresql_14")) + supports_covering_spgist_indexes = property(operator.attrgetter("is_postgresql_14")) + supports_unlimited_charfield = True diff --git a/testbed/django__django/django/db/backends/postgresql/operations.py b/testbed/django__django/django/db/backends/postgresql/operations.py new file mode 100644 index 0000000000000000000000000000000000000000..aa839f5634f8293e3564f6fd681dcfaf74f58594 --- /dev/null +++ b/testbed/django__django/django/db/backends/postgresql/operations.py @@ -0,0 +1,426 @@ +import json +from functools import lru_cache, partial + +from django.conf import settings +from django.db.backends.base.operations import BaseDatabaseOperations +from django.db.backends.postgresql.psycopg_any import ( + Inet, + Jsonb, + errors, + is_psycopg3, + mogrify, +) +from django.db.backends.utils import split_tzname_delta +from django.db.models.constants import OnConflict +from django.db.models.functions import Cast +from django.utils.regex_helper import _lazy_re_compile + + +@lru_cache +def get_json_dumps(encoder): + if encoder is None: + return json.dumps + return partial(json.dumps, cls=encoder) + + +class DatabaseOperations(BaseDatabaseOperations): + cast_char_field_without_max_length = "varchar" + explain_prefix = "EXPLAIN" + explain_options = frozenset( + [ + "ANALYZE", + "BUFFERS", + "COSTS", + "SETTINGS", + "SUMMARY", + "TIMING", + "VERBOSE", + "WAL", + ] + ) + cast_data_types = { + "AutoField": "integer", + "BigAutoField": "bigint", + "SmallAutoField": "smallint", + } + + if is_psycopg3: + from psycopg.types import numeric + + integerfield_type_map = { + "SmallIntegerField": numeric.Int2, + "IntegerField": numeric.Int4, + "BigIntegerField": numeric.Int8, + "PositiveSmallIntegerField": numeric.Int2, + "PositiveIntegerField": numeric.Int4, + "PositiveBigIntegerField": numeric.Int8, + } + + def unification_cast_sql(self, output_field): + internal_type = output_field.get_internal_type() + if internal_type in ( + "GenericIPAddressField", + "IPAddressField", + 
"TimeField", + "UUIDField", + ): + # PostgreSQL will resolve a union as type 'text' if input types are + # 'unknown'. + # https://www.postgresql.org/docs/current/typeconv-union-case.html + # These fields cannot be implicitly cast back in the default + # PostgreSQL configuration so we need to explicitly cast them. + # We must also remove components of the type within brackets: + # varchar(255) -> varchar. + return ( + "CAST(%%s AS %s)" % output_field.db_type(self.connection).split("(")[0] + ) + return "%s" + + # EXTRACT format cannot be passed in parameters. + _extract_format_re = _lazy_re_compile(r"[A-Z_]+") + + def date_extract_sql(self, lookup_type, sql, params): + # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT + if lookup_type == "week_day": + # For consistency across backends, we return Sunday=1, Saturday=7. + return f"EXTRACT(DOW FROM {sql}) + 1", params + elif lookup_type == "iso_week_day": + return f"EXTRACT(ISODOW FROM {sql})", params + elif lookup_type == "iso_year": + return f"EXTRACT(ISOYEAR FROM {sql})", params + + lookup_type = lookup_type.upper() + if not self._extract_format_re.fullmatch(lookup_type): + raise ValueError(f"Invalid lookup type: {lookup_type!r}") + return f"EXTRACT({lookup_type} FROM {sql})", params + + def date_trunc_sql(self, lookup_type, sql, params, tzname=None): + sql, params = self._convert_sql_to_tz(sql, params, tzname) + # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC + return f"DATE_TRUNC(%s, {sql})", (lookup_type, *params) + + def _prepare_tzname_delta(self, tzname): + tzname, sign, offset = split_tzname_delta(tzname) + if offset: + sign = "-" if sign == "+" else "+" + return f"{tzname}{sign}{offset}" + return tzname + + def _convert_sql_to_tz(self, sql, params, tzname): + if tzname and settings.USE_TZ: + tzname_param = self._prepare_tzname_delta(tzname) + return f"{sql} AT TIME ZONE %s", (*params, tzname_param) + return sql, params + + def datetime_cast_date_sql(self, sql, params, tzname): + sql, params = self._convert_sql_to_tz(sql, params, tzname) + return f"({sql})::date", params + + def datetime_cast_time_sql(self, sql, params, tzname): + sql, params = self._convert_sql_to_tz(sql, params, tzname) + return f"({sql})::time", params + + def datetime_extract_sql(self, lookup_type, sql, params, tzname): + sql, params = self._convert_sql_to_tz(sql, params, tzname) + if lookup_type == "second": + # Truncate fractional seconds. + return f"EXTRACT(SECOND FROM DATE_TRUNC(%s, {sql}))", ("second", *params) + return self.date_extract_sql(lookup_type, sql, params) + + def datetime_trunc_sql(self, lookup_type, sql, params, tzname): + sql, params = self._convert_sql_to_tz(sql, params, tzname) + # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC + return f"DATE_TRUNC(%s, {sql})", (lookup_type, *params) + + def time_extract_sql(self, lookup_type, sql, params): + if lookup_type == "second": + # Truncate fractional seconds. 
+ return f"EXTRACT(SECOND FROM DATE_TRUNC(%s, {sql}))", ("second", *params) + return self.date_extract_sql(lookup_type, sql, params) + + def time_trunc_sql(self, lookup_type, sql, params, tzname=None): + sql, params = self._convert_sql_to_tz(sql, params, tzname) + return f"DATE_TRUNC(%s, {sql})::time", (lookup_type, *params) + + def deferrable_sql(self): + return " DEFERRABLE INITIALLY DEFERRED" + + def fetch_returned_insert_rows(self, cursor): + """ + Given a cursor object that has just performed an INSERT...RETURNING + statement into a table, return the tuple of returned data. + """ + return cursor.fetchall() + + def lookup_cast(self, lookup_type, internal_type=None): + lookup = "%s" + + if lookup_type == "isnull" and internal_type in ( + "CharField", + "EmailField", + "TextField", + "CICharField", + "CIEmailField", + "CITextField", + ): + return "%s::text" + + # Cast text lookups to text to allow things like filter(x__contains=4) + if lookup_type in ( + "iexact", + "contains", + "icontains", + "startswith", + "istartswith", + "endswith", + "iendswith", + "regex", + "iregex", + ): + if internal_type in ("IPAddressField", "GenericIPAddressField"): + lookup = "HOST(%s)" + # RemovedInDjango51Warning. + elif internal_type in ("CICharField", "CIEmailField", "CITextField"): + lookup = "%s::citext" + else: + lookup = "%s::text" + + # Use UPPER(x) for case-insensitive lookups; it's faster. + if lookup_type in ("iexact", "icontains", "istartswith", "iendswith"): + lookup = "UPPER(%s)" % lookup + + return lookup + + def no_limit_value(self): + return None + + def prepare_sql_script(self, sql): + return [sql] + + def quote_name(self, name): + if name.startswith('"') and name.endswith('"'): + return name # Quoting once is enough. + return '"%s"' % name + + def compose_sql(self, sql, params): + return mogrify(sql, params, self.connection) + + def set_time_zone_sql(self): + return "SELECT set_config('TimeZone', %s, false)" + + def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False): + if not tables: + return [] + + # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows us + # to truncate tables referenced by a foreign key in any other table. + sql_parts = [ + style.SQL_KEYWORD("TRUNCATE"), + ", ".join(style.SQL_FIELD(self.quote_name(table)) for table in tables), + ] + if reset_sequences: + sql_parts.append(style.SQL_KEYWORD("RESTART IDENTITY")) + if allow_cascade: + sql_parts.append(style.SQL_KEYWORD("CASCADE")) + return ["%s;" % " ".join(sql_parts)] + + def sequence_reset_by_name_sql(self, style, sequences): + # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements + # to reset sequence indices + sql = [] + for sequence_info in sequences: + table_name = sequence_info["table"] + # 'id' will be the case if it's an m2m using an autogenerated + # intermediate table (see BaseDatabaseIntrospection.sequence_list). 
+ column_name = sequence_info["column"] or "id" + sql.append( + "%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" + % ( + style.SQL_KEYWORD("SELECT"), + style.SQL_TABLE(self.quote_name(table_name)), + style.SQL_FIELD(column_name), + ) + ) + return sql + + def tablespace_sql(self, tablespace, inline=False): + if inline: + return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) + else: + return "TABLESPACE %s" % self.quote_name(tablespace) + + def sequence_reset_sql(self, style, model_list): + from django.db import models + + output = [] + qn = self.quote_name + for model in model_list: + # Use `coalesce` to set the sequence for each model to the max pk + # value if there are records, or 1 if there are none. Set the + # `is_called` property (the third argument to `setval`) to true if + # there are records (as the max pk value is already in use), + # otherwise set it to false. Use pg_get_serial_sequence to get the + # underlying sequence name from the table name and column name. + + for f in model._meta.local_fields: + if isinstance(f, models.AutoField): + output.append( + "%s setval(pg_get_serial_sequence('%s','%s'), " + "coalesce(max(%s), 1), max(%s) %s null) %s %s;" + % ( + style.SQL_KEYWORD("SELECT"), + style.SQL_TABLE(qn(model._meta.db_table)), + style.SQL_FIELD(f.column), + style.SQL_FIELD(qn(f.column)), + style.SQL_FIELD(qn(f.column)), + style.SQL_KEYWORD("IS NOT"), + style.SQL_KEYWORD("FROM"), + style.SQL_TABLE(qn(model._meta.db_table)), + ) + ) + # Only one AutoField is allowed per model, so don't bother + # continuing. + break + return output + + def prep_for_iexact_query(self, x): + return x + + def max_name_length(self): + """ + Return the maximum length of an identifier. + + The maximum length of an identifier is 63 by default, but can be + changed by recompiling PostgreSQL after editing the NAMEDATALEN + macro in src/include/pg_config_manual.h. + + This implementation returns 63, but can be overridden by a custom + database backend that inherits most of its behavior from this one. + """ + return 63 + + def distinct_sql(self, fields, params): + if fields: + params = [param for param_list in params for param in param_list] + return (["DISTINCT ON (%s)" % ", ".join(fields)], params) + else: + return ["DISTINCT"], [] + + if is_psycopg3: + + def last_executed_query(self, cursor, sql, params): + try: + return self.compose_sql(sql, params) + except errors.DataError: + return None + + else: + + def last_executed_query(self, cursor, sql, params): + # https://www.psycopg.org/docs/cursor.html#cursor.query + # The query attribute is a Psycopg extension to the DB API 2.0. 
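+            # For psycopg2, cursor.query contains the exact bytes sent to the
+            # backend for the last execute(), with parameters already bound.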
+ if cursor.query is not None: + return cursor.query.decode() + return None + + def return_insert_columns(self, fields): + if not fields: + return "", () + columns = [ + "%s.%s" + % ( + self.quote_name(field.model._meta.db_table), + self.quote_name(field.column), + ) + for field in fields + ] + return "RETURNING %s" % ", ".join(columns), () + + def bulk_insert_sql(self, fields, placeholder_rows): + placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) + values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) + return "VALUES " + values_sql + + if is_psycopg3: + + def adapt_integerfield_value(self, value, internal_type): + if value is None or hasattr(value, "resolve_expression"): + return value + return self.integerfield_type_map[internal_type](value) + + def adapt_datefield_value(self, value): + return value + + def adapt_datetimefield_value(self, value): + return value + + def adapt_timefield_value(self, value): + return value + + def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None): + return value + + def adapt_ipaddressfield_value(self, value): + if value: + return Inet(value) + return None + + def adapt_json_value(self, value, encoder): + return Jsonb(value, dumps=get_json_dumps(encoder)) + + def subtract_temporals(self, internal_type, lhs, rhs): + if internal_type == "DateField": + lhs_sql, lhs_params = lhs + rhs_sql, rhs_params = rhs + params = (*lhs_params, *rhs_params) + return "(interval '1 day' * (%s - %s))" % (lhs_sql, rhs_sql), params + return super().subtract_temporals(internal_type, lhs, rhs) + + def explain_query_prefix(self, format=None, **options): + extra = {} + # Normalize options. + if options: + options = { + name.upper(): "true" if value else "false" + for name, value in options.items() + } + for valid_option in self.explain_options: + value = options.pop(valid_option, None) + if value is not None: + extra[valid_option] = value + prefix = super().explain_query_prefix(format, **options) + if format: + extra["FORMAT"] = format + if extra: + prefix += " (%s)" % ", ".join("%s %s" % i for i in extra.items()) + return prefix + + def on_conflict_suffix_sql(self, fields, on_conflict, update_fields, unique_fields): + if on_conflict == OnConflict.IGNORE: + return "ON CONFLICT DO NOTHING" + if on_conflict == OnConflict.UPDATE: + return "ON CONFLICT(%s) DO UPDATE SET %s" % ( + ", ".join(map(self.quote_name, unique_fields)), + ", ".join( + [ + f"{field} = EXCLUDED.{field}" + for field in map(self.quote_name, update_fields) + ] + ), + ) + return super().on_conflict_suffix_sql( + fields, + on_conflict, + update_fields, + unique_fields, + ) + + def prepare_join_on_clause(self, lhs_table, lhs_field, rhs_table, rhs_field): + lhs_expr, rhs_expr = super().prepare_join_on_clause( + lhs_table, lhs_field, rhs_table, rhs_field + ) + + if lhs_field.db_type(self.connection) != rhs_field.db_type(self.connection): + rhs_expr = Cast(rhs_expr, lhs_field) + + return lhs_expr, rhs_expr diff --git a/testbed/django__django/django/db/backends/signals.py b/testbed/django__django/django/db/backends/signals.py new file mode 100644 index 0000000000000000000000000000000000000000..a8079d0d76fd1afd429dd2c07917f8ee9bbb477a --- /dev/null +++ b/testbed/django__django/django/db/backends/signals.py @@ -0,0 +1,3 @@ +from django.dispatch import Signal + +connection_created = Signal() diff --git a/testbed/django__django/django/db/backends/sqlite3/__init__.py b/testbed/django__django/django/db/backends/sqlite3/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/django__django/django/db/backends/sqlite3/client.py b/testbed/django__django/django/db/backends/sqlite3/client.py new file mode 100644 index 0000000000000000000000000000000000000000..7cee35dc812c7320ab17f58b1d41ad8ebbbb18d4 --- /dev/null +++ b/testbed/django__django/django/db/backends/sqlite3/client.py @@ -0,0 +1,10 @@ +from django.db.backends.base.client import BaseDatabaseClient + + +class DatabaseClient(BaseDatabaseClient): + executable_name = "sqlite3" + + @classmethod + def settings_to_cmd_args_env(cls, settings_dict, parameters): + args = [cls.executable_name, settings_dict["NAME"], *parameters] + return args, None diff --git a/testbed/django__django/django/db/backends/sqlite3/features.py b/testbed/django__django/django/db/backends/sqlite3/features.py new file mode 100644 index 0000000000000000000000000000000000000000..f471b72cb202874dcf01e76496571f2df25dc049 --- /dev/null +++ b/testbed/django__django/django/db/backends/sqlite3/features.py @@ -0,0 +1,167 @@ +import operator + +from django.db import transaction +from django.db.backends.base.features import BaseDatabaseFeatures +from django.db.utils import OperationalError +from django.utils.functional import cached_property + +from .base import Database + + +class DatabaseFeatures(BaseDatabaseFeatures): + minimum_database_version = (3, 21) + test_db_allows_multiple_connections = False + supports_unspecified_pk = True + supports_timezones = False + max_query_params = 999 + supports_transactions = True + atomic_transactions = False + can_rollback_ddl = True + can_create_inline_fk = False + requires_literal_defaults = True + can_clone_databases = True + supports_temporal_subtraction = True + ignores_table_name_case = True + supports_cast_with_precision = False + time_cast_precision = 3 + can_release_savepoints = True + has_case_insensitive_like = True + # Is "ALTER TABLE ... RENAME COLUMN" supported? + can_alter_table_rename_column = Database.sqlite_version_info >= (3, 25, 0) + # Is "ALTER TABLE ... DROP COLUMN" supported? + can_alter_table_drop_column = Database.sqlite_version_info >= (3, 35, 5) + supports_parentheses_in_compound = False + can_defer_constraint_checks = True + supports_over_clause = Database.sqlite_version_info >= (3, 25, 0) + supports_frame_range_fixed_distance = Database.sqlite_version_info >= (3, 28, 0) + supports_aggregate_filter_clause = Database.sqlite_version_info >= (3, 30, 1) + supports_order_by_nulls_modifier = Database.sqlite_version_info >= (3, 30, 0) + # NULLS LAST/FIRST emulation on < 3.30 requires subquery wrapping. + requires_compound_order_by_subquery = Database.sqlite_version_info < (3, 30) + order_by_nulls_first = True + supports_json_field_contains = False + supports_update_conflicts = Database.sqlite_version_info >= (3, 24, 0) + supports_update_conflicts_with_target = supports_update_conflicts + test_collations = { + "ci": "nocase", + "cs": "binary", + "non_default": "nocase", + } + django_test_expected_failures = { + # The django_format_dtdelta() function doesn't properly handle mixed + # Date/DateTime fields and timedeltas. 
+ "expressions.tests.FTimeDeltaTests.test_mixed_comparisons1", + } + create_test_table_with_composite_primary_key = """ + CREATE TABLE test_table_composite_pk ( + column_1 INTEGER NOT NULL, + column_2 INTEGER NOT NULL, + PRIMARY KEY(column_1, column_2) + ) + """ + insert_test_table_with_defaults = 'INSERT INTO {} ("null") VALUES (1)' + supports_default_keyword_in_insert = False + + @cached_property + def django_test_skips(self): + skips = { + "SQLite stores values rounded to 15 significant digits.": { + "model_fields.test_decimalfield.DecimalFieldTests." + "test_fetch_from_db_without_float_rounding", + }, + "SQLite naively remakes the table on field alteration.": { + "schema.tests.SchemaTests.test_unique_no_unnecessary_fk_drops", + "schema.tests.SchemaTests.test_unique_and_reverse_m2m", + "schema.tests.SchemaTests." + "test_alter_field_default_doesnt_perform_queries", + "schema.tests.SchemaTests." + "test_rename_column_renames_deferred_sql_references", + }, + "SQLite doesn't support negative precision for ROUND().": { + "db_functions.math.test_round.RoundTests." + "test_null_with_negative_precision", + "db_functions.math.test_round.RoundTests." + "test_decimal_with_negative_precision", + "db_functions.math.test_round.RoundTests." + "test_float_with_negative_precision", + "db_functions.math.test_round.RoundTests." + "test_integer_with_negative_precision", + }, + } + if Database.sqlite_version_info < (3, 27): + skips.update( + { + "Nondeterministic failure on SQLite < 3.27.": { + "expressions_window.tests.WindowFunctionTests." + "test_subquery_row_range_rank", + }, + } + ) + if self.connection.is_in_memory_db(): + skips.update( + { + "the sqlite backend's close() method is a no-op when using an " + "in-memory database": { + "servers.test_liveserverthread.LiveServerThreadTest." + "test_closes_connections", + "servers.tests.LiveServerTestCloseConnectionTest." + "test_closes_connections", + }, + "For SQLite in-memory tests, closing the connection destroys" + "the database.": { + "test_utils.tests.AssertNumQueriesUponConnectionTests." + "test_ignores_connection_configuration_queries", + }, + } + ) + else: + skips.update( + { + "Only connections to in-memory SQLite databases are passed to the " + "server thread.": { + "servers.tests.LiveServerInMemoryDatabaseLockTest." + "test_in_memory_database_lock", + }, + "multiprocessing's start method is checked only for in-memory " + "SQLite databases": { + "backends.sqlite.test_creation.TestDbSignatureTests." 
+ "test_get_test_db_clone_settings_not_supported", + }, + } + ) + return skips + + @cached_property + def supports_atomic_references_rename(self): + return Database.sqlite_version_info >= (3, 26, 0) + + @cached_property + def introspected_field_types(self): + return { + **super().introspected_field_types, + "BigAutoField": "AutoField", + "DurationField": "BigIntegerField", + "GenericIPAddressField": "CharField", + "SmallAutoField": "AutoField", + } + + @cached_property + def supports_json_field(self): + with self.connection.cursor() as cursor: + try: + with transaction.atomic(self.connection.alias): + cursor.execute('SELECT JSON(\'{"a": "b"}\')') + except OperationalError: + return False + return True + + can_introspect_json_field = property(operator.attrgetter("supports_json_field")) + has_json_object_function = property(operator.attrgetter("supports_json_field")) + + @cached_property + def can_return_columns_from_insert(self): + return Database.sqlite_version_info >= (3, 35) + + can_return_rows_from_bulk_insert = property( + operator.attrgetter("can_return_columns_from_insert") + ) diff --git a/testbed/django__django/django/db/backends/sqlite3/introspection.py b/testbed/django__django/django/db/backends/sqlite3/introspection.py new file mode 100644 index 0000000000000000000000000000000000000000..d2fe3d8c7135486d1942f8af0129163568ac2b17 --- /dev/null +++ b/testbed/django__django/django/db/backends/sqlite3/introspection.py @@ -0,0 +1,434 @@ +from collections import namedtuple + +import sqlparse + +from django.db import DatabaseError +from django.db.backends.base.introspection import BaseDatabaseIntrospection +from django.db.backends.base.introspection import FieldInfo as BaseFieldInfo +from django.db.backends.base.introspection import TableInfo +from django.db.models import Index +from django.utils.regex_helper import _lazy_re_compile + +FieldInfo = namedtuple( + "FieldInfo", BaseFieldInfo._fields + ("pk", "has_json_constraint") +) + +field_size_re = _lazy_re_compile(r"^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$") + + +def get_field_size(name): + """Extract the size number from a "varchar(11)" type name""" + m = field_size_re.search(name) + return int(m[1]) if m else None + + +# This light wrapper "fakes" a dictionary interface, because some SQLite data +# types include variables in them -- e.g. "varchar(30)" -- and can't be matched +# as a simple dictionary lookup. +class FlexibleFieldLookupDict: + # Maps SQL types to Django Field types. Some of the SQL types have multiple + # entries here because SQLite allows for anything and doesn't normalize the + # field type; it uses whatever was given. 
+ base_data_types_reverse = { + "bool": "BooleanField", + "boolean": "BooleanField", + "smallint": "SmallIntegerField", + "smallint unsigned": "PositiveSmallIntegerField", + "smallinteger": "SmallIntegerField", + "int": "IntegerField", + "integer": "IntegerField", + "bigint": "BigIntegerField", + "integer unsigned": "PositiveIntegerField", + "bigint unsigned": "PositiveBigIntegerField", + "decimal": "DecimalField", + "real": "FloatField", + "text": "TextField", + "char": "CharField", + "varchar": "CharField", + "blob": "BinaryField", + "date": "DateField", + "datetime": "DateTimeField", + "time": "TimeField", + } + + def __getitem__(self, key): + key = key.lower().split("(", 1)[0].strip() + return self.base_data_types_reverse[key] + + +class DatabaseIntrospection(BaseDatabaseIntrospection): + data_types_reverse = FlexibleFieldLookupDict() + + def get_field_type(self, data_type, description): + field_type = super().get_field_type(data_type, description) + if description.pk and field_type in { + "BigIntegerField", + "IntegerField", + "SmallIntegerField", + }: + # No support for BigAutoField or SmallAutoField as SQLite treats + # all integer primary keys as signed 64-bit integers. + return "AutoField" + if description.has_json_constraint: + return "JSONField" + return field_type + + def get_table_list(self, cursor): + """Return a list of table and view names in the current database.""" + # Skip the sqlite_sequence system table used for autoincrement key + # generation. + cursor.execute( + """ + SELECT name, type FROM sqlite_master + WHERE type in ('table', 'view') AND NOT name='sqlite_sequence' + ORDER BY name""" + ) + return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()] + + def get_table_description(self, cursor, table_name): + """ + Return a description of the table with the DB-API cursor.description + interface. + """ + cursor.execute( + "PRAGMA table_info(%s)" % self.connection.ops.quote_name(table_name) + ) + table_info = cursor.fetchall() + if not table_info: + raise DatabaseError(f"Table {table_name} does not exist (empty pragma).") + collations = self._get_column_collations(cursor, table_name) + json_columns = set() + if self.connection.features.can_introspect_json_field: + for line in table_info: + column = line[1] + json_constraint_sql = '%%json_valid("%s")%%' % column + has_json_constraint = cursor.execute( + """ + SELECT sql + FROM sqlite_master + WHERE + type = 'table' AND + name = %s AND + sql LIKE %s + """, + [table_name, json_constraint_sql], + ).fetchone() + if has_json_constraint: + json_columns.add(column) + return [ + FieldInfo( + name, + data_type, + get_field_size(data_type), + None, + None, + None, + not notnull, + default, + collations.get(name), + pk == 1, + name in json_columns, + ) + for cid, name, data_type, notnull, default, pk in table_info + ] + + def get_sequences(self, cursor, table_name, table_fields=()): + pk_col = self.get_primary_key_column(cursor, table_name) + return [{"table": table_name, "column": pk_col}] + + def get_relations(self, cursor, table_name): + """ + Return a dictionary of {column_name: (ref_column_name, ref_table_name)} + representing all foreign keys in the given table. 
+ """ + cursor.execute( + "PRAGMA foreign_key_list(%s)" % self.connection.ops.quote_name(table_name) + ) + return { + column_name: (ref_column_name, ref_table_name) + for ( + _, + _, + ref_table_name, + column_name, + ref_column_name, + *_, + ) in cursor.fetchall() + } + + def get_primary_key_columns(self, cursor, table_name): + cursor.execute( + "PRAGMA table_info(%s)" % self.connection.ops.quote_name(table_name) + ) + return [name for _, name, *_, pk in cursor.fetchall() if pk] + + def _parse_column_or_constraint_definition(self, tokens, columns): + token = None + is_constraint_definition = None + field_name = None + constraint_name = None + unique = False + unique_columns = [] + check = False + check_columns = [] + braces_deep = 0 + for token in tokens: + if token.match(sqlparse.tokens.Punctuation, "("): + braces_deep += 1 + elif token.match(sqlparse.tokens.Punctuation, ")"): + braces_deep -= 1 + if braces_deep < 0: + # End of columns and constraints for table definition. + break + elif braces_deep == 0 and token.match(sqlparse.tokens.Punctuation, ","): + # End of current column or constraint definition. + break + # Detect column or constraint definition by first token. + if is_constraint_definition is None: + is_constraint_definition = token.match( + sqlparse.tokens.Keyword, "CONSTRAINT" + ) + if is_constraint_definition: + continue + if is_constraint_definition: + # Detect constraint name by second token. + if constraint_name is None: + if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword): + constraint_name = token.value + elif token.ttype == sqlparse.tokens.Literal.String.Symbol: + constraint_name = token.value[1:-1] + # Start constraint columns parsing after UNIQUE keyword. + if token.match(sqlparse.tokens.Keyword, "UNIQUE"): + unique = True + unique_braces_deep = braces_deep + elif unique: + if unique_braces_deep == braces_deep: + if unique_columns: + # Stop constraint parsing. + unique = False + continue + if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword): + unique_columns.append(token.value) + elif token.ttype == sqlparse.tokens.Literal.String.Symbol: + unique_columns.append(token.value[1:-1]) + else: + # Detect field name by first token. + if field_name is None: + if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword): + field_name = token.value + elif token.ttype == sqlparse.tokens.Literal.String.Symbol: + field_name = token.value[1:-1] + if token.match(sqlparse.tokens.Keyword, "UNIQUE"): + unique_columns = [field_name] + # Start constraint columns parsing after CHECK keyword. + if token.match(sqlparse.tokens.Keyword, "CHECK"): + check = True + check_braces_deep = braces_deep + elif check: + if check_braces_deep == braces_deep: + if check_columns: + # Stop constraint parsing. 
+                        check = False
+                    continue
+                if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
+                    if token.value in columns:
+                        check_columns.append(token.value)
+                elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
+                    if token.value[1:-1] in columns:
+                        check_columns.append(token.value[1:-1])
+        unique_constraint = (
+            {
+                "unique": True,
+                "columns": unique_columns,
+                "primary_key": False,
+                "foreign_key": None,
+                "check": False,
+                "index": False,
+            }
+            if unique_columns
+            else None
+        )
+        check_constraint = (
+            {
+                "check": True,
+                "columns": check_columns,
+                "primary_key": False,
+                "unique": False,
+                "foreign_key": None,
+                "index": False,
+            }
+            if check_columns
+            else None
+        )
+        return constraint_name, unique_constraint, check_constraint, token
+
+    def _parse_table_constraints(self, sql, columns):
+        # Check constraint parsing is based on the SQLite syntax diagram.
+        # https://www.sqlite.org/syntaxdiagrams.html#table-constraint
+        statement = sqlparse.parse(sql)[0]
+        constraints = {}
+        unnamed_constraints_index = 0
+        tokens = (token for token in statement.flatten() if not token.is_whitespace)
+        # Go to columns and constraint definition
+        for token in tokens:
+            if token.match(sqlparse.tokens.Punctuation, "("):
+                break
+        # Parse columns and constraint definition
+        while True:
+            (
+                constraint_name,
+                unique,
+                check,
+                end_token,
+            ) = self._parse_column_or_constraint_definition(tokens, columns)
+            if unique:
+                if constraint_name:
+                    constraints[constraint_name] = unique
+                else:
+                    unnamed_constraints_index += 1
+                    constraints[
+                        "__unnamed_constraint_%s__" % unnamed_constraints_index
+                    ] = unique
+            if check:
+                if constraint_name:
+                    constraints[constraint_name] = check
+                else:
+                    unnamed_constraints_index += 1
+                    constraints[
+                        "__unnamed_constraint_%s__" % unnamed_constraints_index
+                    ] = check
+            if end_token.match(sqlparse.tokens.Punctuation, ")"):
+                break
+        return constraints
+
+    def get_constraints(self, cursor, table_name):
+        """
+        Retrieve any constraints or keys (unique, pk, fk, check, index) across
+        one or more columns.
+        """
+        constraints = {}
+        # Find inline check constraints.
+        try:
+            table_schema = cursor.execute(
+                "SELECT sql FROM sqlite_master WHERE type='table' and name=%s"
+                % (self.connection.ops.quote_name(table_name),)
+            ).fetchone()[0]
+        except TypeError:
+            # table_name is a view.
+            pass
+        else:
+            columns = {
+                info.name for info in self.get_table_description(cursor, table_name)
+            }
+            constraints.update(self._parse_table_constraints(table_schema, columns))
+
+        # Get the index info
+        cursor.execute(
+            "PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name)
+        )
+        for row in cursor.fetchall():
+            # SQLite 3.8.9+ has 5 columns, however older versions only give 3
+            # columns. Discard the last 2 columns if present.
+            number, index, unique = row[:3]
+            cursor.execute(
+                "SELECT sql FROM sqlite_master "
+                "WHERE type='index' AND name=%s" % self.connection.ops.quote_name(index)
+            )
+            # There's at most one row.
+            (sql,) = cursor.fetchone() or (None,)
+            # Inline constraints are already detected in
+            # _parse_table_constraints(). The reasons to avoid fetching inline
+            # constraints from `PRAGMA index_list` are:
+            # - Inline constraints can have a different name and information
+            #   than what `PRAGMA index_list` gives.
+            # - Not all inline constraints may appear in `PRAGMA index_list`.
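+            # sqlite_master stores no SQL text for auto-created indexes (e.g.
+            # sqlite_autoindex_*), so a missing statement marks an inline
+            # constraint.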
+ if not sql: + # An inline constraint + continue + # Get the index info for that index + cursor.execute( + "PRAGMA index_info(%s)" % self.connection.ops.quote_name(index) + ) + for index_rank, column_rank, column in cursor.fetchall(): + if index not in constraints: + constraints[index] = { + "columns": [], + "primary_key": False, + "unique": bool(unique), + "foreign_key": None, + "check": False, + "index": True, + } + constraints[index]["columns"].append(column) + # Add type and column orders for indexes + if constraints[index]["index"]: + # SQLite doesn't support any index type other than b-tree + constraints[index]["type"] = Index.suffix + orders = self._get_index_columns_orders(sql) + if orders is not None: + constraints[index]["orders"] = orders + # Get the PK + pk_columns = self.get_primary_key_columns(cursor, table_name) + if pk_columns: + # SQLite doesn't actually give a name to the PK constraint, + # so we invent one. This is fine, as the SQLite backend never + # deletes PK constraints by name, as you can't delete constraints + # in SQLite; we remake the table with a new PK instead. + constraints["__primary__"] = { + "columns": pk_columns, + "primary_key": True, + "unique": False, # It's not actually a unique constraint. + "foreign_key": None, + "check": False, + "index": False, + } + relations = enumerate(self.get_relations(cursor, table_name).items()) + constraints.update( + { + f"fk_{index}": { + "columns": [column_name], + "primary_key": False, + "unique": False, + "foreign_key": (ref_table_name, ref_column_name), + "check": False, + "index": False, + } + for index, (column_name, (ref_column_name, ref_table_name)) in relations + } + ) + return constraints + + def _get_index_columns_orders(self, sql): + tokens = sqlparse.parse(sql)[0] + for token in tokens: + if isinstance(token, sqlparse.sql.Parenthesis): + columns = str(token).strip("()").split(", ") + return ["DESC" if info.endswith("DESC") else "ASC" for info in columns] + return None + + def _get_column_collations(self, cursor, table_name): + row = cursor.execute( + """ + SELECT sql + FROM sqlite_master + WHERE type = 'table' AND name = %s + """, + [table_name], + ).fetchone() + if not row: + return {} + + sql = row[0] + columns = str(sqlparse.parse(sql)[0][-1]).strip("()").split(", ") + collations = {} + for column in columns: + tokens = column[1:].split() + column_name = tokens[0].strip('"') + for index, token in enumerate(tokens): + if token == "COLLATE": + collation = tokens[index + 1] + break + else: + collation = None + collations[column_name] = collation + return collations diff --git a/testbed/django__django/django/db/backends/sqlite3/schema.py b/testbed/django__django/django/db/backends/sqlite3/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..46ba07092d8edcbc8cbf5de5019dcca58f68ee92 --- /dev/null +++ b/testbed/django__django/django/db/backends/sqlite3/schema.py @@ -0,0 +1,592 @@ +import copy +from decimal import Decimal + +from django.apps.registry import Apps +from django.db import NotSupportedError +from django.db.backends.base.schema import BaseDatabaseSchemaEditor +from django.db.backends.ddl_references import Statement +from django.db.backends.utils import strip_quotes +from django.db.models import NOT_PROVIDED, UniqueConstraint +from django.db.transaction import atomic + + +class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): + sql_delete_table = "DROP TABLE %(table)s" + sql_create_fk = None + sql_create_inline_fk = ( + "REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE 
INITIALLY DEFERRED"
+    )
+    sql_create_column_inline_fk = sql_create_inline_fk
+    sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s"
+    sql_create_unique = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)"
+    sql_delete_unique = "DROP INDEX %(name)s"
+
+    def __enter__(self):
+        # Some SQLite schema alterations need foreign key constraints to be
+        # disabled. Enforce it here for the duration of the schema editing.
+        if not self.connection.disable_constraint_checking():
+            raise NotSupportedError(
+                "SQLite schema editor cannot be used while foreign key "
+                "constraint checks are enabled. Make sure to disable them "
+                "before entering a transaction.atomic() context because "
+                "SQLite does not support disabling them in the middle of "
+                "a multi-statement transaction."
+            )
+        return super().__enter__()
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.connection.check_constraints()
+        super().__exit__(exc_type, exc_value, traceback)
+        self.connection.enable_constraint_checking()
+
+    def quote_value(self, value):
+        # The backend "mostly works" without this function and there are use
+        # cases for compiling Python without the sqlite3 libraries (e.g.
+        # security hardening).
+        try:
+            import sqlite3
+
+            value = sqlite3.adapt(value)
+        except ImportError:
+            pass
+        except sqlite3.ProgrammingError:
+            pass
+        # Manual emulation of SQLite parameter quoting
+        if isinstance(value, bool):
+            return str(int(value))
+        elif isinstance(value, (Decimal, float, int)):
+            return str(value)
+        elif isinstance(value, str):
+            return "'%s'" % value.replace("'", "''")
+        elif value is None:
+            return "NULL"
+        elif isinstance(value, (bytes, bytearray, memoryview)):
+            # Bytes are only allowed for BLOB fields, encoded as string
+            # literals containing hexadecimal data and preceded by a single "X"
+            # character.
+            return "X'%s'" % value.hex()
+        else:
+            raise ValueError(
+                "Cannot quote parameter value %r of type %s" % (value, type(value))
+            )
+
+    def prepare_default(self, value):
+        return self.quote_value(value)
+
+    def _is_referenced_by_fk_constraint(
+        self, table_name, column_name=None, ignore_self=False
+    ):
+        """
+        Return whether or not the provided table name is referenced by another
+        one. If `column_name` is specified, only references pointing to that
+        column are considered. If `ignore_self` is True, self-referential
+        constraints are ignored.
+        """
+        with self.connection.cursor() as cursor:
+            for other_table in self.connection.introspection.get_table_list(cursor):
+                if ignore_self and other_table.name == table_name:
+                    continue
+                relations = self.connection.introspection.get_relations(
+                    cursor, other_table.name
+                )
+                for constraint_column, constraint_table in relations.values():
+                    if constraint_table == table_name and (
+                        column_name is None or constraint_column == column_name
+                    ):
+                        return True
+        return False
+
+    def alter_db_table(
+        self, model, old_db_table, new_db_table, disable_constraints=True
+    ):
+        if (
+            not self.connection.features.supports_atomic_references_rename
+            and disable_constraints
+            and self._is_referenced_by_fk_constraint(old_db_table)
+        ):
+            if self.connection.in_atomic_block:
+                raise NotSupportedError(
+                    (
+                        "Renaming the %r table while in a transaction is not "
+                        "supported on SQLite < 3.26 because it would break referential "
+                        "integrity. Try adding `atomic = False` to the Migration class."
+ ) + % old_db_table + ) + self.connection.enable_constraint_checking() + super().alter_db_table(model, old_db_table, new_db_table) + self.connection.disable_constraint_checking() + else: + super().alter_db_table(model, old_db_table, new_db_table) + + def alter_field(self, model, old_field, new_field, strict=False): + if not self._field_should_be_altered(old_field, new_field): + return + old_field_name = old_field.name + table_name = model._meta.db_table + _, old_column_name = old_field.get_attname_column() + if ( + new_field.name != old_field_name + and not self.connection.features.supports_atomic_references_rename + and self._is_referenced_by_fk_constraint( + table_name, old_column_name, ignore_self=True + ) + ): + if self.connection.in_atomic_block: + raise NotSupportedError( + ( + "Renaming the %r.%r column while in a transaction is not " + "supported on SQLite < 3.26 because it would break referential " + "integrity. Try adding `atomic = False` to the Migration class." + ) + % (model._meta.db_table, old_field_name) + ) + with atomic(self.connection.alias): + super().alter_field(model, old_field, new_field, strict=strict) + # Follow SQLite's documented procedure for performing changes + # that don't affect the on-disk content. + # https://sqlite.org/lang_altertable.html#otheralter + with self.connection.cursor() as cursor: + schema_version = cursor.execute("PRAGMA schema_version").fetchone()[ + 0 + ] + cursor.execute("PRAGMA writable_schema = 1") + references_template = ' REFERENCES "%s" ("%%s") ' % table_name + new_column_name = new_field.get_attname_column()[1] + search = references_template % old_column_name + replacement = references_template % new_column_name + cursor.execute( + "UPDATE sqlite_master SET sql = replace(sql, %s, %s)", + (search, replacement), + ) + cursor.execute("PRAGMA schema_version = %d" % (schema_version + 1)) + cursor.execute("PRAGMA writable_schema = 0") + # The integrity check will raise an exception and rollback + # the transaction if the sqlite_master updates corrupt the + # database. + cursor.execute("PRAGMA integrity_check") + # Perform a VACUUM to refresh the database representation from + # the sqlite_master table. + with self.connection.cursor() as cursor: + cursor.execute("VACUUM") + else: + super().alter_field(model, old_field, new_field, strict=strict) + + def _remake_table( + self, model, create_field=None, delete_field=None, alter_fields=None + ): + """ + Shortcut to transform a model from old_model into new_model + + This follows the correct procedure to perform non-rename or column + addition operations based on SQLite's documentation + + https://www.sqlite.org/lang_altertable.html#caution + + The essential steps are: + 1. Create a table with the updated definition called "new__app_model" + 2. Copy the data from the existing "app_model" table to the new table + 3. Drop the "app_model" table + 4. Rename the "new__app_model" table to "app_model" + 5. Restore any index of the previous "app_model" table. + """ + + # Self-referential fields must be recreated rather than copied from + # the old model to ensure their remote_field.field_name doesn't refer + # to an altered field. + def is_self_referential(f): + return f.is_relation and f.remote_field.model is model + + # Work out the new fields dict / mapping + body = { + f.name: f.clone() if is_self_referential(f) else f + for f in model._meta.local_concrete_fields + } + # Since mapping might mix column names and default values, + # its values must be already quoted. 
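+        # (They are interpolated verbatim into the INSERT ... SELECT statement
+        # that copies rows into the rebuilt table.)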
+ mapping = { + f.column: self.quote_name(f.column) + for f in model._meta.local_concrete_fields + } + # This maps field names (not columns) for things like unique_together + rename_mapping = {} + # If any of the new or altered fields is introducing a new PK, + # remove the old one + restore_pk_field = None + alter_fields = alter_fields or [] + if getattr(create_field, "primary_key", False) or any( + getattr(new_field, "primary_key", False) for _, new_field in alter_fields + ): + for name, field in list(body.items()): + if field.primary_key and not any( + # Do not remove the old primary key when an altered field + # that introduces a primary key is the same field. + name == new_field.name + for _, new_field in alter_fields + ): + field.primary_key = False + restore_pk_field = field + if field.auto_created: + del body[name] + del mapping[field.column] + # Add in any created fields + if create_field: + body[create_field.name] = create_field + # Choose a default and insert it into the copy map + if ( + create_field.db_default is NOT_PROVIDED + and not create_field.many_to_many + and create_field.concrete + ): + mapping[create_field.column] = self.prepare_default( + self.effective_default(create_field) + ) + # Add in any altered fields + for alter_field in alter_fields: + old_field, new_field = alter_field + body.pop(old_field.name, None) + mapping.pop(old_field.column, None) + body[new_field.name] = new_field + if old_field.null and not new_field.null: + if new_field.db_default is NOT_PROVIDED: + default = self.prepare_default(self.effective_default(new_field)) + else: + default, _ = self.db_default_sql(new_field) + case_sql = "coalesce(%(col)s, %(default)s)" % { + "col": self.quote_name(old_field.column), + "default": default, + } + mapping[new_field.column] = case_sql + else: + mapping[new_field.column] = self.quote_name(old_field.column) + rename_mapping[old_field.name] = new_field.name + # Remove any deleted fields + if delete_field: + del body[delete_field.name] + del mapping[delete_field.column] + # Remove any implicit M2M tables + if ( + delete_field.many_to_many + and delete_field.remote_field.through._meta.auto_created + ): + return self.delete_model(delete_field.remote_field.through) + # Work inside a new app registry + apps = Apps() + + # Work out the new value of unique_together, taking renames into + # account + unique_together = [ + [rename_mapping.get(n, n) for n in unique] + for unique in model._meta.unique_together + ] + + # RemovedInDjango51Warning. + # Work out the new value for index_together, taking renames into + # account + index_together = [ + [rename_mapping.get(n, n) for n in index] + for index in model._meta.index_together + ] + + indexes = model._meta.indexes + if delete_field: + indexes = [ + index for index in indexes if delete_field.name not in index.fields + ] + + constraints = list(model._meta.constraints) + + # Provide isolated instances of the fields to the new model body so + # that the existing model's internals aren't interfered with when + # the dummy model is constructed. + body_copy = copy.deepcopy(body) + + # Construct a new model with the new fields to allow self referential + # primary key to resolve to. This model won't ever be materialized as a + # table and solely exists for foreign key reference resolution purposes. + # This wouldn't be required if the schema editor was operating on model + # states instead of rendered models. 
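+        # Keep the original db_table in the Meta options here so that foreign
+        # key references resolve against the existing table name.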
+        meta_contents = {
+            "app_label": model._meta.app_label,
+            "db_table": model._meta.db_table,
+            "unique_together": unique_together,
+            "index_together": index_together,  # RemovedInDjango51Warning.
+            "indexes": indexes,
+            "constraints": constraints,
+            "apps": apps,
+        }
+        meta = type("Meta", (), meta_contents)
+        body_copy["Meta"] = meta
+        body_copy["__module__"] = model.__module__
+        type(model._meta.object_name, model.__bases__, body_copy)
+
+        # Construct a model with a renamed table name.
+        body_copy = copy.deepcopy(body)
+        meta_contents = {
+            "app_label": model._meta.app_label,
+            "db_table": "new__%s" % strip_quotes(model._meta.db_table),
+            "unique_together": unique_together,
+            "index_together": index_together,  # RemovedInDjango51Warning.
+            "indexes": indexes,
+            "constraints": constraints,
+            "apps": apps,
+        }
+        meta = type("Meta", (), meta_contents)
+        body_copy["Meta"] = meta
+        body_copy["__module__"] = model.__module__
+        new_model = type("New%s" % model._meta.object_name, model.__bases__, body_copy)
+
+        # Create a new table with the updated schema.
+        self.create_model(new_model)
+
+        # Copy data from the old table into the new table
+        self.execute(
+            "INSERT INTO %s (%s) SELECT %s FROM %s"
+            % (
+                self.quote_name(new_model._meta.db_table),
+                ", ".join(self.quote_name(x) for x in mapping),
+                ", ".join(mapping.values()),
+                self.quote_name(model._meta.db_table),
+            )
+        )
+
+        # Delete the old table to make way for the new
+        self.delete_model(model, handle_autom2m=False)
+
+        # Rename the new table to take the place of the old one
+        self.alter_db_table(
+            new_model,
+            new_model._meta.db_table,
+            model._meta.db_table,
+            disable_constraints=False,
+        )
+
+        # Run deferred SQL on correct table
+        for sql in self.deferred_sql:
+            self.execute(sql)
+        self.deferred_sql = []
+        # Fix any PK-removed field
+        if restore_pk_field:
+            restore_pk_field.primary_key = True
+
+    def delete_model(self, model, handle_autom2m=True):
+        if handle_autom2m:
+            super().delete_model(model)
+        else:
+            # Delete the table (and only that)
+            self.execute(
+                self.sql_delete_table
+                % {
+                    "table": self.quote_name(model._meta.db_table),
+                }
+            )
+            # Remove all deferred statements referencing the deleted table.
+            for sql in list(self.deferred_sql):
+                if isinstance(sql, Statement) and sql.references_table(
+                    model._meta.db_table
+                ):
+                    self.deferred_sql.remove(sql)
+
+    def add_field(self, model, field):
+        """Create a field on a model."""
+        from django.db.models.expressions import Value
+
+        # Special-case implicit M2M tables.
+        if field.many_to_many and field.remote_field.through._meta.auto_created:
+            self.create_model(field.remote_field.through)
+        elif (
+            # Primary keys and unique fields are not supported in ALTER TABLE
+            # ADD COLUMN.
+            field.primary_key
+            or field.unique
+            or not field.null
+            # Fields with default values cannot be handled by ALTER TABLE ADD
+            # COLUMN statement because DROP DEFAULT is not supported in
+            # ALTER TABLE.
+            or self.effective_default(field) is not None
+            # Fields with non-constant defaults cannot be handled by ALTER
+            # TABLE ADD COLUMN statement.
+            or (
+                field.db_default is not NOT_PROVIDED
+                and not isinstance(field.db_default, Value)
+            )
+        ):
+            self._remake_table(model, create_field=field)
+        else:
+            super().add_field(model, field)
+
+    def remove_field(self, model, field):
+        """
+        Remove a field from a model. Usually involves deleting a column,
+        but for M2Ms may involve deleting a table.
+ """ + # M2M fields are a special case + if field.many_to_many: + # For implicit M2M tables, delete the auto-created table + if field.remote_field.through._meta.auto_created: + self.delete_model(field.remote_field.through) + # For explicit "through" M2M fields, do nothing + elif ( + self.connection.features.can_alter_table_drop_column + # Primary keys, unique fields, indexed fields, and foreign keys are + # not supported in ALTER TABLE DROP COLUMN. + and not field.primary_key + and not field.unique + and not field.db_index + and not (field.remote_field and field.db_constraint) + ): + super().remove_field(model, field) + # For everything else, remake. + else: + # It might not actually have a column behind it + if field.db_parameters(connection=self.connection)["type"] is None: + return + self._remake_table(model, delete_field=field) + + def _alter_field( + self, + model, + old_field, + new_field, + old_type, + new_type, + old_db_params, + new_db_params, + strict=False, + ): + """Perform a "physical" (non-ManyToMany) field update.""" + # Use "ALTER TABLE ... RENAME COLUMN" if only the column name + # changed and there aren't any constraints. + if ( + self.connection.features.can_alter_table_rename_column + and old_field.column != new_field.column + and self.column_sql(model, old_field) == self.column_sql(model, new_field) + and not ( + old_field.remote_field + and old_field.db_constraint + or new_field.remote_field + and new_field.db_constraint + ) + ): + return self.execute( + self._rename_field_sql( + model._meta.db_table, old_field, new_field, new_type + ) + ) + # Alter by remaking table + self._remake_table(model, alter_fields=[(old_field, new_field)]) + # Rebuild tables with FKs pointing to this field. + old_collation = old_db_params.get("collation") + new_collation = new_db_params.get("collation") + if new_field.unique and ( + old_type != new_type or old_collation != new_collation + ): + related_models = set() + opts = new_field.model._meta + for remote_field in opts.related_objects: + # Ignore self-relationship since the table was already rebuilt. + if remote_field.related_model == model: + continue + if not remote_field.many_to_many: + if remote_field.field_name == new_field.name: + related_models.add(remote_field.related_model) + elif new_field.primary_key and remote_field.through._meta.auto_created: + related_models.add(remote_field.through) + if new_field.primary_key: + for many_to_many in opts.many_to_many: + # Ignore self-relationship since the table was already rebuilt. + if many_to_many.related_model == model: + continue + if many_to_many.remote_field.through._meta.auto_created: + related_models.add(many_to_many.remote_field.through) + for related_model in related_models: + self._remake_table(related_model) + + def _alter_many_to_many(self, model, old_field, new_field, strict): + """Alter M2Ms to repoint their to= endpoints.""" + if ( + old_field.remote_field.through._meta.db_table + == new_field.remote_field.through._meta.db_table + ): + # The field name didn't change, but some options did, so we have to + # propagate this altering. + self._remake_table( + old_field.remote_field.through, + alter_fields=[ + ( + # The field that points to the target model is needed, + # so that table can be remade with the new m2m field - + # this is m2m_reverse_field_name(). 
+ old_field.remote_field.through._meta.get_field( + old_field.m2m_reverse_field_name() + ), + new_field.remote_field.through._meta.get_field( + new_field.m2m_reverse_field_name() + ), + ), + ( + # The field that points to the model itself is needed, + # so that table can be remade with the new self field - + # this is m2m_field_name(). + old_field.remote_field.through._meta.get_field( + old_field.m2m_field_name() + ), + new_field.remote_field.through._meta.get_field( + new_field.m2m_field_name() + ), + ), + ], + ) + return + + # Make a new through table + self.create_model(new_field.remote_field.through) + # Copy the data across + self.execute( + "INSERT INTO %s (%s) SELECT %s FROM %s" + % ( + self.quote_name(new_field.remote_field.through._meta.db_table), + ", ".join( + [ + "id", + new_field.m2m_column_name(), + new_field.m2m_reverse_name(), + ] + ), + ", ".join( + [ + "id", + old_field.m2m_column_name(), + old_field.m2m_reverse_name(), + ] + ), + self.quote_name(old_field.remote_field.through._meta.db_table), + ) + ) + # Delete the old through table + self.delete_model(old_field.remote_field.through) + + def add_constraint(self, model, constraint): + if isinstance(constraint, UniqueConstraint) and ( + constraint.condition + or constraint.contains_expressions + or constraint.include + or constraint.deferrable + ): + super().add_constraint(model, constraint) + else: + self._remake_table(model) + + def remove_constraint(self, model, constraint): + if isinstance(constraint, UniqueConstraint) and ( + constraint.condition + or constraint.contains_expressions + or constraint.include + or constraint.deferrable + ): + super().remove_constraint(model, constraint) + else: + self._remake_table(model) + + def _collate_sql(self, collation): + return "COLLATE " + collation diff --git a/testbed/django__django/django/db/backends/utils.py b/testbed/django__django/django/db/backends/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f1acf98a8b8f5813ca3451b88921c9a94a8bc1c8 --- /dev/null +++ b/testbed/django__django/django/db/backends/utils.py @@ -0,0 +1,320 @@ +import datetime +import decimal +import functools +import logging +import time +from contextlib import contextmanager +from hashlib import md5 + +from django.db import NotSupportedError +from django.utils.dateparse import parse_time + +logger = logging.getLogger("django.db.backends") + + +class CursorWrapper: + def __init__(self, cursor, db): + self.cursor = cursor + self.db = db + + WRAP_ERROR_ATTRS = frozenset(["fetchone", "fetchmany", "fetchall", "nextset"]) + + def __getattr__(self, attr): + cursor_attr = getattr(self.cursor, attr) + if attr in CursorWrapper.WRAP_ERROR_ATTRS: + return self.db.wrap_database_errors(cursor_attr) + else: + return cursor_attr + + def __iter__(self): + with self.db.wrap_database_errors: + yield from self.cursor + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + # Close instead of passing through to avoid backend-specific behavior + # (#17671). Catch errors liberally because errors in cleanup code + # aren't useful. + try: + self.close() + except self.db.Database.Error: + pass + + # The following methods cannot be implemented in __getattr__, because the + # code must run when the method is invoked, not just when it is accessed. + + def callproc(self, procname, params=None, kparams=None): + # Keyword parameters for callproc aren't supported in PEP 249, but the + # database driver may support them (e.g. cx_Oracle). 
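The `CursorWrapper` class above leans on a small delegation trick: `__getattr__` is only consulted when normal attribute lookup fails, so explicitly defined methods such as `execute()` get extra behaviour layered on, while everything else falls through to the raw cursor. A stripped-down, framework-free sketch of the same pattern (the `LoggingCursor` class and table names are invented for illustration):

```python
import sqlite3

class LoggingCursor:
    """Toy stand-in for the wrapper pattern: instrument some methods, delegate the rest."""

    def __init__(self, cursor):
        self.cursor = cursor

    def __getattr__(self, attr):
        # Only consulted when normal lookup fails, so the explicit
        # execute() below wins over plain delegation.
        return getattr(self.cursor, attr)

    def __iter__(self):
        yield from self.cursor

    def execute(self, sql, params=()):
        print("SQL:", sql)
        return self.cursor.execute(sql, params)

conn = sqlite3.connect(":memory:")
cur = LoggingCursor(conn.cursor())
cur.execute("CREATE TABLE t (x INTEGER)")
cur.execute("INSERT INTO t VALUES (1), (2)")
cur.execute("SELECT x FROM t")
print(cur.fetchall())  # fetchall is reached through __getattr__
```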
+ if kparams is not None and not self.db.features.supports_callproc_kwargs: + raise NotSupportedError( + "Keyword parameters for callproc are not supported on this " + "database backend." + ) + self.db.validate_no_broken_transaction() + with self.db.wrap_database_errors: + if params is None and kparams is None: + return self.cursor.callproc(procname) + elif kparams is None: + return self.cursor.callproc(procname, params) + else: + params = params or () + return self.cursor.callproc(procname, params, kparams) + + def execute(self, sql, params=None): + return self._execute_with_wrappers( + sql, params, many=False, executor=self._execute + ) + + def executemany(self, sql, param_list): + return self._execute_with_wrappers( + sql, param_list, many=True, executor=self._executemany + ) + + def _execute_with_wrappers(self, sql, params, many, executor): + context = {"connection": self.db, "cursor": self} + for wrapper in reversed(self.db.execute_wrappers): + executor = functools.partial(wrapper, executor) + return executor(sql, params, many, context) + + def _execute(self, sql, params, *ignored_wrapper_args): + self.db.validate_no_broken_transaction() + with self.db.wrap_database_errors: + if params is None: + # params default might be backend specific. + return self.cursor.execute(sql) + else: + return self.cursor.execute(sql, params) + + def _executemany(self, sql, param_list, *ignored_wrapper_args): + self.db.validate_no_broken_transaction() + with self.db.wrap_database_errors: + return self.cursor.executemany(sql, param_list) + + +class CursorDebugWrapper(CursorWrapper): + # XXX callproc isn't instrumented at this time. + + def execute(self, sql, params=None): + with self.debug_sql(sql, params, use_last_executed_query=True): + return super().execute(sql, params) + + def executemany(self, sql, param_list): + with self.debug_sql(sql, param_list, many=True): + return super().executemany(sql, param_list) + + @contextmanager + def debug_sql( + self, sql=None, params=None, use_last_executed_query=False, many=False + ): + start = time.monotonic() + try: + yield + finally: + stop = time.monotonic() + duration = stop - start + if use_last_executed_query: + sql = self.db.ops.last_executed_query(self.cursor, sql, params) + try: + times = len(params) if many else "" + except TypeError: + # params could be an iterator. + times = "?" + self.db.queries_log.append( + { + "sql": "%s times: %s" % (times, sql) if many else sql, + "time": "%.3f" % duration, + } + ) + logger.debug( + "(%.3f) %s; args=%s; alias=%s", + duration, + sql, + params, + self.db.alias, + extra={ + "duration": duration, + "sql": sql, + "params": params, + "alias": self.db.alias, + }, + ) + + +@contextmanager +def debug_transaction(connection, sql): + start = time.monotonic() + try: + yield + finally: + if connection.queries_logged: + stop = time.monotonic() + duration = stop - start + connection.queries_log.append( + { + "sql": "%s" % sql, + "time": "%.3f" % duration, + } + ) + logger.debug( + "(%.3f) %s; args=%s; alias=%s", + duration, + sql, + None, + connection.alias, + extra={ + "duration": duration, + "sql": sql, + "alias": connection.alias, + }, + ) + + +def split_tzname_delta(tzname): + """ + Split a time zone name into a 3-tuple of (name, sign, offset). 
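+
+    For example (illustrative values):
+
+    >>> split_tzname_delta("America/Sao_Paulo-03:00")
+    ('America/Sao_Paulo', '-', '03:00')
+    >>> split_tzname_delta("Europe/Paris")
+    ('Europe/Paris', None, None)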
+ """ + for sign in ["+", "-"]: + if sign in tzname: + name, offset = tzname.rsplit(sign, 1) + if offset and parse_time(offset): + return name, sign, offset + return tzname, None, None + + +############################################### +# Converters from database (string) to Python # +############################################### + + +def typecast_date(s): + return ( + datetime.date(*map(int, s.split("-"))) if s else None + ) # return None if s is null + + +def typecast_time(s): # does NOT store time zone information + if not s: + return None + hour, minutes, seconds = s.split(":") + if "." in seconds: # check whether seconds have a fractional part + seconds, microseconds = seconds.split(".") + else: + microseconds = "0" + return datetime.time( + int(hour), int(minutes), int(seconds), int((microseconds + "000000")[:6]) + ) + + +def typecast_timestamp(s): # does NOT store time zone information + # "2005-07-29 15:48:00.590358-05" + # "2005-07-29 09:56:00-05" + if not s: + return None + if " " not in s: + return typecast_date(s) + d, t = s.split() + # Remove timezone information. + if "-" in t: + t, _ = t.split("-", 1) + elif "+" in t: + t, _ = t.split("+", 1) + dates = d.split("-") + times = t.split(":") + seconds = times[2] + if "." in seconds: # check whether seconds have a fractional part + seconds, microseconds = seconds.split(".") + else: + microseconds = "0" + return datetime.datetime( + int(dates[0]), + int(dates[1]), + int(dates[2]), + int(times[0]), + int(times[1]), + int(seconds), + int((microseconds + "000000")[:6]), + ) + + +############################################### +# Converters from Python to database (string) # +############################################### + + +def split_identifier(identifier): + """ + Split an SQL identifier into a two element tuple of (namespace, name). + + The identifier could be a table, column, or sequence name might be prefixed + by a namespace. + """ + try: + namespace, name = identifier.split('"."') + except ValueError: + namespace, name = "", identifier + return namespace.strip('"'), name.strip('"') + + +def truncate_name(identifier, length=None, hash_len=4): + """ + Shorten an SQL identifier to a repeatable mangled version with the given + length. + + If a quote stripped name contains a namespace, e.g. USERNAME"."TABLE, + truncate the table portion only. + """ + namespace, name = split_identifier(identifier) + + if length is None or len(name) <= length: + return identifier + + digest = names_digest(name, length=hash_len) + return "%s%s%s" % ( + '%s"."' % namespace if namespace else "", + name[: length - hash_len], + digest, + ) + + +def names_digest(*args, length): + """ + Generate a 32-bit digest of a set of arguments that can be used to shorten + identifying names. + """ + h = md5(usedforsecurity=False) + for arg in args: + h.update(arg.encode()) + return h.hexdigest()[:length] + + +def format_number(value, max_digits, decimal_places): + """ + Format a number into a string with the requisite number of digits and + decimal places. 
+ """ + if value is None: + return None + context = decimal.getcontext().copy() + if max_digits is not None: + context.prec = max_digits + if decimal_places is not None: + value = value.quantize( + decimal.Decimal(1).scaleb(-decimal_places), context=context + ) + else: + context.traps[decimal.Rounded] = 1 + value = context.create_decimal(value) + return "{:f}".format(value) + + +def strip_quotes(table_name): + """ + Strip quotes off of quoted table names to make them safe for use in index + names, sequence names, etc. For example '"USER"."TABLE"' (an Oracle naming + scheme) becomes 'USER"."TABLE'. + """ + has_quotes = table_name.startswith('"') and table_name.endswith('"') + return table_name[1:-1] if has_quotes else table_name diff --git a/testbed/django__django/django/db/migrations/__init__.py b/testbed/django__django/django/db/migrations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e7f3d2d7689a49f599b6c4868c5addb0cb9bff84 --- /dev/null +++ b/testbed/django__django/django/db/migrations/__init__.py @@ -0,0 +1,2 @@ +from .migration import Migration, swappable_dependency # NOQA +from .operations import * # NOQA diff --git a/testbed/django__django/django/db/migrations/autodetector.py b/testbed/django__django/django/db/migrations/autodetector.py new file mode 100644 index 0000000000000000000000000000000000000000..154ac44419d7622f0cb4bd3b0833bdc482bfb518 --- /dev/null +++ b/testbed/django__django/django/db/migrations/autodetector.py @@ -0,0 +1,1776 @@ +import functools +import re +from collections import defaultdict +from graphlib import TopologicalSorter +from itertools import chain + +from django.conf import settings +from django.db import models +from django.db.migrations import operations +from django.db.migrations.migration import Migration +from django.db.migrations.operations.models import AlterModelOptions +from django.db.migrations.optimizer import MigrationOptimizer +from django.db.migrations.questioner import MigrationQuestioner +from django.db.migrations.utils import ( + COMPILED_REGEX_TYPE, + RegexObject, + resolve_relation, +) + + +class MigrationAutodetector: + """ + Take a pair of ProjectStates and compare them to see what the first would + need doing to make it match the second (the second usually being the + project's current state). + + Note that this naturally operates on entire projects at a time, + as it's likely that changes interact (for example, you can't + add a ForeignKey without having a migration to add the table it + depends on first). A user interface may offer single-app usage + if it wishes, with the caveat that it may not always be possible. + """ + + def __init__(self, from_state, to_state, questioner=None): + self.from_state = from_state + self.to_state = to_state + self.questioner = questioner or MigrationQuestioner() + self.existing_apps = {app for app, model in from_state.models} + + def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None): + """ + Main entry point to produce a list of applicable changes. + Take a graph to base names on and an optional set of apps + to try and restrict to (restriction is not guaranteed) + """ + changes = self._detect_changes(convert_apps, graph) + changes = self.arrange_for_graph(changes, graph, migration_name) + if trim_to_apps: + changes = self._trim_to_apps(changes, trim_to_apps) + return changes + + def deep_deconstruct(self, obj): + """ + Recursive deconstruction for a field and its arguments. 
+ Used for full comparison for rename/alter; sometimes a single-level + deconstruction will not compare correctly. + """ + if isinstance(obj, list): + return [self.deep_deconstruct(value) for value in obj] + elif isinstance(obj, tuple): + return tuple(self.deep_deconstruct(value) for value in obj) + elif isinstance(obj, dict): + return {key: self.deep_deconstruct(value) for key, value in obj.items()} + elif isinstance(obj, functools.partial): + return ( + obj.func, + self.deep_deconstruct(obj.args), + self.deep_deconstruct(obj.keywords), + ) + elif isinstance(obj, COMPILED_REGEX_TYPE): + return RegexObject(obj) + elif isinstance(obj, type): + # If this is a type that implements 'deconstruct' as an instance method, + # avoid treating this as being deconstructible itself - see #22951 + return obj + elif hasattr(obj, "deconstruct"): + deconstructed = obj.deconstruct() + if isinstance(obj, models.Field): + # we have a field which also returns a name + deconstructed = deconstructed[1:] + path, args, kwargs = deconstructed + return ( + path, + [self.deep_deconstruct(value) for value in args], + {key: self.deep_deconstruct(value) for key, value in kwargs.items()}, + ) + else: + return obj + + def only_relation_agnostic_fields(self, fields): + """ + Return a definition of the fields that ignores field names and + what related fields actually relate to. Used for detecting renames (as + the related fields change during renames). + """ + fields_def = [] + for name, field in sorted(fields.items()): + deconstruction = self.deep_deconstruct(field) + if field.remote_field and field.remote_field.model: + deconstruction[2].pop("to", None) + fields_def.append(deconstruction) + return fields_def + + def _detect_changes(self, convert_apps=None, graph=None): + """ + Return a dict of migration plans which will achieve the + change from from_state to to_state. The dict has app labels + as keys and a list of migrations as values. + + The resulting migrations aren't specially named, but the names + do matter for dependencies inside the set. + + convert_apps is the list of apps to convert to use migrations + (i.e. to make initial migrations for, in the usual case) + + graph is an optional argument that, if provided, can help improve + dependency generation and avoid potential circular dependencies. + """ + # The first phase is generating all the operations for each app + # and gathering them into a big per-app list. + # Then go through that list, order it, and split into migrations to + # resolve dependencies caused by M2Ms and FKs. + self.generated_operations = {} + self.altered_indexes = {} + self.altered_constraints = {} + self.renamed_fields = {} + + # Prepare some old/new state and model lists, separating + # proxy models and ignoring unmigrated apps. 
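The autodetector's rename detection rests on the two helpers above: each field is reduced to a name-independent, deconstructed definition, and a removed/added pair with matching definitions is treated as a rename candidate rather than a drop-and-add. A schematic, framework-free sketch of that idea (model and field names invented):

```python
# Each field is reduced to a name-independent definition, here a
# (path, args, kwargs) triple like Field.deconstruct() returns.
old_fields = {
    "headline": ("django.db.models.CharField", [], {"max_length": 100}),
    "body": ("django.db.models.TextField", [], {}),
}
new_fields = {
    "title": ("django.db.models.CharField", [], {"max_length": 100}),
    "body": ("django.db.models.TextField", [], {}),
}

added = set(new_fields) - set(old_fields)
removed = set(old_fields) - set(new_fields)

# A (removed, added) pair with identical definitions is a candidate
# rename; the real autodetector then confirms it via the questioner.
renames = [
    (old, new)
    for new in added
    for old in removed
    if old_fields[old] == new_fields[new]
]
print(renames)  # [('headline', 'title')]
```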
+ self.old_model_keys = set() + self.old_proxy_keys = set() + self.old_unmanaged_keys = set() + self.new_model_keys = set() + self.new_proxy_keys = set() + self.new_unmanaged_keys = set() + for (app_label, model_name), model_state in self.from_state.models.items(): + if not model_state.options.get("managed", True): + self.old_unmanaged_keys.add((app_label, model_name)) + elif app_label not in self.from_state.real_apps: + if model_state.options.get("proxy"): + self.old_proxy_keys.add((app_label, model_name)) + else: + self.old_model_keys.add((app_label, model_name)) + + for (app_label, model_name), model_state in self.to_state.models.items(): + if not model_state.options.get("managed", True): + self.new_unmanaged_keys.add((app_label, model_name)) + elif app_label not in self.from_state.real_apps or ( + convert_apps and app_label in convert_apps + ): + if model_state.options.get("proxy"): + self.new_proxy_keys.add((app_label, model_name)) + else: + self.new_model_keys.add((app_label, model_name)) + + self.from_state.resolve_fields_and_relations() + self.to_state.resolve_fields_and_relations() + + # Renames have to come first + self.generate_renamed_models() + + # Prepare lists of fields and generate through model map + self._prepare_field_lists() + self._generate_through_model_map() + + # Generate non-rename model operations + self.generate_deleted_models() + self.generate_created_models() + self.generate_deleted_proxies() + self.generate_created_proxies() + self.generate_altered_options() + self.generate_altered_managers() + self.generate_altered_db_table_comment() + + # Create the renamed fields and store them in self.renamed_fields. + # They are used by create_altered_indexes(), generate_altered_fields(), + # generate_removed_altered_index/unique_together(), and + # generate_altered_index/unique_together(). + self.create_renamed_fields() + # Create the altered indexes and store them in self.altered_indexes. + # This avoids the same computation in generate_removed_indexes() + # and generate_added_indexes(). + self.create_altered_indexes() + self.create_altered_constraints() + # Generate index removal operations before field is removed + self.generate_removed_constraints() + self.generate_removed_indexes() + # Generate field renaming operations. + self.generate_renamed_fields() + self.generate_renamed_indexes() + # Generate removal of foo together. + self.generate_removed_altered_unique_together() + self.generate_removed_altered_index_together() # RemovedInDjango51Warning. + # Generate field operations. + self.generate_removed_fields() + self.generate_added_fields() + self.generate_altered_fields() + self.generate_altered_order_with_respect_to() + self.generate_altered_unique_together() + self.generate_altered_index_together() # RemovedInDjango51Warning. + self.generate_added_indexes() + self.generate_added_constraints() + self.generate_altered_db_table() + + self._sort_migrations() + self._build_migration_list(graph) + self._optimize_migrations() + + return self.migrations + + def _prepare_field_lists(self): + """ + Prepare field lists and a list of the fields that used through models + in the old state so dependencies can be made from the through model + deletion to the field that uses it. 
+ """ + self.kept_model_keys = self.old_model_keys & self.new_model_keys + self.kept_proxy_keys = self.old_proxy_keys & self.new_proxy_keys + self.kept_unmanaged_keys = self.old_unmanaged_keys & self.new_unmanaged_keys + self.through_users = {} + self.old_field_keys = { + (app_label, model_name, field_name) + for app_label, model_name in self.kept_model_keys + for field_name in self.from_state.models[ + app_label, self.renamed_models.get((app_label, model_name), model_name) + ].fields + } + self.new_field_keys = { + (app_label, model_name, field_name) + for app_label, model_name in self.kept_model_keys + for field_name in self.to_state.models[app_label, model_name].fields + } + + def _generate_through_model_map(self): + """Through model map generation.""" + for app_label, model_name in sorted(self.old_model_keys): + old_model_name = self.renamed_models.get( + (app_label, model_name), model_name + ) + old_model_state = self.from_state.models[app_label, old_model_name] + for field_name, field in old_model_state.fields.items(): + if hasattr(field, "remote_field") and getattr( + field.remote_field, "through", None + ): + through_key = resolve_relation( + field.remote_field.through, app_label, model_name + ) + self.through_users[through_key] = ( + app_label, + old_model_name, + field_name, + ) + + @staticmethod + def _resolve_dependency(dependency): + """ + Return the resolved dependency and a boolean denoting whether or not + it was swappable. + """ + if dependency[0] != "__setting__": + return dependency, False + resolved_app_label, resolved_object_name = getattr( + settings, dependency[1] + ).split(".") + return (resolved_app_label, resolved_object_name.lower()) + dependency[2:], True + + def _build_migration_list(self, graph=None): + """ + Chop the lists of operations up into migrations with dependencies on + each other. Do this by going through an app's list of operations until + one is found that has an outgoing dependency that isn't in another + app's migration yet (hasn't been chopped off its list). Then chop off + the operations before it into a migration and move onto the next app. + If the loops completes without doing anything, there's a circular + dependency (which _should_ be impossible as the operations are + all split at this point so they can't depend and be depended on). + """ + self.migrations = {} + num_ops = sum(len(x) for x in self.generated_operations.values()) + chop_mode = False + while num_ops: + # On every iteration, we step through all the apps and see if there + # is a completed set of operations. + # If we find that a subset of the operations are complete we can + # try to chop it off from the rest and continue, but we only + # do this if we've already been through the list once before + # without any chopping and nothing has changed. + for app_label in sorted(self.generated_operations): + chopped = [] + dependencies = set() + for operation in list(self.generated_operations[app_label]): + deps_satisfied = True + operation_dependencies = set() + for dep in operation._auto_deps: + # Temporarily resolve the swappable dependency to + # prevent circular references. While keeping the + # dependency checks on the resolved model, add the + # swappable dependencies. + original_dep = dep + dep, is_swappable_dep = self._resolve_dependency(dep) + if dep[0] != app_label: + # External app dependency. See if it's not yet + # satisfied. 
+ for other_operation in self.generated_operations.get( + dep[0], [] + ): + if self.check_dependency(other_operation, dep): + deps_satisfied = False + break + if not deps_satisfied: + break + else: + if is_swappable_dep: + operation_dependencies.add( + (original_dep[0], original_dep[1]) + ) + elif dep[0] in self.migrations: + operation_dependencies.add( + (dep[0], self.migrations[dep[0]][-1].name) + ) + else: + # If we can't find the other app, we add a + # first/last dependency, but only if we've + # already been through once and checked + # everything. + if chop_mode: + # If the app already exists, we add a + # dependency on the last migration, as + # we don't know which migration + # contains the target field. If it's + # not yet migrated or has no + # migrations, we use __first__. + if graph and graph.leaf_nodes(dep[0]): + operation_dependencies.add( + graph.leaf_nodes(dep[0])[0] + ) + else: + operation_dependencies.add( + (dep[0], "__first__") + ) + else: + deps_satisfied = False + if deps_satisfied: + chopped.append(operation) + dependencies.update(operation_dependencies) + del self.generated_operations[app_label][0] + else: + break + # Make a migration! Well, only if there's stuff to put in it + if dependencies or chopped: + if not self.generated_operations[app_label] or chop_mode: + subclass = type( + "Migration", + (Migration,), + {"operations": [], "dependencies": []}, + ) + instance = subclass( + "auto_%i" % (len(self.migrations.get(app_label, [])) + 1), + app_label, + ) + instance.dependencies = list(dependencies) + instance.operations = chopped + instance.initial = app_label not in self.existing_apps + self.migrations.setdefault(app_label, []).append(instance) + chop_mode = False + else: + self.generated_operations[app_label] = ( + chopped + self.generated_operations[app_label] + ) + new_num_ops = sum(len(x) for x in self.generated_operations.values()) + if new_num_ops == num_ops: + if not chop_mode: + chop_mode = True + else: + raise ValueError( + "Cannot resolve operation dependencies: %r" + % self.generated_operations + ) + num_ops = new_num_ops + + def _sort_migrations(self): + """ + Reorder to make things possible. Reordering may be needed so FKs work + nicely inside the same app. + """ + for app_label, ops in sorted(self.generated_operations.items()): + ts = TopologicalSorter() + for op in ops: + ts.add(op) + for dep in op._auto_deps: + # Resolve intra-app dependencies to handle circular + # references involving a swappable model. + dep = self._resolve_dependency(dep)[0] + if dep[0] != app_label: + continue + ts.add(op, *(x for x in ops if self.check_dependency(x, dep))) + self.generated_operations[app_label] = list(ts.static_order()) + + def _optimize_migrations(self): + # Add in internal dependencies among the migrations + for app_label, migrations in self.migrations.items(): + for m1, m2 in zip(migrations, migrations[1:]): + m2.dependencies.append((app_label, m1.name)) + + # De-dupe dependencies + for migrations in self.migrations.values(): + for migration in migrations: + migration.dependencies = list(set(migration.dependencies)) + + # Optimize migrations + for app_label, migrations in self.migrations.items(): + for migration in migrations: + migration.operations = MigrationOptimizer().optimize( + migration.operations, app_label + ) + + def check_dependency(self, operation, dependency): + """ + Return True if the given operation depends on the given dependency, + False otherwise. 
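+
+        Dependencies are 4-tuples of (app_label, model_name, field_name,
+        type): for example, ("app", "author", None, True) reads "the Author
+        model was created", and ("app", "book", "author", False) reads "the
+        author field was removed from Book" (see add_operation() below).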
+ """ + # Created model + if dependency[2] is None and dependency[3] is True: + return ( + isinstance(operation, operations.CreateModel) + and operation.name_lower == dependency[1].lower() + ) + # Created field + elif dependency[2] is not None and dependency[3] is True: + return ( + isinstance(operation, operations.CreateModel) + and operation.name_lower == dependency[1].lower() + and any(dependency[2] == x for x, y in operation.fields) + ) or ( + isinstance(operation, operations.AddField) + and operation.model_name_lower == dependency[1].lower() + and operation.name_lower == dependency[2].lower() + ) + # Removed field + elif dependency[2] is not None and dependency[3] is False: + return ( + isinstance(operation, operations.RemoveField) + and operation.model_name_lower == dependency[1].lower() + and operation.name_lower == dependency[2].lower() + ) + # Removed model + elif dependency[2] is None and dependency[3] is False: + return ( + isinstance(operation, operations.DeleteModel) + and operation.name_lower == dependency[1].lower() + ) + # Field being altered + elif dependency[2] is not None and dependency[3] == "alter": + return ( + isinstance(operation, operations.AlterField) + and operation.model_name_lower == dependency[1].lower() + and operation.name_lower == dependency[2].lower() + ) + # order_with_respect_to being unset for a field + elif dependency[2] is not None and dependency[3] == "order_wrt_unset": + return ( + isinstance(operation, operations.AlterOrderWithRespectTo) + and operation.name_lower == dependency[1].lower() + and (operation.order_with_respect_to or "").lower() + != dependency[2].lower() + ) + # Field is removed and part of an index/unique_together + elif dependency[2] is not None and dependency[3] == "foo_together_change": + return ( + isinstance( + operation, + (operations.AlterUniqueTogether, operations.AlterIndexTogether), + ) + and operation.name_lower == dependency[1].lower() + ) + # Unknown dependency. Raise an error. + else: + raise ValueError("Can't handle dependency %r" % (dependency,)) + + def add_operation(self, app_label, operation, dependencies=None, beginning=False): + # Dependencies are + # (app_label, model_name, field_name, create/delete as True/False) + operation._auto_deps = dependencies or [] + if beginning: + self.generated_operations.setdefault(app_label, []).insert(0, operation) + else: + self.generated_operations.setdefault(app_label, []).append(operation) + + def swappable_first_key(self, item): + """ + Place potential swappable models first in lists of created models (only + real way to solve #22783). + """ + try: + model_state = self.to_state.models[item] + base_names = { + base if isinstance(base, str) else base.__name__ + for base in model_state.bases + } + string_version = "%s.%s" % (item[0], item[1]) + if ( + model_state.options.get("swappable") + or "AbstractUser" in base_names + or "AbstractBaseUser" in base_names + or settings.AUTH_USER_MODEL.lower() == string_version.lower() + ): + return ("___" + item[0], "___" + item[1]) + except LookupError: + pass + return item + + def generate_renamed_models(self): + """ + Find any renamed models, generate the operations for them, and remove + the old entry from the model lists. Must be run before other + model-level generation. 
+ """ + self.renamed_models = {} + self.renamed_models_rel = {} + added_models = self.new_model_keys - self.old_model_keys + for app_label, model_name in sorted(added_models): + model_state = self.to_state.models[app_label, model_name] + model_fields_def = self.only_relation_agnostic_fields(model_state.fields) + + removed_models = self.old_model_keys - self.new_model_keys + for rem_app_label, rem_model_name in removed_models: + if rem_app_label == app_label: + rem_model_state = self.from_state.models[ + rem_app_label, rem_model_name + ] + rem_model_fields_def = self.only_relation_agnostic_fields( + rem_model_state.fields + ) + if model_fields_def == rem_model_fields_def: + if self.questioner.ask_rename_model( + rem_model_state, model_state + ): + dependencies = [] + fields = list(model_state.fields.values()) + [ + field.remote_field + for relations in self.to_state.relations[ + app_label, model_name + ].values() + for field in relations.values() + ] + for field in fields: + if field.is_relation: + dependencies.extend( + self._get_dependencies_for_foreign_key( + app_label, + model_name, + field, + self.to_state, + ) + ) + self.add_operation( + app_label, + operations.RenameModel( + old_name=rem_model_state.name, + new_name=model_state.name, + ), + dependencies=dependencies, + ) + self.renamed_models[app_label, model_name] = rem_model_name + renamed_models_rel_key = "%s.%s" % ( + rem_model_state.app_label, + rem_model_state.name_lower, + ) + self.renamed_models_rel[ + renamed_models_rel_key + ] = "%s.%s" % ( + model_state.app_label, + model_state.name_lower, + ) + self.old_model_keys.remove((rem_app_label, rem_model_name)) + self.old_model_keys.add((app_label, model_name)) + break + + def generate_created_models(self): + """ + Find all new models (both managed and unmanaged) and make create + operations for them as well as separate operations to create any + foreign key or M2M relationships (these are optimized later, if + possible). + + Defer any model options that refer to collections of fields that might + be deferred (e.g. unique_together, index_together). + """ + old_keys = self.old_model_keys | self.old_unmanaged_keys + added_models = self.new_model_keys - old_keys + added_unmanaged_models = self.new_unmanaged_keys - old_keys + all_added_models = chain( + sorted(added_models, key=self.swappable_first_key, reverse=True), + sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True), + ) + for app_label, model_name in all_added_models: + model_state = self.to_state.models[app_label, model_name] + # Gather related fields + related_fields = {} + primary_key_rel = None + for field_name, field in model_state.fields.items(): + if field.remote_field: + if field.remote_field.model: + if field.primary_key: + primary_key_rel = field.remote_field.model + elif not field.remote_field.parent_link: + related_fields[field_name] = field + if getattr(field.remote_field, "through", None): + related_fields[field_name] = field + + # Are there indexes/unique|index_together to defer? + indexes = model_state.options.pop("indexes") + constraints = model_state.options.pop("constraints") + unique_together = model_state.options.pop("unique_together", None) + # RemovedInDjango51Warning. 
+            index_together = model_state.options.pop("index_together", None)
+            order_with_respect_to = model_state.options.pop(
+                "order_with_respect_to", None
+            )
+            # Depend on the deletion of any possible proxy version of us
+            dependencies = [
+                (app_label, model_name, None, False),
+            ]
+            # Depend on all bases
+            for base in model_state.bases:
+                if isinstance(base, str) and "." in base:
+                    base_app_label, base_name = base.split(".", 1)
+                    dependencies.append((base_app_label, base_name, None, True))
+                    # Depend on the removal of base fields if the new model has
+                    # a field with the same name.
+                    old_base_model_state = self.from_state.models.get(
+                        (base_app_label, base_name)
+                    )
+                    new_base_model_state = self.to_state.models.get(
+                        (base_app_label, base_name)
+                    )
+                    if old_base_model_state and new_base_model_state:
+                        removed_base_fields = (
+                            set(old_base_model_state.fields)
+                            .difference(
+                                new_base_model_state.fields,
+                            )
+                            .intersection(model_state.fields)
+                        )
+                        for removed_base_field in removed_base_fields:
+                            dependencies.append(
+                                (base_app_label, base_name, removed_base_field, False)
+                            )
+            # Depend on the other end of the primary key if it's a relation
+            if primary_key_rel:
+                dependencies.append(
+                    resolve_relation(
+                        primary_key_rel,
+                        app_label,
+                        model_name,
+                    )
+                    + (None, True)
+                )
+            # Generate creation operation
+            self.add_operation(
+                app_label,
+                operations.CreateModel(
+                    name=model_state.name,
+                    fields=[
+                        d
+                        for d in model_state.fields.items()
+                        if d[0] not in related_fields
+                    ],
+                    options=model_state.options,
+                    bases=model_state.bases,
+                    managers=model_state.managers,
+                ),
+                dependencies=dependencies,
+                beginning=True,
+            )
+
+            # Don't add operations which modify the database for unmanaged models
+            if not model_state.options.get("managed", True):
+                continue
+
+            # Generate operations for each related field
+            for name, field in sorted(related_fields.items()):
+                dependencies = self._get_dependencies_for_foreign_key(
+                    app_label,
+                    model_name,
+                    field,
+                    self.to_state,
+                )
+                # Depend on our own model being created
+                dependencies.append((app_label, model_name, None, True))
+                # Make operation
+                self.add_operation(
+                    app_label,
+                    operations.AddField(
+                        model_name=model_name,
+                        name=name,
+                        field=field,
+                    ),
+                    dependencies=list(set(dependencies)),
+                )
+            # Generate other operations
+            if order_with_respect_to:
+                self.add_operation(
+                    app_label,
+                    operations.AlterOrderWithRespectTo(
+                        name=model_name,
+                        order_with_respect_to=order_with_respect_to,
+                    ),
+                    dependencies=[
+                        (app_label, model_name, order_with_respect_to, True),
+                        (app_label, model_name, None, True),
+                    ],
+                )
+            related_dependencies = [
+                (app_label, model_name, name, True) for name in sorted(related_fields)
+            ]
+            related_dependencies.append((app_label, model_name, None, True))
+            for index in indexes:
+                self.add_operation(
+                    app_label,
+                    operations.AddIndex(
+                        model_name=model_name,
+                        index=index,
+                    ),
+                    dependencies=related_dependencies,
+                )
+            for constraint in constraints:
+                self.add_operation(
+                    app_label,
+                    operations.AddConstraint(
+                        model_name=model_name,
+                        constraint=constraint,
+                    ),
+                    dependencies=related_dependencies,
+                )
+            if unique_together:
+                self.add_operation(
+                    app_label,
+                    operations.AlterUniqueTogether(
+                        name=model_name,
+                        unique_together=unique_together,
+                    ),
+                    dependencies=related_dependencies,
+                )
+            # RemovedInDjango51Warning.
+            if index_together:
+                self.add_operation(
+                    app_label,
+                    operations.AlterIndexTogether(
+                        name=model_name,
+                        index_together=index_together,
+                    ),
+                    dependencies=related_dependencies,
+                )
+            # Fix relationships if the model changed from a proxy model to a
+            # concrete model.
+            relations = self.to_state.relations
+            if (app_label, model_name) in self.old_proxy_keys:
+                for related_model_key, related_fields in relations[
+                    app_label, model_name
+                ].items():
+                    related_model_state = self.to_state.models[related_model_key]
+                    for related_field_name, related_field in related_fields.items():
+                        self.add_operation(
+                            related_model_state.app_label,
+                            operations.AlterField(
+                                model_name=related_model_state.name,
+                                name=related_field_name,
+                                field=related_field,
+                            ),
+                            dependencies=[(app_label, model_name, None, True)],
+                        )
+
+    def generate_created_proxies(self):
+        """
+        Make CreateModel statements for proxy models. Use the same statements
+        as for concrete models so there's less code duplication, but for proxy
+        models it's safe to skip all the pointless field handling and emit an
+        operation with no fields.
+        """
+        added = self.new_proxy_keys - self.old_proxy_keys
+        for app_label, model_name in sorted(added):
+            model_state = self.to_state.models[app_label, model_name]
+            assert model_state.options.get("proxy")
+            # Depend on the deletion of any possible non-proxy version of us
+            dependencies = [
+                (app_label, model_name, None, False),
+            ]
+            # Depend on all bases
+            for base in model_state.bases:
+                if isinstance(base, str) and "." in base:
+                    base_app_label, base_name = base.split(".", 1)
+                    dependencies.append((base_app_label, base_name, None, True))
+            # Generate creation operation
+            self.add_operation(
+                app_label,
+                operations.CreateModel(
+                    name=model_state.name,
+                    fields=[],
+                    options=model_state.options,
+                    bases=model_state.bases,
+                    managers=model_state.managers,
+                ),
+                # Depend on the deletion of any possible non-proxy version of us
+                dependencies=dependencies,
+            )
+
+    def generate_deleted_models(self):
+        """
+        Find all deleted models (managed and unmanaged) and make delete
+        operations for them as well as separate operations to delete any
+        foreign key or M2M relationships (these are optimized later, if
+        possible).
+
+        Also bring forward removal of any model options that refer to
+        collections of fields - the inverse of generate_created_models().
+        """
+        new_keys = self.new_model_keys | self.new_unmanaged_keys
+        deleted_models = self.old_model_keys - new_keys
+        deleted_unmanaged_models = self.old_unmanaged_keys - new_keys
+        all_deleted_models = chain(
+            sorted(deleted_models), sorted(deleted_unmanaged_models)
+        )
+        for app_label, model_name in all_deleted_models:
+            model_state = self.from_state.models[app_label, model_name]
+            # Gather related fields
+            related_fields = {}
+            for field_name, field in model_state.fields.items():
+                if field.remote_field:
+                    if field.remote_field.model:
+                        related_fields[field_name] = field
+                    if getattr(field.remote_field, "through", None):
+                        related_fields[field_name] = field
+            # Generate option removal first
+            unique_together = model_state.options.pop("unique_together", None)
+            # RemovedInDjango51Warning.
+            index_together = model_state.options.pop("index_together", None)
+            if unique_together:
+                self.add_operation(
+                    app_label,
+                    operations.AlterUniqueTogether(
+                        name=model_name,
+                        unique_together=None,
+                    ),
+                )
+            # RemovedInDjango51Warning.
+            if index_together:
+                self.add_operation(
+                    app_label,
+                    operations.AlterIndexTogether(
+                        name=model_name,
+                        index_together=None,
+                    ),
+                )
+            # Then remove each related field
+            for name in sorted(related_fields):
+                self.add_operation(
+                    app_label,
+                    operations.RemoveField(
+                        model_name=model_name,
+                        name=name,
+                    ),
+                )
+            # Finally, remove the model.
+            # This depends on both the removal/alteration of all incoming fields
+            # and the removal of all its own related fields, and if it's
+            # a through model the field that references it.
+            dependencies = []
+            relations = self.from_state.relations
+            for (
+                related_object_app_label,
+                object_name,
+            ), relation_related_fields in relations[app_label, model_name].items():
+                for field_name, field in relation_related_fields.items():
+                    dependencies.append(
+                        (related_object_app_label, object_name, field_name, False),
+                    )
+                    if not field.many_to_many:
+                        dependencies.append(
+                            (
+                                related_object_app_label,
+                                object_name,
+                                field_name,
+                                "alter",
+                            ),
+                        )
+
+            for name in sorted(related_fields):
+                dependencies.append((app_label, model_name, name, False))
+            # We're referenced in another field's through=
+            through_user = self.through_users.get((app_label, model_state.name_lower))
+            if through_user:
+                dependencies.append(
+                    (through_user[0], through_user[1], through_user[2], False)
+                )
+            # Finally, make the operation, deduping any dependencies
+            self.add_operation(
+                app_label,
+                operations.DeleteModel(
+                    name=model_state.name,
+                ),
+                dependencies=list(set(dependencies)),
+            )
+
+    def generate_deleted_proxies(self):
+        """Make DeleteModel operations for proxy models."""
+        deleted = self.old_proxy_keys - self.new_proxy_keys
+        for app_label, model_name in sorted(deleted):
+            model_state = self.from_state.models[app_label, model_name]
+            assert model_state.options.get("proxy")
+            self.add_operation(
+                app_label,
+                operations.DeleteModel(
+                    name=model_state.name,
+                ),
+            )
+
+    def create_renamed_fields(self):
+        """Work out renamed fields."""
+        self.renamed_operations = []
+        old_field_keys = self.old_field_keys.copy()
+        for app_label, model_name, field_name in sorted(
+            self.new_field_keys - old_field_keys
+        ):
+            old_model_name = self.renamed_models.get(
+                (app_label, model_name), model_name
+            )
+            old_model_state = self.from_state.models[app_label, old_model_name]
+            new_model_state = self.to_state.models[app_label, model_name]
+            field = new_model_state.get_field(field_name)
+            # Scan to see if this is actually a rename!
+            field_dec = self.deep_deconstruct(field)
+            for rem_app_label, rem_model_name, rem_field_name in sorted(
+                old_field_keys - self.new_field_keys
+            ):
+                if rem_app_label == app_label and rem_model_name == model_name:
+                    old_field = old_model_state.get_field(rem_field_name)
+                    old_field_dec = self.deep_deconstruct(old_field)
+                    if (
+                        field.remote_field
+                        and field.remote_field.model
+                        and "to" in old_field_dec[2]
+                    ):
+                        old_rel_to = old_field_dec[2]["to"]
+                        if old_rel_to in self.renamed_models_rel:
+                            old_field_dec[2]["to"] = self.renamed_models_rel[old_rel_to]
+                    old_field.set_attributes_from_name(rem_field_name)
+                    old_db_column = old_field.get_attname_column()[1]
+                    if old_field_dec == field_dec or (
+                        # Was the field renamed and db_column equal to the
+                        # old field's column added?
+ old_field_dec[0:2] == field_dec[0:2] + and dict(old_field_dec[2], db_column=old_db_column) + == field_dec[2] + ): + if self.questioner.ask_rename( + model_name, rem_field_name, field_name, field + ): + self.renamed_operations.append( + ( + rem_app_label, + rem_model_name, + old_field.db_column, + rem_field_name, + app_label, + model_name, + field, + field_name, + ) + ) + old_field_keys.remove( + (rem_app_label, rem_model_name, rem_field_name) + ) + old_field_keys.add((app_label, model_name, field_name)) + self.renamed_fields[ + app_label, model_name, field_name + ] = rem_field_name + break + + def generate_renamed_fields(self): + """Generate RenameField operations.""" + for ( + rem_app_label, + rem_model_name, + rem_db_column, + rem_field_name, + app_label, + model_name, + field, + field_name, + ) in self.renamed_operations: + # A db_column mismatch requires a prior noop AlterField for the + # subsequent RenameField to be a noop on attempts at preserving the + # old name. + if rem_db_column != field.db_column: + altered_field = field.clone() + altered_field.name = rem_field_name + self.add_operation( + app_label, + operations.AlterField( + model_name=model_name, + name=rem_field_name, + field=altered_field, + ), + ) + self.add_operation( + app_label, + operations.RenameField( + model_name=model_name, + old_name=rem_field_name, + new_name=field_name, + ), + ) + self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name)) + self.old_field_keys.add((app_label, model_name, field_name)) + + def generate_added_fields(self): + """Make AddField operations.""" + for app_label, model_name, field_name in sorted( + self.new_field_keys - self.old_field_keys + ): + self._generate_added_field(app_label, model_name, field_name) + + def _generate_added_field(self, app_label, model_name, field_name): + field = self.to_state.models[app_label, model_name].get_field(field_name) + # Adding a field always depends at least on its removal. + dependencies = [(app_label, model_name, field_name, False)] + # Fields that are foreignkeys/m2ms depend on stuff. + if field.remote_field and field.remote_field.model: + dependencies.extend( + self._get_dependencies_for_foreign_key( + app_label, + model_name, + field, + self.to_state, + ) + ) + # You can't just add NOT NULL fields with no default or fields + # which don't allow empty strings as default. 
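To make the `preserve_default` decision below concrete, here is a hedged, standalone sketch using a real Django field outside any configured project (instantiating a plain field needs no settings; the `db_default` and `auto_now_add` branches are omitted for brevity, and the literal `0` stands in for the interactive questioner's answer):

```python
from django.db import models

field = models.IntegerField()  # null=False, no default

# Mirrors the preserve_default check in _generate_added_field above
# (db_default and auto_now checks omitted).
preserve_default = (
    field.null
    or field.has_default()
    or field.many_to_many
    or (field.blank and field.empty_strings_allowed)
)
print(preserve_default)  # False -> existing rows need a one-off value

if not preserve_default:
    field = field.clone()
    field.default = 0  # stand-in for questioner.ask_not_null_addition()
print(field.has_default())  # True
```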
+ time_fields = (models.DateField, models.DateTimeField, models.TimeField) + preserve_default = ( + field.null + or field.has_default() + or field.db_default is not models.NOT_PROVIDED + or field.many_to_many + or (field.blank and field.empty_strings_allowed) + or (isinstance(field, time_fields) and field.auto_now) + ) + if not preserve_default: + field = field.clone() + if isinstance(field, time_fields) and field.auto_now_add: + field.default = self.questioner.ask_auto_now_add_addition( + field_name, model_name + ) + else: + field.default = self.questioner.ask_not_null_addition( + field_name, model_name + ) + if ( + field.unique + and field.default is not models.NOT_PROVIDED + and callable(field.default) + ): + self.questioner.ask_unique_callable_default_addition(field_name, model_name) + self.add_operation( + app_label, + operations.AddField( + model_name=model_name, + name=field_name, + field=field, + preserve_default=preserve_default, + ), + dependencies=dependencies, + ) + + def generate_removed_fields(self): + """Make RemoveField operations.""" + for app_label, model_name, field_name in sorted( + self.old_field_keys - self.new_field_keys + ): + self._generate_removed_field(app_label, model_name, field_name) + + def _generate_removed_field(self, app_label, model_name, field_name): + self.add_operation( + app_label, + operations.RemoveField( + model_name=model_name, + name=field_name, + ), + # We might need to depend on the removal of an + # order_with_respect_to or index/unique_together operation; + # this is safely ignored if there isn't one + dependencies=[ + (app_label, model_name, field_name, "order_wrt_unset"), + (app_label, model_name, field_name, "foo_together_change"), + ], + ) + + def generate_altered_fields(self): + """ + Make AlterField operations, or possibly RemovedField/AddField if alter + isn't possible. + """ + for app_label, model_name, field_name in sorted( + self.old_field_keys & self.new_field_keys + ): + # Did the field change? + old_model_name = self.renamed_models.get( + (app_label, model_name), model_name + ) + old_field_name = self.renamed_fields.get( + (app_label, model_name, field_name), field_name + ) + old_field = self.from_state.models[app_label, old_model_name].get_field( + old_field_name + ) + new_field = self.to_state.models[app_label, model_name].get_field( + field_name + ) + dependencies = [] + # Implement any model renames on relations; these are handled by RenameModel + # so we need to exclude them from the comparison + if hasattr(new_field, "remote_field") and getattr( + new_field.remote_field, "model", None + ): + rename_key = resolve_relation( + new_field.remote_field.model, app_label, model_name + ) + if rename_key in self.renamed_models: + new_field.remote_field.model = old_field.remote_field.model + # Handle ForeignKey which can only have a single to_field. + remote_field_name = getattr(new_field.remote_field, "field_name", None) + if remote_field_name: + to_field_rename_key = rename_key + (remote_field_name,) + if to_field_rename_key in self.renamed_fields: + # Repoint both model and field name because to_field + # inclusion in ForeignKey.deconstruct() is based on + # both. + new_field.remote_field.model = old_field.remote_field.model + new_field.remote_field.field_name = ( + old_field.remote_field.field_name + ) + # Handle ForeignObjects which can have multiple from_fields/to_fields. 
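+            # For example (hypothetical names): a ForeignObject declared with
+            # from_fields=["author_first"] must have that entry rewritten to
+            # ["first_name"] if the field was renamed; the tuple rebuilds
+            # below look each name up in self.renamed_fields.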
+ from_fields = getattr(new_field, "from_fields", None) + if from_fields: + from_rename_key = (app_label, model_name) + new_field.from_fields = tuple( + [ + self.renamed_fields.get( + from_rename_key + (from_field,), from_field + ) + for from_field in from_fields + ] + ) + new_field.to_fields = tuple( + [ + self.renamed_fields.get(rename_key + (to_field,), to_field) + for to_field in new_field.to_fields + ] + ) + dependencies.extend( + self._get_dependencies_for_foreign_key( + app_label, + model_name, + new_field, + self.to_state, + ) + ) + if hasattr(new_field, "remote_field") and getattr( + new_field.remote_field, "through", None + ): + rename_key = resolve_relation( + new_field.remote_field.through, app_label, model_name + ) + if rename_key in self.renamed_models: + new_field.remote_field.through = old_field.remote_field.through + old_field_dec = self.deep_deconstruct(old_field) + new_field_dec = self.deep_deconstruct(new_field) + # If the field was confirmed to be renamed it means that only + # db_column was allowed to change which generate_renamed_fields() + # already accounts for by adding an AlterField operation. + if old_field_dec != new_field_dec and old_field_name == field_name: + both_m2m = old_field.many_to_many and new_field.many_to_many + neither_m2m = not old_field.many_to_many and not new_field.many_to_many + if both_m2m or neither_m2m: + # Either both fields are m2m or neither is + preserve_default = True + if ( + old_field.null + and not new_field.null + and not new_field.has_default() + and new_field.db_default is models.NOT_PROVIDED + and not new_field.many_to_many + ): + field = new_field.clone() + new_default = self.questioner.ask_not_null_alteration( + field_name, model_name + ) + if new_default is not models.NOT_PROVIDED: + field.default = new_default + preserve_default = False + else: + field = new_field + self.add_operation( + app_label, + operations.AlterField( + model_name=model_name, + name=field_name, + field=field, + preserve_default=preserve_default, + ), + dependencies=dependencies, + ) + else: + # We cannot alter between m2m and concrete fields + self._generate_removed_field(app_label, model_name, field_name) + self._generate_added_field(app_label, model_name, field_name) + + def create_altered_indexes(self): + option_name = operations.AddIndex.option_name + self.renamed_index_together_values = defaultdict(list) + + for app_label, model_name in sorted(self.kept_model_keys): + old_model_name = self.renamed_models.get( + (app_label, model_name), model_name + ) + old_model_state = self.from_state.models[app_label, old_model_name] + new_model_state = self.to_state.models[app_label, model_name] + + old_indexes = old_model_state.options[option_name] + new_indexes = new_model_state.options[option_name] + added_indexes = [idx for idx in new_indexes if idx not in old_indexes] + removed_indexes = [idx for idx in old_indexes if idx not in new_indexes] + renamed_indexes = [] + # Find renamed indexes. + remove_from_added = [] + remove_from_removed = [] + for new_index in added_indexes: + new_index_dec = new_index.deconstruct() + new_index_name = new_index_dec[2].pop("name") + for old_index in removed_indexes: + old_index_dec = old_index.deconstruct() + old_index_name = old_index_dec[2].pop("name") + # Indexes are the same except for the names. 
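+                    # For example (hypothetical names): both
+                    # Index(fields=["title"], name="old_title_idx") and
+                    # Index(fields=["title"], name="new_title_idx") deconstruct
+                    # to ("django.db.models.Index", (), {"fields": ["title"]})
+                    # once "name" is popped, so the pair is recorded as a
+                    # rename instead of a drop-and-add.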
+ if ( + new_index_dec == old_index_dec + and new_index_name != old_index_name + ): + renamed_indexes.append((old_index_name, new_index_name, None)) + remove_from_added.append(new_index) + remove_from_removed.append(old_index) + # Find index_together changed to indexes. + for ( + old_value, + new_value, + index_together_app_label, + index_together_model_name, + dependencies, + ) in self._get_altered_foo_together_operations( + operations.AlterIndexTogether.option_name + ): + if ( + app_label != index_together_app_label + or model_name != index_together_model_name + ): + continue + removed_values = old_value.difference(new_value) + for removed_index_together in removed_values: + renamed_index_together_indexes = [] + for new_index in added_indexes: + _, args, kwargs = new_index.deconstruct() + # Ensure only 'fields' are defined in the Index. + if ( + not args + and new_index.fields == list(removed_index_together) + and set(kwargs) == {"name", "fields"} + ): + renamed_index_together_indexes.append(new_index) + + if len(renamed_index_together_indexes) == 1: + renamed_index = renamed_index_together_indexes[0] + remove_from_added.append(renamed_index) + renamed_indexes.append( + (None, renamed_index.name, removed_index_together) + ) + self.renamed_index_together_values[ + index_together_app_label, index_together_model_name + ].append(removed_index_together) + # Remove renamed indexes from the lists of added and removed + # indexes. + added_indexes = [ + idx for idx in added_indexes if idx not in remove_from_added + ] + removed_indexes = [ + idx for idx in removed_indexes if idx not in remove_from_removed + ] + + self.altered_indexes.update( + { + (app_label, model_name): { + "added_indexes": added_indexes, + "removed_indexes": removed_indexes, + "renamed_indexes": renamed_indexes, + } + } + ) + + def generate_added_indexes(self): + for (app_label, model_name), alt_indexes in self.altered_indexes.items(): + dependencies = self._get_dependencies_for_model(app_label, model_name) + for index in alt_indexes["added_indexes"]: + self.add_operation( + app_label, + operations.AddIndex( + model_name=model_name, + index=index, + ), + dependencies=dependencies, + ) + + def generate_removed_indexes(self): + for (app_label, model_name), alt_indexes in self.altered_indexes.items(): + for index in alt_indexes["removed_indexes"]: + self.add_operation( + app_label, + operations.RemoveIndex( + model_name=model_name, + name=index.name, + ), + ) + + def generate_renamed_indexes(self): + for (app_label, model_name), alt_indexes in self.altered_indexes.items(): + for old_index_name, new_index_name, old_fields in alt_indexes[ + "renamed_indexes" + ]: + self.add_operation( + app_label, + operations.RenameIndex( + model_name=model_name, + new_name=new_index_name, + old_name=old_index_name, + old_fields=old_fields, + ), + ) + + def create_altered_constraints(self): + option_name = operations.AddConstraint.option_name + for app_label, model_name in sorted(self.kept_model_keys): + old_model_name = self.renamed_models.get( + (app_label, model_name), model_name + ) + old_model_state = self.from_state.models[app_label, old_model_name] + new_model_state = self.to_state.models[app_label, model_name] + + old_constraints = old_model_state.options[option_name] + new_constraints = new_model_state.options[option_name] + add_constraints = [c for c in new_constraints if c not in old_constraints] + rem_constraints = [c for c in old_constraints if c not in new_constraints] + + self.altered_constraints.update( + { + (app_label, 
model_name): { + "added_constraints": add_constraints, + "removed_constraints": rem_constraints, + } + } + ) + + def generate_added_constraints(self): + for ( + app_label, + model_name, + ), alt_constraints in self.altered_constraints.items(): + dependencies = self._get_dependencies_for_model(app_label, model_name) + for constraint in alt_constraints["added_constraints"]: + self.add_operation( + app_label, + operations.AddConstraint( + model_name=model_name, + constraint=constraint, + ), + dependencies=dependencies, + ) + + def generate_removed_constraints(self): + for ( + app_label, + model_name, + ), alt_constraints in self.altered_constraints.items(): + for constraint in alt_constraints["removed_constraints"]: + self.add_operation( + app_label, + operations.RemoveConstraint( + model_name=model_name, + name=constraint.name, + ), + ) + + @staticmethod + def _get_dependencies_for_foreign_key(app_label, model_name, field, project_state): + remote_field_model = None + if hasattr(field.remote_field, "model"): + remote_field_model = field.remote_field.model + else: + relations = project_state.relations[app_label, model_name] + for (remote_app_label, remote_model_name), fields in relations.items(): + if any( + field == related_field.remote_field + for related_field in fields.values() + ): + remote_field_model = f"{remote_app_label}.{remote_model_name}" + break + # Account for FKs to swappable models + swappable_setting = getattr(field, "swappable_setting", None) + if swappable_setting is not None: + dep_app_label = "__setting__" + dep_object_name = swappable_setting + else: + dep_app_label, dep_object_name = resolve_relation( + remote_field_model, + app_label, + model_name, + ) + dependencies = [(dep_app_label, dep_object_name, None, True)] + if getattr(field.remote_field, "through", None): + through_app_label, through_object_name = resolve_relation( + field.remote_field.through, + app_label, + model_name, + ) + dependencies.append((through_app_label, through_object_name, None, True)) + return dependencies + + def _get_dependencies_for_model(self, app_label, model_name): + """Return foreign key dependencies of the given model.""" + dependencies = [] + model_state = self.to_state.models[app_label, model_name] + for field in model_state.fields.values(): + if field.is_relation: + dependencies.extend( + self._get_dependencies_for_foreign_key( + app_label, + model_name, + field, + self.to_state, + ) + ) + return dependencies + + def _get_altered_foo_together_operations(self, option_name): + for app_label, model_name in sorted(self.kept_model_keys): + old_model_name = self.renamed_models.get( + (app_label, model_name), model_name + ) + old_model_state = self.from_state.models[app_label, old_model_name] + new_model_state = self.to_state.models[app_label, model_name] + + # We run the old version through the field renames to account for those + old_value = old_model_state.options.get(option_name) + old_value = ( + { + tuple( + self.renamed_fields.get((app_label, model_name, n), n) + for n in unique + ) + for unique in old_value + } + if old_value + else set() + ) + + new_value = new_model_state.options.get(option_name) + new_value = set(new_value) if new_value else set() + + if old_value != new_value: + dependencies = [] + for foo_togethers in new_value: + for field_name in foo_togethers: + field = new_model_state.get_field(field_name) + if field.remote_field and field.remote_field.model: + dependencies.extend( + self._get_dependencies_for_foreign_key( + app_label, + model_name, + field, + 
self.to_state, + ) + ) + yield ( + old_value, + new_value, + app_label, + model_name, + dependencies, + ) + + def _generate_removed_altered_foo_together(self, operation): + for ( + old_value, + new_value, + app_label, + model_name, + dependencies, + ) in self._get_altered_foo_together_operations(operation.option_name): + if operation == operations.AlterIndexTogether: + old_value = { + value + for value in old_value + if value + not in self.renamed_index_together_values[app_label, model_name] + } + removal_value = new_value.intersection(old_value) + if removal_value or old_value: + self.add_operation( + app_label, + operation( + name=model_name, **{operation.option_name: removal_value} + ), + dependencies=dependencies, + ) + + def generate_removed_altered_unique_together(self): + self._generate_removed_altered_foo_together(operations.AlterUniqueTogether) + + # RemovedInDjango51Warning. + def generate_removed_altered_index_together(self): + self._generate_removed_altered_foo_together(operations.AlterIndexTogether) + + def _generate_altered_foo_together(self, operation): + for ( + old_value, + new_value, + app_label, + model_name, + dependencies, + ) in self._get_altered_foo_together_operations(operation.option_name): + removal_value = new_value.intersection(old_value) + if new_value != removal_value: + self.add_operation( + app_label, + operation(name=model_name, **{operation.option_name: new_value}), + dependencies=dependencies, + ) + + def generate_altered_unique_together(self): + self._generate_altered_foo_together(operations.AlterUniqueTogether) + + # RemovedInDjango51Warning. + def generate_altered_index_together(self): + self._generate_altered_foo_together(operations.AlterIndexTogether) + + def generate_altered_db_table(self): + models_to_check = self.kept_model_keys.union( + self.kept_proxy_keys, self.kept_unmanaged_keys + ) + for app_label, model_name in sorted(models_to_check): + old_model_name = self.renamed_models.get( + (app_label, model_name), model_name + ) + old_model_state = self.from_state.models[app_label, old_model_name] + new_model_state = self.to_state.models[app_label, model_name] + old_db_table_name = old_model_state.options.get("db_table") + new_db_table_name = new_model_state.options.get("db_table") + if old_db_table_name != new_db_table_name: + self.add_operation( + app_label, + operations.AlterModelTable( + name=model_name, + table=new_db_table_name, + ), + ) + + def generate_altered_db_table_comment(self): + models_to_check = self.kept_model_keys.union( + self.kept_proxy_keys, self.kept_unmanaged_keys + ) + for app_label, model_name in sorted(models_to_check): + old_model_name = self.renamed_models.get( + (app_label, model_name), model_name + ) + old_model_state = self.from_state.models[app_label, old_model_name] + new_model_state = self.to_state.models[app_label, model_name] + + old_db_table_comment = old_model_state.options.get("db_table_comment") + new_db_table_comment = new_model_state.options.get("db_table_comment") + if old_db_table_comment != new_db_table_comment: + self.add_operation( + app_label, + operations.AlterModelTableComment( + name=model_name, + table_comment=new_db_table_comment, + ), + ) + + def generate_altered_options(self): + """ + Work out if any non-schema-affecting options have changed and make an + operation to represent them in state changes (in case Python code in + migrations needs them). 
+ """ + models_to_check = self.kept_model_keys.union( + self.kept_proxy_keys, + self.kept_unmanaged_keys, + # unmanaged converted to managed + self.old_unmanaged_keys & self.new_model_keys, + # managed converted to unmanaged + self.old_model_keys & self.new_unmanaged_keys, + ) + + for app_label, model_name in sorted(models_to_check): + old_model_name = self.renamed_models.get( + (app_label, model_name), model_name + ) + old_model_state = self.from_state.models[app_label, old_model_name] + new_model_state = self.to_state.models[app_label, model_name] + old_options = { + key: value + for key, value in old_model_state.options.items() + if key in AlterModelOptions.ALTER_OPTION_KEYS + } + new_options = { + key: value + for key, value in new_model_state.options.items() + if key in AlterModelOptions.ALTER_OPTION_KEYS + } + if old_options != new_options: + self.add_operation( + app_label, + operations.AlterModelOptions( + name=model_name, + options=new_options, + ), + ) + + def generate_altered_order_with_respect_to(self): + for app_label, model_name in sorted(self.kept_model_keys): + old_model_name = self.renamed_models.get( + (app_label, model_name), model_name + ) + old_model_state = self.from_state.models[app_label, old_model_name] + new_model_state = self.to_state.models[app_label, model_name] + if old_model_state.options.get( + "order_with_respect_to" + ) != new_model_state.options.get("order_with_respect_to"): + # Make sure it comes second if we're adding + # (removal dependency is part of RemoveField) + dependencies = [] + if new_model_state.options.get("order_with_respect_to"): + dependencies.append( + ( + app_label, + model_name, + new_model_state.options["order_with_respect_to"], + True, + ) + ) + # Actually generate the operation + self.add_operation( + app_label, + operations.AlterOrderWithRespectTo( + name=model_name, + order_with_respect_to=new_model_state.options.get( + "order_with_respect_to" + ), + ), + dependencies=dependencies, + ) + + def generate_altered_managers(self): + for app_label, model_name in sorted(self.kept_model_keys): + old_model_name = self.renamed_models.get( + (app_label, model_name), model_name + ) + old_model_state = self.from_state.models[app_label, old_model_name] + new_model_state = self.to_state.models[app_label, model_name] + if old_model_state.managers != new_model_state.managers: + self.add_operation( + app_label, + operations.AlterModelManagers( + name=model_name, + managers=new_model_state.managers, + ), + ) + + def arrange_for_graph(self, changes, graph, migration_name=None): + """ + Take a result from changes() and a MigrationGraph, and fix the names + and dependencies of the changes so they extend the graph from the leaf + nodes for each app. + """ + leaves = graph.leaf_nodes() + name_map = {} + for app_label, migrations in list(changes.items()): + if not migrations: + continue + # Find the app label's current leaf node + app_leaf = None + for leaf in leaves: + if leaf[0] == app_label: + app_leaf = leaf + break + # Do they want an initial migration for this app? + if app_leaf is None and not self.questioner.ask_initial(app_label): + # They don't. 
+ for migration in migrations: + name_map[(app_label, migration.name)] = (app_label, "__first__") + del changes[app_label] + continue + # Work out the next number in the sequence + if app_leaf is None: + next_number = 1 + else: + next_number = (self.parse_number(app_leaf[1]) or 0) + 1 + # Name each migration + for i, migration in enumerate(migrations): + if i == 0 and app_leaf: + migration.dependencies.append(app_leaf) + new_name_parts = ["%04i" % next_number] + if migration_name: + new_name_parts.append(migration_name) + elif i == 0 and not app_leaf: + new_name_parts.append("initial") + else: + new_name_parts.append(migration.suggest_name()[:100]) + new_name = "_".join(new_name_parts) + name_map[(app_label, migration.name)] = (app_label, new_name) + next_number += 1 + migration.name = new_name + # Now fix dependencies + for migrations in changes.values(): + for migration in migrations: + migration.dependencies = [ + name_map.get(d, d) for d in migration.dependencies + ] + return changes + + def _trim_to_apps(self, changes, app_labels): + """ + Take changes from arrange_for_graph() and set of app labels, and return + a modified set of changes which trims out as many migrations that are + not in app_labels as possible. Note that some other migrations may + still be present as they may be required dependencies. + """ + # Gather other app dependencies in a first pass + app_dependencies = {} + for app_label, migrations in changes.items(): + for migration in migrations: + for dep_app_label, name in migration.dependencies: + app_dependencies.setdefault(app_label, set()).add(dep_app_label) + required_apps = set(app_labels) + # Keep resolving till there's no change + old_required_apps = None + while old_required_apps != required_apps: + old_required_apps = set(required_apps) + required_apps.update( + *[app_dependencies.get(app_label, ()) for app_label in required_apps] + ) + # Remove all migrations that aren't needed + for app_label in list(changes): + if app_label not in required_apps: + del changes[app_label] + return changes + + @classmethod + def parse_number(cls, name): + """ + Given a migration name, try to extract a number from the beginning of + it. For a squashed migration such as '0001_squashed_0004…', return the + second number. If no number is found, return None. 
+ """ + if squashed_match := re.search(r".*_squashed_(\d+)", name): + return int(squashed_match[1]) + match = re.match(r"^\d+", name) + if match: + return int(match[0]) + return None diff --git a/testbed/django__django/django/db/migrations/exceptions.py b/testbed/django__django/django/db/migrations/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..dd556dacb58b0e1d5510fee462c71be4b9dc0ebf --- /dev/null +++ b/testbed/django__django/django/db/migrations/exceptions.py @@ -0,0 +1,60 @@ +from django.db import DatabaseError + + +class AmbiguityError(Exception): + """More than one migration matches a name prefix.""" + + pass + + +class BadMigrationError(Exception): + """There's a bad migration (unreadable/bad format/etc.).""" + + pass + + +class CircularDependencyError(Exception): + """There's an impossible-to-resolve circular dependency.""" + + pass + + +class InconsistentMigrationHistory(Exception): + """An applied migration has some of its dependencies not applied.""" + + pass + + +class InvalidBasesError(ValueError): + """A model's base classes can't be resolved.""" + + pass + + +class IrreversibleError(RuntimeError): + """An irreversible migration is about to be reversed.""" + + pass + + +class NodeNotFoundError(LookupError): + """An attempt on a node is made that is not available in the graph.""" + + def __init__(self, message, node, origin=None): + self.message = message + self.origin = origin + self.node = node + + def __str__(self): + return self.message + + def __repr__(self): + return "NodeNotFoundError(%r)" % (self.node,) + + +class MigrationSchemaMissing(DatabaseError): + pass + + +class InvalidMigrationPlan(ValueError): + pass diff --git a/testbed/django__django/django/db/migrations/executor.py b/testbed/django__django/django/db/migrations/executor.py new file mode 100644 index 0000000000000000000000000000000000000000..eb738cf4571f4354f159b2fae3c3bf80a70464bc --- /dev/null +++ b/testbed/django__django/django/db/migrations/executor.py @@ -0,0 +1,410 @@ +from django.apps.registry import apps as global_apps +from django.db import migrations, router + +from .exceptions import InvalidMigrationPlan +from .loader import MigrationLoader +from .recorder import MigrationRecorder +from .state import ProjectState + + +class MigrationExecutor: + """ + End-to-end migration execution - load migrations and run them up or down + to a specified set of targets. + """ + + def __init__(self, connection, progress_callback=None): + self.connection = connection + self.loader = MigrationLoader(self.connection) + self.recorder = MigrationRecorder(self.connection) + self.progress_callback = progress_callback + + def migration_plan(self, targets, clean_start=False): + """ + Given a set of targets, return a list of (Migration instance, backwards?). + """ + plan = [] + if clean_start: + applied = {} + else: + applied = dict(self.loader.applied_migrations) + for target in targets: + # If the target is (app_label, None), that means unmigrate everything + if target[1] is None: + for root in self.loader.graph.root_nodes(): + if root[0] == target[0]: + for migration in self.loader.graph.backwards_plan(root): + if migration in applied: + plan.append((self.loader.graph.nodes[migration], True)) + applied.pop(migration) + # If the migration is already applied, do backwards mode, + # otherwise do forwards mode. + elif target in applied: + # If the target is missing, it's likely a replaced migration. + # Reload the graph without replacements. 
+ if ( + self.loader.replace_migrations + and target not in self.loader.graph.node_map + ): + self.loader.replace_migrations = False + self.loader.build_graph() + return self.migration_plan(targets, clean_start=clean_start) + # Don't migrate backwards all the way to the target node (that + # may roll back dependencies in other apps that don't need to + # be rolled back); instead roll back through target's immediate + # child(ren) in the same app, and no further. + next_in_app = sorted( + n + for n in self.loader.graph.node_map[target].children + if n[0] == target[0] + ) + for node in next_in_app: + for migration in self.loader.graph.backwards_plan(node): + if migration in applied: + plan.append((self.loader.graph.nodes[migration], True)) + applied.pop(migration) + else: + for migration in self.loader.graph.forwards_plan(target): + if migration not in applied: + plan.append((self.loader.graph.nodes[migration], False)) + applied[migration] = self.loader.graph.nodes[migration] + return plan + + def _create_project_state(self, with_applied_migrations=False): + """ + Create a project state including all the applications without + migrations and applied migrations if with_applied_migrations=True. + """ + state = ProjectState(real_apps=self.loader.unmigrated_apps) + if with_applied_migrations: + # Create the forwards plan Django would follow on an empty database + full_plan = self.migration_plan( + self.loader.graph.leaf_nodes(), clean_start=True + ) + applied_migrations = { + self.loader.graph.nodes[key] + for key in self.loader.applied_migrations + if key in self.loader.graph.nodes + } + for migration, _ in full_plan: + if migration in applied_migrations: + migration.mutate_state(state, preserve=False) + return state + + def migrate(self, targets, plan=None, state=None, fake=False, fake_initial=False): + """ + Migrate the database up to the given targets. + + Django first needs to create all project states before a migration is + (un)applied and in a second step run all the database operations. + """ + # The django_migrations table must be present to record applied + # migrations, but don't create it if there are no migrations to apply. + if plan == []: + if not self.recorder.has_table(): + return self._create_project_state(with_applied_migrations=False) + else: + self.recorder.ensure_schema() + + if plan is None: + plan = self.migration_plan(targets) + # Create the forwards plan Django would follow on an empty database + full_plan = self.migration_plan( + self.loader.graph.leaf_nodes(), clean_start=True + ) + + all_forwards = all(not backwards for mig, backwards in plan) + all_backwards = all(backwards for mig, backwards in plan) + + if not plan: + if state is None: + # The resulting state should include applied migrations. + state = self._create_project_state(with_applied_migrations=True) + elif all_forwards == all_backwards: + # This should only happen if there's a mixed plan + raise InvalidMigrationPlan( + "Migration plans with both forwards and backwards migrations " + "are not supported. Please split your migration process into " + "separate plans of only forwards OR backwards migrations.", + plan, + ) + elif all_forwards: + if state is None: + # The resulting state should still include applied migrations. + state = self._create_project_state(with_applied_migrations=True) + state = self._migrate_all_forwards( + state, plan, full_plan, fake=fake, fake_initial=fake_initial + ) + else: + # No need to check for `elif all_backwards` here, as that condition + # would always evaluate to true. 
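+            # A plan is a list of (Migration, backwards) pairs; e.g.
+            # [(<Migration shop.0002_items>, True)] (hypothetical name) is a
+            # single backwards step handled by this branch.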
+ state = self._migrate_all_backwards(plan, full_plan, fake=fake) + + self.check_replacements() + + return state + + def _migrate_all_forwards(self, state, plan, full_plan, fake, fake_initial): + """ + Take a list of 2-tuples of the form (migration instance, False) and + apply them in the order they occur in the full_plan. + """ + migrations_to_run = {m[0] for m in plan} + for migration, _ in full_plan: + if not migrations_to_run: + # We remove every migration that we applied from these sets so + # that we can bail out once the last migration has been applied + # and don't always run until the very end of the migration + # process. + break + if migration in migrations_to_run: + if "apps" not in state.__dict__: + if self.progress_callback: + self.progress_callback("render_start") + state.apps # Render all -- performance critical + if self.progress_callback: + self.progress_callback("render_success") + state = self.apply_migration( + state, migration, fake=fake, fake_initial=fake_initial + ) + migrations_to_run.remove(migration) + + return state + + def _migrate_all_backwards(self, plan, full_plan, fake): + """ + Take a list of 2-tuples of the form (migration instance, True) and + unapply them in reverse order they occur in the full_plan. + + Since unapplying a migration requires the project state prior to that + migration, Django will compute the migration states before each of them + in a first run over the plan and then unapply them in a second run over + the plan. + """ + migrations_to_run = {m[0] for m in plan} + # Holds all migration states prior to the migrations being unapplied + states = {} + state = self._create_project_state() + applied_migrations = { + self.loader.graph.nodes[key] + for key in self.loader.applied_migrations + if key in self.loader.graph.nodes + } + if self.progress_callback: + self.progress_callback("render_start") + for migration, _ in full_plan: + if not migrations_to_run: + # We remove every migration that we applied from this set so + # that we can bail out once the last migration has been applied + # and don't always run until the very end of the migration + # process. + break + if migration in migrations_to_run: + if "apps" not in state.__dict__: + state.apps # Render all -- performance critical + # The state before this migration + states[migration] = state + # The old state keeps as-is, we continue with the new state + state = migration.mutate_state(state, preserve=True) + migrations_to_run.remove(migration) + elif migration in applied_migrations: + # Only mutate the state if the migration is actually applied + # to make sure the resulting state doesn't include changes + # from unrelated migrations. + migration.mutate_state(state, preserve=False) + if self.progress_callback: + self.progress_callback("render_success") + + for migration, _ in plan: + self.unapply_migration(states[migration], migration, fake=fake) + applied_migrations.remove(migration) + + # Generate the post migration state by starting from the state before + # the last migration is unapplied and mutating it to include all the + # remaining applied migrations. 
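+        # Sketch with hypothetical nodes: if full_plan is [m1, m2, m3] and m2
+        # is the last migration being unapplied, resume from states[m2] and
+        # re-apply m3's state mutation if m3 is still applied.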
+ last_unapplied_migration = plan[-1][0] + state = states[last_unapplied_migration] + for index, (migration, _) in enumerate(full_plan): + if migration == last_unapplied_migration: + for migration, _ in full_plan[index:]: + if migration in applied_migrations: + migration.mutate_state(state, preserve=False) + break + + return state + + def apply_migration(self, state, migration, fake=False, fake_initial=False): + """Run a migration forwards.""" + migration_recorded = False + if self.progress_callback: + self.progress_callback("apply_start", migration, fake) + if not fake: + if fake_initial: + # Test to see if this is an already-applied initial migration + applied, state = self.detect_soft_applied(state, migration) + if applied: + fake = True + if not fake: + # Alright, do it normally + with self.connection.schema_editor( + atomic=migration.atomic + ) as schema_editor: + state = migration.apply(state, schema_editor) + if not schema_editor.deferred_sql: + self.record_migration(migration) + migration_recorded = True + if not migration_recorded: + self.record_migration(migration) + # Report progress + if self.progress_callback: + self.progress_callback("apply_success", migration, fake) + return state + + def record_migration(self, migration): + # For replacement migrations, record individual statuses + if migration.replaces: + for app_label, name in migration.replaces: + self.recorder.record_applied(app_label, name) + else: + self.recorder.record_applied(migration.app_label, migration.name) + + def unapply_migration(self, state, migration, fake=False): + """Run a migration backwards.""" + if self.progress_callback: + self.progress_callback("unapply_start", migration, fake) + if not fake: + with self.connection.schema_editor( + atomic=migration.atomic + ) as schema_editor: + state = migration.unapply(state, schema_editor) + # For replacement migrations, also record individual statuses. + if migration.replaces: + for app_label, name in migration.replaces: + self.recorder.record_unapplied(app_label, name) + self.recorder.record_unapplied(migration.app_label, migration.name) + # Report progress + if self.progress_callback: + self.progress_callback("unapply_success", migration, fake) + return state + + def check_replacements(self): + """ + Mark replacement migrations applied if their replaced set all are. + + Do this unconditionally on every migrate, rather than just when + migrations are applied or unapplied, to correctly handle the case + when a new squash migration is pushed to a deployment that already had + all its replaced migrations applied. In this case no new migration will + be applied, but the applied state of the squashed migration must be + maintained. + """ + applied = self.recorder.applied_migrations() + for key, migration in self.loader.replacements.items(): + all_applied = all(m in applied for m in migration.replaces) + if all_applied and key not in applied: + self.recorder.record_applied(*key) + + def detect_soft_applied(self, project_state, migration): + """ + Test whether a migration has been implicitly applied - that the + tables or columns it would create exist. This is intended only for use + on initial migrations (as it only looks for CreateModel and AddField). + """ + + def should_skip_detecting_model(migration, model): + """ + No need to detect tables for proxy models, unmanaged models, or + models that can't be migrated on the current database. 
+ """ + return ( + model._meta.proxy + or not model._meta.managed + or not router.allow_migrate( + self.connection.alias, + migration.app_label, + model_name=model._meta.model_name, + ) + ) + + if migration.initial is None: + # Bail if the migration isn't the first one in its app + if any(app == migration.app_label for app, name in migration.dependencies): + return False, project_state + elif migration.initial is False: + # Bail if it's NOT an initial migration + return False, project_state + + if project_state is None: + after_state = self.loader.project_state( + (migration.app_label, migration.name), at_end=True + ) + else: + after_state = migration.mutate_state(project_state) + apps = after_state.apps + found_create_model_migration = False + found_add_field_migration = False + fold_identifier_case = self.connection.features.ignores_table_name_case + with self.connection.cursor() as cursor: + existing_table_names = set( + self.connection.introspection.table_names(cursor) + ) + if fold_identifier_case: + existing_table_names = { + name.casefold() for name in existing_table_names + } + # Make sure all create model and add field operations are done + for operation in migration.operations: + if isinstance(operation, migrations.CreateModel): + model = apps.get_model(migration.app_label, operation.name) + if model._meta.swapped: + # We have to fetch the model to test with from the + # main app cache, as it's not a direct dependency. + model = global_apps.get_model(model._meta.swapped) + if should_skip_detecting_model(migration, model): + continue + db_table = model._meta.db_table + if fold_identifier_case: + db_table = db_table.casefold() + if db_table not in existing_table_names: + return False, project_state + found_create_model_migration = True + elif isinstance(operation, migrations.AddField): + model = apps.get_model(migration.app_label, operation.model_name) + if model._meta.swapped: + # We have to fetch the model to test with from the + # main app cache, as it's not a direct dependency. + model = global_apps.get_model(model._meta.swapped) + if should_skip_detecting_model(migration, model): + continue + + table = model._meta.db_table + field = model._meta.get_field(operation.name) + + # Handle implicit many-to-many tables created by AddField. + if field.many_to_many: + through_db_table = field.remote_field.through._meta.db_table + if fold_identifier_case: + through_db_table = through_db_table.casefold() + if through_db_table not in existing_table_names: + return False, project_state + else: + found_add_field_migration = True + continue + with self.connection.cursor() as cursor: + columns = self.connection.introspection.get_table_description( + cursor, table + ) + for column in columns: + field_column = field.column + column_name = column.name + if fold_identifier_case: + column_name = column_name.casefold() + field_column = field_column.casefold() + if column_name == field_column: + found_add_field_migration = True + break + else: + return False, project_state + # If we get this far and we found at least one CreateModel or AddField + # migration, the migration is considered implicitly applied. 
+ return (found_create_model_migration or found_add_field_migration), after_state diff --git a/testbed/django__django/django/db/migrations/graph.py b/testbed/django__django/django/db/migrations/graph.py new file mode 100644 index 0000000000000000000000000000000000000000..dd845c13e85ed091ed55732ce303a3edb6c8d72a --- /dev/null +++ b/testbed/django__django/django/db/migrations/graph.py @@ -0,0 +1,333 @@ +from functools import total_ordering + +from django.db.migrations.state import ProjectState + +from .exceptions import CircularDependencyError, NodeNotFoundError + + +@total_ordering +class Node: + """ + A single node in the migration graph. Contains direct links to adjacent + nodes in either direction. + """ + + def __init__(self, key): + self.key = key + self.children = set() + self.parents = set() + + def __eq__(self, other): + return self.key == other + + def __lt__(self, other): + return self.key < other + + def __hash__(self): + return hash(self.key) + + def __getitem__(self, item): + return self.key[item] + + def __str__(self): + return str(self.key) + + def __repr__(self): + return "<%s: (%r, %r)>" % (self.__class__.__name__, self.key[0], self.key[1]) + + def add_child(self, child): + self.children.add(child) + + def add_parent(self, parent): + self.parents.add(parent) + + +class DummyNode(Node): + """ + A node that doesn't correspond to a migration file on disk. + (A squashed migration that was removed, for example.) + + After the migration graph is processed, all dummy nodes should be removed. + If there are any left, a nonexistent dependency error is raised. + """ + + def __init__(self, key, origin, error_message): + super().__init__(key) + self.origin = origin + self.error_message = error_message + + def raise_error(self): + raise NodeNotFoundError(self.error_message, self.key, origin=self.origin) + + +class MigrationGraph: + """ + Represent the digraph of all migrations in a project. + + Each migration is a node, and each dependency is an edge. There are + no implicit dependencies between numbered migrations - the numbering is + merely a convention to aid file listing. Every new numbered migration + has a declared dependency to the previous number, meaning that VCS + branch merges can be detected and resolved. + + Migrations files can be marked as replacing another set of migrations - + this is to support the "squash" feature. The graph handler isn't responsible + for these; instead, the code to load them in here should examine the + migration files and if the replaced migrations are all either unapplied + or not present, it should ignore the replaced ones, load in just the + replacing migration, and repoint any dependencies that pointed to the + replaced migrations to point to the replacing one. + + A node should be a tuple: (app_path, migration_name). The tree special-cases + things within an app - namely, root nodes and leaf nodes ignore dependencies + to other apps. + """ + + def __init__(self): + self.node_map = {} + self.nodes = {} + + def add_node(self, key, migration): + assert key not in self.node_map + node = Node(key) + self.node_map[key] = node + self.nodes[key] = migration + + def add_dummy_node(self, key, origin, error_message): + node = DummyNode(key, origin, error_message) + self.node_map[key] = node + self.nodes[key] = None + + def add_dependency(self, migration, child, parent, skip_validation=False): + """ + This may create dummy nodes if they don't yet exist. If + `skip_validation=True`, validate_consistency() should be called + afterward. 
+ """ + if child not in self.nodes: + error_message = ( + "Migration %s dependencies reference nonexistent" + " child node %r" % (migration, child) + ) + self.add_dummy_node(child, migration, error_message) + if parent not in self.nodes: + error_message = ( + "Migration %s dependencies reference nonexistent" + " parent node %r" % (migration, parent) + ) + self.add_dummy_node(parent, migration, error_message) + self.node_map[child].add_parent(self.node_map[parent]) + self.node_map[parent].add_child(self.node_map[child]) + if not skip_validation: + self.validate_consistency() + + def remove_replaced_nodes(self, replacement, replaced): + """ + Remove each of the `replaced` nodes (when they exist). Any + dependencies that were referencing them are changed to reference the + `replacement` node instead. + """ + # Cast list of replaced keys to set to speed up lookup later. + replaced = set(replaced) + try: + replacement_node = self.node_map[replacement] + except KeyError as err: + raise NodeNotFoundError( + "Unable to find replacement node %r. It was either never added" + " to the migration graph, or has been removed." % (replacement,), + replacement, + ) from err + for replaced_key in replaced: + self.nodes.pop(replaced_key, None) + replaced_node = self.node_map.pop(replaced_key, None) + if replaced_node: + for child in replaced_node.children: + child.parents.remove(replaced_node) + # We don't want to create dependencies between the replaced + # node and the replacement node as this would lead to + # self-referencing on the replacement node at a later iteration. + if child.key not in replaced: + replacement_node.add_child(child) + child.add_parent(replacement_node) + for parent in replaced_node.parents: + parent.children.remove(replaced_node) + # Again, to avoid self-referencing. + if parent.key not in replaced: + replacement_node.add_parent(parent) + parent.add_child(replacement_node) + + def remove_replacement_node(self, replacement, replaced): + """ + The inverse operation to `remove_replaced_nodes`. Almost. Remove the + replacement node `replacement` and remap its child nodes to `replaced` + - the list of nodes it would have replaced. Don't remap its parent + nodes as they are expected to be correct already. + """ + self.nodes.pop(replacement, None) + try: + replacement_node = self.node_map.pop(replacement) + except KeyError as err: + raise NodeNotFoundError( + "Unable to remove replacement node %r. It was either never added" + " to the migration graph, or has been removed already." + % (replacement,), + replacement, + ) from err + replaced_nodes = set() + replaced_nodes_parents = set() + for key in replaced: + replaced_node = self.node_map.get(key) + if replaced_node: + replaced_nodes.add(replaced_node) + replaced_nodes_parents |= replaced_node.parents + # We're only interested in the latest replaced node, so filter out + # replaced nodes that are parents of other replaced nodes. + replaced_nodes -= replaced_nodes_parents + for child in replacement_node.children: + child.parents.remove(replacement_node) + for replaced_node in replaced_nodes: + replaced_node.add_child(child) + child.add_parent(replaced_node) + for parent in replacement_node.parents: + parent.children.remove(replacement_node) + # NOTE: There is no need to remap parent dependencies as we can + # assume the replaced nodes already have the correct ancestry. 
+ + def validate_consistency(self): + """Ensure there are no dummy nodes remaining in the graph.""" + [n.raise_error() for n in self.node_map.values() if isinstance(n, DummyNode)] + + def forwards_plan(self, target): + """ + Given a node, return a list of which previous nodes (dependencies) must + be applied, ending with the node itself. This is the list you would + follow if applying the migrations to a database. + """ + if target not in self.nodes: + raise NodeNotFoundError("Node %r not a valid node" % (target,), target) + return self.iterative_dfs(self.node_map[target]) + + def backwards_plan(self, target): + """ + Given a node, return a list of which dependent nodes (dependencies) + must be unapplied, ending with the node itself. This is the list you + would follow if removing the migrations from a database. + """ + if target not in self.nodes: + raise NodeNotFoundError("Node %r not a valid node" % (target,), target) + return self.iterative_dfs(self.node_map[target], forwards=False) + + def iterative_dfs(self, start, forwards=True): + """Iterative depth-first search for finding dependencies.""" + visited = [] + visited_set = set() + stack = [(start, False)] + while stack: + node, processed = stack.pop() + if node in visited_set: + pass + elif processed: + visited_set.add(node) + visited.append(node.key) + else: + stack.append((node, True)) + stack += [ + (n, False) + for n in sorted(node.parents if forwards else node.children) + ] + return visited + + def root_nodes(self, app=None): + """ + Return all root nodes - that is, nodes with no dependencies inside + their app. These are the starting point for an app. + """ + roots = set() + for node in self.nodes: + if all(key[0] != node[0] for key in self.node_map[node].parents) and ( + not app or app == node[0] + ): + roots.add(node) + return sorted(roots) + + def leaf_nodes(self, app=None): + """ + Return all leaf nodes - that is, nodes with no dependents in their app. + These are the "most current" version of an app's schema. + Having more than one per app is technically an error, but one that + gets handled further up, in the interactive command - it's usually the + result of a VCS merge and needs some user input. + """ + leaves = set() + for node in self.nodes: + if all(key[0] != node[0] for key in self.node_map[node].children) and ( + not app or app == node[0] + ): + leaves.add(node) + return sorted(leaves) + + def ensure_not_cyclic(self): + # Algo from GvR: + # https://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html + todo = set(self.nodes) + while todo: + node = todo.pop() + stack = [node] + while stack: + top = stack[-1] + for child in self.node_map[top].children: + # Use child.key instead of child to speed up the frequent + # hashing. 
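+                    # e.g. (hypothetical apps) a.0001 -> a.0002 -> a.0001
+                    # raises CircularDependencyError("a.0001, a.0002").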
+ node = child.key + if node in stack: + cycle = stack[stack.index(node) :] + raise CircularDependencyError( + ", ".join("%s.%s" % n for n in cycle) + ) + if node in todo: + stack.append(node) + todo.remove(node) + break + else: + node = stack.pop() + + def __str__(self): + return "Graph: %s nodes, %s edges" % self._nodes_and_edges() + + def __repr__(self): + nodes, edges = self._nodes_and_edges() + return "<%s: nodes=%s, edges=%s>" % (self.__class__.__name__, nodes, edges) + + def _nodes_and_edges(self): + return len(self.nodes), sum( + len(node.parents) for node in self.node_map.values() + ) + + def _generate_plan(self, nodes, at_end): + plan = [] + for node in nodes: + for migration in self.forwards_plan(node): + if migration not in plan and (at_end or migration not in nodes): + plan.append(migration) + return plan + + def make_state(self, nodes=None, at_end=True, real_apps=None): + """ + Given a migration node or nodes, return a complete ProjectState for it. + If at_end is False, return the state before the migration has run. + If nodes is not provided, return the overall most current project state. + """ + if nodes is None: + nodes = list(self.leaf_nodes()) + if not nodes: + return ProjectState() + if not isinstance(nodes[0], tuple): + nodes = [nodes] + plan = self._generate_plan(nodes, at_end) + project_state = ProjectState(real_apps=real_apps) + for node in plan: + project_state = self.nodes[node].mutate_state(project_state, preserve=False) + return project_state + + def __contains__(self, node): + return node in self.nodes diff --git a/testbed/django__django/django/db/migrations/loader.py b/testbed/django__django/django/db/migrations/loader.py new file mode 100644 index 0000000000000000000000000000000000000000..81dcd06e04de0c13cf52a12ce8be361a71e8131c --- /dev/null +++ b/testbed/django__django/django/db/migrations/loader.py @@ -0,0 +1,385 @@ +import pkgutil +import sys +from importlib import import_module, reload + +from django.apps import apps +from django.conf import settings +from django.db.migrations.graph import MigrationGraph +from django.db.migrations.recorder import MigrationRecorder + +from .exceptions import ( + AmbiguityError, + BadMigrationError, + InconsistentMigrationHistory, + NodeNotFoundError, +) + +MIGRATIONS_MODULE_NAME = "migrations" + + +class MigrationLoader: + """ + Load migration files from disk and their status from the database. + + Migration files are expected to live in the "migrations" directory of + an app. Their names are entirely unimportant from a code perspective, + but will probably follow the 1234_name.py convention. + + On initialization, this class will scan those directories, and open and + read the Python files, looking for a class called Migration, which should + inherit from django.db.migrations.Migration. See + django.db.migrations.migration for what that looks like. + + Some migrations will be marked as "replacing" another set of migrations. + These are loaded into a separate set of migrations away from the main ones. + If all the migrations they replace are either unapplied or missing from + disk, then they are injected into the main set, replacing the named migrations. + Any dependency pointers to the replaced migrations are re-pointed to the + new migration. + + This does mean that this class MUST also talk to the database as well as + to disk, but this is probably fine. We're already not just operating + in memory. 
+ """ + + def __init__( + self, + connection, + load=True, + ignore_no_migrations=False, + replace_migrations=True, + ): + self.connection = connection + self.disk_migrations = None + self.applied_migrations = None + self.ignore_no_migrations = ignore_no_migrations + self.replace_migrations = replace_migrations + if load: + self.build_graph() + + @classmethod + def migrations_module(cls, app_label): + """ + Return the path to the migrations module for the specified app_label + and a boolean indicating if the module is specified in + settings.MIGRATION_MODULE. + """ + if app_label in settings.MIGRATION_MODULES: + return settings.MIGRATION_MODULES[app_label], True + else: + app_package_name = apps.get_app_config(app_label).name + return "%s.%s" % (app_package_name, MIGRATIONS_MODULE_NAME), False + + def load_disk(self): + """Load the migrations from all INSTALLED_APPS from disk.""" + self.disk_migrations = {} + self.unmigrated_apps = set() + self.migrated_apps = set() + for app_config in apps.get_app_configs(): + # Get the migrations module directory + module_name, explicit = self.migrations_module(app_config.label) + if module_name is None: + self.unmigrated_apps.add(app_config.label) + continue + was_loaded = module_name in sys.modules + try: + module = import_module(module_name) + except ModuleNotFoundError as e: + if (explicit and self.ignore_no_migrations) or ( + not explicit and MIGRATIONS_MODULE_NAME in e.name.split(".") + ): + self.unmigrated_apps.add(app_config.label) + continue + raise + else: + # Module is not a package (e.g. migrations.py). + if not hasattr(module, "__path__"): + self.unmigrated_apps.add(app_config.label) + continue + # Empty directories are namespaces. Namespace packages have no + # __file__ and don't use a list for __path__. See + # https://docs.python.org/3/reference/import.html#namespace-packages + if getattr(module, "__file__", None) is None and not isinstance( + module.__path__, list + ): + self.unmigrated_apps.add(app_config.label) + continue + # Force a reload if it's already loaded (tests need this) + if was_loaded: + reload(module) + self.migrated_apps.add(app_config.label) + migration_names = { + name + for _, name, is_pkg in pkgutil.iter_modules(module.__path__) + if not is_pkg and name[0] not in "_~" + } + # Load migrations + for migration_name in migration_names: + migration_path = "%s.%s" % (module_name, migration_name) + try: + migration_module = import_module(migration_path) + except ImportError as e: + if "bad magic number" in str(e): + raise ImportError( + "Couldn't import %r as it appears to be a stale " + ".pyc file." % migration_path + ) from e + else: + raise + if not hasattr(migration_module, "Migration"): + raise BadMigrationError( + "Migration %s in app %s has no Migration class" + % (migration_name, app_config.label) + ) + self.disk_migrations[ + app_config.label, migration_name + ] = migration_module.Migration( + migration_name, + app_config.label, + ) + + def get_migration(self, app_label, name_prefix): + """Return the named migration or raise NodeNotFoundError.""" + return self.graph.nodes[app_label, name_prefix] + + def get_migration_by_prefix(self, app_label, name_prefix): + """ + Return the migration(s) which match the given app label and name_prefix. 
+ """ + # Do the search + results = [] + for migration_app_label, migration_name in self.disk_migrations: + if migration_app_label == app_label and migration_name.startswith( + name_prefix + ): + results.append((migration_app_label, migration_name)) + if len(results) > 1: + raise AmbiguityError( + "There is more than one migration for '%s' with the prefix '%s'" + % (app_label, name_prefix) + ) + elif not results: + raise KeyError( + f"There is no migration for '{app_label}' with the prefix " + f"'{name_prefix}'" + ) + else: + return self.disk_migrations[results[0]] + + def check_key(self, key, current_app): + if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph: + return key + # Special-case __first__, which means "the first migration" for + # migrated apps, and is ignored for unmigrated apps. It allows + # makemigrations to declare dependencies on apps before they even have + # migrations. + if key[0] == current_app: + # Ignore __first__ references to the same app (#22325) + return + if key[0] in self.unmigrated_apps: + # This app isn't migrated, but something depends on it. + # The models will get auto-added into the state, though + # so we're fine. + return + if key[0] in self.migrated_apps: + try: + if key[1] == "__first__": + return self.graph.root_nodes(key[0])[0] + else: # "__latest__" + return self.graph.leaf_nodes(key[0])[0] + except IndexError: + if self.ignore_no_migrations: + return None + else: + raise ValueError( + "Dependency on app with no migrations: %s" % key[0] + ) + raise ValueError("Dependency on unknown app: %s" % key[0]) + + def add_internal_dependencies(self, key, migration): + """ + Internal dependencies need to be added first to ensure `__first__` + dependencies find the correct root node. + """ + for parent in migration.dependencies: + # Ignore __first__ references to the same app. + if parent[0] == key[0] and parent[1] != "__first__": + self.graph.add_dependency(migration, key, parent, skip_validation=True) + + def add_external_dependencies(self, key, migration): + for parent in migration.dependencies: + # Skip internal dependencies + if key[0] == parent[0]: + continue + parent = self.check_key(parent, key[0]) + if parent is not None: + self.graph.add_dependency(migration, key, parent, skip_validation=True) + for child in migration.run_before: + child = self.check_key(child, key[0]) + if child is not None: + self.graph.add_dependency(migration, child, key, skip_validation=True) + + def build_graph(self): + """ + Build a migration dependency graph using both the disk and database. + You'll need to rebuild the graph if you apply migrations. This isn't + usually a problem as generally migration stuff runs in a one-shot process. + """ + # Load disk data + self.load_disk() + # Load database data + if self.connection is None: + self.applied_migrations = {} + else: + recorder = MigrationRecorder(self.connection) + self.applied_migrations = recorder.applied_migrations() + # To start, populate the migration graph with nodes for ALL migrations + # and their dependencies. Also make note of replacing migrations at this step. + self.graph = MigrationGraph() + self.replacements = {} + for key, migration in self.disk_migrations.items(): + self.graph.add_node(key, migration) + # Replacing migrations. + if migration.replaces: + self.replacements[key] = migration + for key, migration in self.disk_migrations.items(): + # Internal (same app) dependencies. 
+ self.add_internal_dependencies(key, migration) + # Add external dependencies now that the internal ones have been resolved. + for key, migration in self.disk_migrations.items(): + self.add_external_dependencies(key, migration) + # Carry out replacements where possible and if enabled. + if self.replace_migrations: + for key, migration in self.replacements.items(): + # Get applied status of each of this migration's replacement + # targets. + applied_statuses = [ + (target in self.applied_migrations) for target in migration.replaces + ] + # The replacing migration is only marked as applied if all of + # its replacement targets are. + if all(applied_statuses): + self.applied_migrations[key] = migration + else: + self.applied_migrations.pop(key, None) + # A replacing migration can be used if either all or none of + # its replacement targets have been applied. + if all(applied_statuses) or (not any(applied_statuses)): + self.graph.remove_replaced_nodes(key, migration.replaces) + else: + # This replacing migration cannot be used because it is + # partially applied. Remove it from the graph and remap + # dependencies to it (#25945). + self.graph.remove_replacement_node(key, migration.replaces) + # Ensure the graph is consistent. + try: + self.graph.validate_consistency() + except NodeNotFoundError as exc: + # Check if the missing node could have been replaced by any squash + # migration but wasn't because the squash migration was partially + # applied before. In that case raise a more understandable exception + # (#23556). + # Get reverse replacements. + reverse_replacements = {} + for key, migration in self.replacements.items(): + for replaced in migration.replaces: + reverse_replacements.setdefault(replaced, set()).add(key) + # Try to reraise exception with more detail. + if exc.node in reverse_replacements: + candidates = reverse_replacements.get(exc.node, set()) + is_replaced = any( + candidate in self.graph.nodes for candidate in candidates + ) + if not is_replaced: + tries = ", ".join("%s.%s" % c for c in candidates) + raise NodeNotFoundError( + "Migration {0} depends on nonexistent node ('{1}', '{2}'). " + "Django tried to replace migration {1}.{2} with any of [{3}] " + "but wasn't able to because some of the replaced migrations " + "are already applied.".format( + exc.origin, exc.node[0], exc.node[1], tries + ), + exc.node, + ) from exc + raise + self.graph.ensure_not_cyclic() + + def check_consistent_history(self, connection): + """ + Raise InconsistentMigrationHistory if any applied migrations have + unapplied dependencies. + """ + recorder = MigrationRecorder(connection) + applied = recorder.applied_migrations() + for migration in applied: + # If the migration is unknown, skip it. + if migration not in self.graph.nodes: + continue + for parent in self.graph.node_map[migration].parents: + if parent not in applied: + # Skip unapplied squashed migrations that have all of their + # `replaces` applied. + if parent in self.replacements: + if all( + m in applied for m in self.replacements[parent].replaces + ): + continue + raise InconsistentMigrationHistory( + "Migration {}.{} is applied before its dependency " + "{}.{} on database '{}'.".format( + migration[0], + migration[1], + parent[0], + parent[1], + connection.alias, + ) + ) + + def detect_conflicts(self): + """ + Look through the loaded graph and detect any conflicts - apps + with more than one leaf migration. Return a dict of the app labels + that conflict with the migration names that conflict. 
+ """ + seen_apps = {} + conflicting_apps = set() + for app_label, migration_name in self.graph.leaf_nodes(): + if app_label in seen_apps: + conflicting_apps.add(app_label) + seen_apps.setdefault(app_label, set()).add(migration_name) + return { + app_label: sorted(seen_apps[app_label]) for app_label in conflicting_apps + } + + def project_state(self, nodes=None, at_end=True): + """ + Return a ProjectState object representing the most recent state + that the loaded migrations represent. + + See graph.make_state() for the meaning of "nodes" and "at_end". + """ + return self.graph.make_state( + nodes=nodes, at_end=at_end, real_apps=self.unmigrated_apps + ) + + def collect_sql(self, plan): + """ + Take a migration plan and return a list of collected SQL statements + that represent the best-efforts version of that plan. + """ + statements = [] + state = None + for migration, backwards in plan: + with self.connection.schema_editor( + collect_sql=True, atomic=migration.atomic + ) as schema_editor: + if state is None: + state = self.project_state( + (migration.app_label, migration.name), at_end=False + ) + if not backwards: + state = migration.apply(state, schema_editor, collect_sql=True) + else: + state = migration.unapply(state, schema_editor, collect_sql=True) + statements.extend(schema_editor.collected_sql) + return statements diff --git a/testbed/django__django/django/db/migrations/migration.py b/testbed/django__django/django/db/migrations/migration.py new file mode 100644 index 0000000000000000000000000000000000000000..3c7713c5eaf1e3e753d0effa998fa684265074f1 --- /dev/null +++ b/testbed/django__django/django/db/migrations/migration.py @@ -0,0 +1,239 @@ +import re + +from django.db.migrations.utils import get_migration_name_timestamp +from django.db.transaction import atomic + +from .exceptions import IrreversibleError + + +class Migration: + """ + The base class for all migrations. + + Migration files will import this from django.db.migrations.Migration + and subclass it as a class called Migration. It will have one or more + of the following attributes: + + - operations: A list of Operation instances, probably from + django.db.migrations.operations + - dependencies: A list of tuples of (app_path, migration_name) + - run_before: A list of tuples of (app_path, migration_name) + - replaces: A list of migration_names + + Note that all migrations come out of migrations and into the Loader or + Graph as instances, having been initialized with their app label and name. + """ + + # Operations to apply during this migration, in order. + operations = [] + + # Other migrations that should be run before this migration. + # Should be a list of (app, migration_name). + dependencies = [] + + # Other migrations that should be run after this one (i.e. have + # this migration added to their dependencies). Useful to make third-party + # apps' migrations run after your AUTH_USER replacement, for example. + run_before = [] + + # Migration names in this app that this migration replaces. If this is + # non-empty, this migration will only be applied if all these migrations + # are not applied. + replaces = [] + + # Is this an initial migration? Initial migrations are skipped on + # --fake-initial if the table or fields already exist. If None, check if + # the migration has any dependencies to determine if there are dependencies + # to tell if db introspection needs to be done. If True, always perform + # introspection. If False, never perform introspection. 
+    initial = None
+
+    # Whether to wrap the whole migration in a transaction. Only has an effect
+    # on database backends which support transactional DDL.
+    atomic = True
+
+    def __init__(self, name, app_label):
+        self.name = name
+        self.app_label = app_label
+        # Copy dependencies & other attrs as we might mutate them at runtime
+        self.operations = list(self.__class__.operations)
+        self.dependencies = list(self.__class__.dependencies)
+        self.run_before = list(self.__class__.run_before)
+        self.replaces = list(self.__class__.replaces)
+
+    def __eq__(self, other):
+        return (
+            isinstance(other, Migration)
+            and self.name == other.name
+            and self.app_label == other.app_label
+        )
+
+    def __repr__(self):
+        return "<Migration %s.%s>" % (self.app_label, self.name)
+
+    def __str__(self):
+        return "%s.%s" % (self.app_label, self.name)
+
+    def __hash__(self):
+        return hash("%s.%s" % (self.app_label, self.name))
+
+    def mutate_state(self, project_state, preserve=True):
+        """
+        Take a ProjectState and return a new one with the migration's
+        operations applied to it. Preserve the original object state by
+        default and return a mutated state from a copy.
+        """
+        new_state = project_state
+        if preserve:
+            new_state = project_state.clone()
+
+        for operation in self.operations:
+            operation.state_forwards(self.app_label, new_state)
+        return new_state
+
+    def apply(self, project_state, schema_editor, collect_sql=False):
+        """
+        Take a project_state representing all migrations prior to this one
+        and a schema_editor for a live database and apply the migration
+        in a forwards order.
+
+        Return the resulting project state for efficient reuse by following
+        Migrations.
+        """
+        for operation in self.operations:
+            # If this operation cannot be represented as SQL, place a comment
+            # there instead
+            if collect_sql:
+                schema_editor.collected_sql.append("--")
+                schema_editor.collected_sql.append("-- %s" % operation.describe())
+                schema_editor.collected_sql.append("--")
+                if not operation.reduces_to_sql:
+                    schema_editor.collected_sql.append(
+                        "-- THIS OPERATION CANNOT BE WRITTEN AS SQL"
+                    )
+                    continue
+                collected_sql_before = len(schema_editor.collected_sql)
+            # Save the state before the operation has run
+            old_state = project_state.clone()
+            operation.state_forwards(self.app_label, project_state)
+            # Run the operation
+            atomic_operation = operation.atomic or (
+                self.atomic and operation.atomic is not False
+            )
+            if not schema_editor.atomic_migration and atomic_operation:
+                # Force a transaction on a non-transactional-DDL backend or an
+                # atomic operation inside a non-atomic migration.
+                with atomic(schema_editor.connection.alias):
+                    operation.database_forwards(
+                        self.app_label, schema_editor, old_state, project_state
+                    )
+            else:
+                # Normal behaviour
+                operation.database_forwards(
+                    self.app_label, schema_editor, old_state, project_state
+                )
+            if collect_sql and collected_sql_before == len(
+                schema_editor.collected_sql
+            ):
+                schema_editor.collected_sql.append("-- (no-op)")
+        return project_state
+
+    def unapply(self, project_state, schema_editor, collect_sql=False):
+        """
+        Take a project_state representing all migrations prior to this one
+        and a schema_editor for a live database and apply the migration
+        in a reverse order.
+
+        The backwards migration process consists of two phases:
+
+        1. The intermediate states from right before the first until right
+           after the last operation inside this migration are preserved.
+        2. The operations are applied in reverse order using the states
+           recorded in step 1.
+ """ + # Construct all the intermediate states we need for a reverse migration + to_run = [] + new_state = project_state + # Phase 1 + for operation in self.operations: + # If it's irreversible, error out + if not operation.reversible: + raise IrreversibleError( + "Operation %s in %s is not reversible" % (operation, self) + ) + # Preserve new state from previous run to not tamper the same state + # over all operations + new_state = new_state.clone() + old_state = new_state.clone() + operation.state_forwards(self.app_label, new_state) + to_run.insert(0, (operation, old_state, new_state)) + + # Phase 2 + for operation, to_state, from_state in to_run: + if collect_sql: + schema_editor.collected_sql.append("--") + schema_editor.collected_sql.append("-- %s" % operation.describe()) + schema_editor.collected_sql.append("--") + if not operation.reduces_to_sql: + schema_editor.collected_sql.append( + "-- THIS OPERATION CANNOT BE WRITTEN AS SQL" + ) + continue + collected_sql_before = len(schema_editor.collected_sql) + atomic_operation = operation.atomic or ( + self.atomic and operation.atomic is not False + ) + if not schema_editor.atomic_migration and atomic_operation: + # Force a transaction on a non-transactional-DDL backend or an + # atomic operation inside a non-atomic migration. + with atomic(schema_editor.connection.alias): + operation.database_backwards( + self.app_label, schema_editor, from_state, to_state + ) + else: + # Normal behaviour + operation.database_backwards( + self.app_label, schema_editor, from_state, to_state + ) + if collect_sql and collected_sql_before == len(schema_editor.collected_sql): + schema_editor.collected_sql.append("-- (no-op)") + return project_state + + def suggest_name(self): + """ + Suggest a name for the operations this migration might represent. Names + are not guaranteed to be unique, but put some effort into the fallback + name to avoid VCS conflicts if possible. + """ + if self.initial: + return "initial" + + raw_fragments = [op.migration_name_fragment for op in self.operations] + fragments = [re.sub(r"\W+", "_", name) for name in raw_fragments if name] + + if not fragments or len(fragments) != len(self.operations): + return "auto_%s" % get_migration_name_timestamp() + + name = fragments[0] + for fragment in fragments[1:]: + new_name = f"{name}_{fragment}" + if len(new_name) > 52: + name = f"{name}_and_more" + break + name = new_name + return name + + +class SwappableTuple(tuple): + """ + Subclass of tuple so Django can tell this was originally a swappable + dependency when it reads the migration file. 
+ """ + + def __new__(cls, value, setting): + self = tuple.__new__(cls, value) + self.setting = setting + return self + + +def swappable_dependency(value): + """Turn a setting value into a dependency.""" + return SwappableTuple((value.split(".", 1)[0], "__first__"), value) diff --git a/testbed/django__django/django/db/migrations/operations/__init__.py b/testbed/django__django/django/db/migrations/operations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..90dbdf8256b9dfb8e7d4dbc565bb3ed6ce47b33b --- /dev/null +++ b/testbed/django__django/django/db/migrations/operations/__init__.py @@ -0,0 +1,44 @@ +from .fields import AddField, AlterField, RemoveField, RenameField +from .models import ( + AddConstraint, + AddIndex, + AlterIndexTogether, + AlterModelManagers, + AlterModelOptions, + AlterModelTable, + AlterModelTableComment, + AlterOrderWithRespectTo, + AlterUniqueTogether, + CreateModel, + DeleteModel, + RemoveConstraint, + RemoveIndex, + RenameIndex, + RenameModel, +) +from .special import RunPython, RunSQL, SeparateDatabaseAndState + +__all__ = [ + "CreateModel", + "DeleteModel", + "AlterModelTable", + "AlterModelTableComment", + "AlterUniqueTogether", + "RenameModel", + "AlterIndexTogether", + "AlterModelOptions", + "AddIndex", + "RemoveIndex", + "RenameIndex", + "AddField", + "RemoveField", + "AlterField", + "RenameField", + "AddConstraint", + "RemoveConstraint", + "SeparateDatabaseAndState", + "RunSQL", + "RunPython", + "AlterOrderWithRespectTo", + "AlterModelManagers", +] diff --git a/testbed/django__django/django/db/migrations/operations/base.py b/testbed/django__django/django/db/migrations/operations/base.py new file mode 100644 index 0000000000000000000000000000000000000000..7d4dff2597464bf9af895657b74fa8c07e934058 --- /dev/null +++ b/testbed/django__django/django/db/migrations/operations/base.py @@ -0,0 +1,146 @@ +from django.db import router + + +class Operation: + """ + Base class for migration operations. + + It's responsible for both mutating the in-memory model state + (see db/migrations/state.py) to represent what it performs, as well + as actually performing it against a live database. + + Note that some operations won't modify memory state at all (e.g. data + copying operations), and some will need their modifications to be + optionally specified by the user (e.g. custom Python code snippets) + + Due to the way this class deals with deconstruction, it should be + considered immutable. + """ + + # If this migration can be run in reverse. + # Some operations are impossible to reverse, like deleting data. + reversible = True + + # Can this migration be represented as SQL? (things like RunPython cannot) + reduces_to_sql = True + + # Should this operation be forced as atomic even on backends with no + # DDL transaction support (i.e., does it have no DDL, like RunPython) + atomic = False + + # Should this operation be considered safe to elide and optimize across? + elidable = False + + serialization_expand_args = [] + + def __new__(cls, *args, **kwargs): + # We capture the arguments to make returning them trivial + self = object.__new__(cls) + self._constructor_args = (args, kwargs) + return self + + def deconstruct(self): + """ + Return a 3-tuple of class import path (or just name if it lives + under django.db.migrations), positional arguments, and keyword + arguments. 
+ """ + return ( + self.__class__.__name__, + self._constructor_args[0], + self._constructor_args[1], + ) + + def state_forwards(self, app_label, state): + """ + Take the state from the previous migration, and mutate it + so that it matches what this migration would perform. + """ + raise NotImplementedError( + "subclasses of Operation must provide a state_forwards() method" + ) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + """ + Perform the mutation on the database schema in the normal + (forwards) direction. + """ + raise NotImplementedError( + "subclasses of Operation must provide a database_forwards() method" + ) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + """ + Perform the mutation on the database schema in the reverse + direction - e.g. if this were CreateModel, it would in fact + drop the model's table. + """ + raise NotImplementedError( + "subclasses of Operation must provide a database_backwards() method" + ) + + def describe(self): + """ + Output a brief summary of what the action does. + """ + return "%s: %s" % (self.__class__.__name__, self._constructor_args) + + @property + def migration_name_fragment(self): + """ + A filename part suitable for automatically naming a migration + containing this operation, or None if not applicable. + """ + return None + + def references_model(self, name, app_label): + """ + Return True if there is a chance this operation references the given + model name (as a string), with an app label for accuracy. + + Used for optimization. If in doubt, return True; + returning a false positive will merely make the optimizer a little + less efficient, while returning a false negative may result in an + unusable optimized migration. + """ + return True + + def references_field(self, model_name, name, app_label): + """ + Return True if there is a chance this operation references the given + field name, with an app label for accuracy. + + Used for optimization. If in doubt, return True. + """ + return self.references_model(model_name, app_label) + + def allow_migrate_model(self, connection_alias, model): + """ + Return whether or not a model may be migrated. + + This is a thin wrapper around router.allow_migrate_model() that + preemptively rejects any proxy, swapped out, or unmanaged model. + """ + if not model._meta.can_migrate(connection_alias): + return False + + return router.allow_migrate_model(connection_alias, model) + + def reduce(self, operation, app_label): + """ + Return either a list of operations the actual operation should be + replaced with or a boolean that indicates whether or not the specified + operation can be optimized across. 
+ """ + if self.elidable: + return [operation] + elif operation.elidable: + return [self] + return False + + def __repr__(self): + return "<%s %s%s>" % ( + self.__class__.__name__, + ", ".join(map(repr, self._constructor_args[0])), + ",".join(" %s=%r" % x for x in self._constructor_args[1].items()), + ) diff --git a/testbed/django__django/django/db/migrations/operations/fields.py b/testbed/django__django/django/db/migrations/operations/fields.py new file mode 100644 index 0000000000000000000000000000000000000000..fc5640bea99f45193cd4e54b565976f118ee9682 --- /dev/null +++ b/testbed/django__django/django/db/migrations/operations/fields.py @@ -0,0 +1,357 @@ +from django.db.migrations.utils import field_references +from django.db.models import NOT_PROVIDED +from django.utils.functional import cached_property + +from .base import Operation + + +class FieldOperation(Operation): + def __init__(self, model_name, name, field=None): + self.model_name = model_name + self.name = name + self.field = field + + @cached_property + def model_name_lower(self): + return self.model_name.lower() + + @cached_property + def name_lower(self): + return self.name.lower() + + def is_same_model_operation(self, operation): + return self.model_name_lower == operation.model_name_lower + + def is_same_field_operation(self, operation): + return ( + self.is_same_model_operation(operation) + and self.name_lower == operation.name_lower + ) + + def references_model(self, name, app_label): + name_lower = name.lower() + if name_lower == self.model_name_lower: + return True + if self.field: + return bool( + field_references( + (app_label, self.model_name_lower), + self.field, + (app_label, name_lower), + ) + ) + return False + + def references_field(self, model_name, name, app_label): + model_name_lower = model_name.lower() + # Check if this operation locally references the field. + if model_name_lower == self.model_name_lower: + if name == self.name: + return True + elif ( + self.field + and hasattr(self.field, "from_fields") + and name in self.field.from_fields + ): + return True + # Check if this operation remotely references the field. 
+ if self.field is None: + return False + return bool( + field_references( + (app_label, self.model_name_lower), + self.field, + (app_label, model_name_lower), + name, + ) + ) + + def reduce(self, operation, app_label): + return super().reduce(operation, app_label) or not operation.references_field( + self.model_name, self.name, app_label + ) + + +class AddField(FieldOperation): + """Add a field to a model.""" + + def __init__(self, model_name, name, field, preserve_default=True): + self.preserve_default = preserve_default + super().__init__(model_name, name, field) + + def deconstruct(self): + kwargs = { + "model_name": self.model_name, + "name": self.name, + "field": self.field, + } + if self.preserve_default is not True: + kwargs["preserve_default"] = self.preserve_default + return (self.__class__.__name__, [], kwargs) + + def state_forwards(self, app_label, state): + state.add_field( + app_label, + self.model_name_lower, + self.name, + self.field, + self.preserve_default, + ) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + to_model = to_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, to_model): + from_model = from_state.apps.get_model(app_label, self.model_name) + field = to_model._meta.get_field(self.name) + if not self.preserve_default: + field.default = self.field.default + schema_editor.add_field( + from_model, + field, + ) + if not self.preserve_default: + field.default = NOT_PROVIDED + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + from_model = from_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, from_model): + schema_editor.remove_field( + from_model, from_model._meta.get_field(self.name) + ) + + def describe(self): + return "Add field %s to %s" % (self.name, self.model_name) + + @property + def migration_name_fragment(self): + return "%s_%s" % (self.model_name_lower, self.name_lower) + + def reduce(self, operation, app_label): + if isinstance(operation, FieldOperation) and self.is_same_field_operation( + operation + ): + if isinstance(operation, AlterField): + return [ + AddField( + model_name=self.model_name, + name=operation.name, + field=operation.field, + ), + ] + elif isinstance(operation, RemoveField): + return [] + elif isinstance(operation, RenameField): + return [ + AddField( + model_name=self.model_name, + name=operation.new_name, + field=self.field, + ), + ] + return super().reduce(operation, app_label) + + +class RemoveField(FieldOperation): + """Remove a field from a model.""" + + def deconstruct(self): + kwargs = { + "model_name": self.model_name, + "name": self.name, + } + return (self.__class__.__name__, [], kwargs) + + def state_forwards(self, app_label, state): + state.remove_field(app_label, self.model_name_lower, self.name) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + from_model = from_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, from_model): + schema_editor.remove_field( + from_model, from_model._meta.get_field(self.name) + ) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + to_model = to_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, to_model): + from_model = from_state.apps.get_model(app_label, self.model_name) + schema_editor.add_field(from_model, 
to_model._meta.get_field(self.name)) + + def describe(self): + return "Remove field %s from %s" % (self.name, self.model_name) + + @property + def migration_name_fragment(self): + return "remove_%s_%s" % (self.model_name_lower, self.name_lower) + + def reduce(self, operation, app_label): + from .models import DeleteModel + + if ( + isinstance(operation, DeleteModel) + and operation.name_lower == self.model_name_lower + ): + return [operation] + return super().reduce(operation, app_label) + + +class AlterField(FieldOperation): + """ + Alter a field's database column (e.g. null, max_length) to the provided + new field. + """ + + def __init__(self, model_name, name, field, preserve_default=True): + self.preserve_default = preserve_default + super().__init__(model_name, name, field) + + def deconstruct(self): + kwargs = { + "model_name": self.model_name, + "name": self.name, + "field": self.field, + } + if self.preserve_default is not True: + kwargs["preserve_default"] = self.preserve_default + return (self.__class__.__name__, [], kwargs) + + def state_forwards(self, app_label, state): + state.alter_field( + app_label, + self.model_name_lower, + self.name, + self.field, + self.preserve_default, + ) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + to_model = to_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, to_model): + from_model = from_state.apps.get_model(app_label, self.model_name) + from_field = from_model._meta.get_field(self.name) + to_field = to_model._meta.get_field(self.name) + if not self.preserve_default: + to_field.default = self.field.default + schema_editor.alter_field(from_model, from_field, to_field) + if not self.preserve_default: + to_field.default = NOT_PROVIDED + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + self.database_forwards(app_label, schema_editor, from_state, to_state) + + def describe(self): + return "Alter field %s on %s" % (self.name, self.model_name) + + @property + def migration_name_fragment(self): + return "alter_%s_%s" % (self.model_name_lower, self.name_lower) + + def reduce(self, operation, app_label): + if isinstance( + operation, (AlterField, RemoveField) + ) and self.is_same_field_operation(operation): + return [operation] + elif ( + isinstance(operation, RenameField) + and self.is_same_field_operation(operation) + and self.field.db_column is None + ): + return [ + operation, + AlterField( + model_name=self.model_name, + name=operation.new_name, + field=self.field, + ), + ] + return super().reduce(operation, app_label) + + +class RenameField(FieldOperation): + """Rename a field on the model. 
Might affect db_column too.""" + + def __init__(self, model_name, old_name, new_name): + self.old_name = old_name + self.new_name = new_name + super().__init__(model_name, old_name) + + @cached_property + def old_name_lower(self): + return self.old_name.lower() + + @cached_property + def new_name_lower(self): + return self.new_name.lower() + + def deconstruct(self): + kwargs = { + "model_name": self.model_name, + "old_name": self.old_name, + "new_name": self.new_name, + } + return (self.__class__.__name__, [], kwargs) + + def state_forwards(self, app_label, state): + state.rename_field( + app_label, self.model_name_lower, self.old_name, self.new_name + ) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + to_model = to_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, to_model): + from_model = from_state.apps.get_model(app_label, self.model_name) + schema_editor.alter_field( + from_model, + from_model._meta.get_field(self.old_name), + to_model._meta.get_field(self.new_name), + ) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + to_model = to_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, to_model): + from_model = from_state.apps.get_model(app_label, self.model_name) + schema_editor.alter_field( + from_model, + from_model._meta.get_field(self.new_name), + to_model._meta.get_field(self.old_name), + ) + + def describe(self): + return "Rename field %s on %s to %s" % ( + self.old_name, + self.model_name, + self.new_name, + ) + + @property + def migration_name_fragment(self): + return "rename_%s_%s_%s" % ( + self.old_name_lower, + self.model_name_lower, + self.new_name_lower, + ) + + def references_field(self, model_name, name, app_label): + return self.references_model(model_name, app_label) and ( + name.lower() == self.old_name_lower or name.lower() == self.new_name_lower + ) + + def reduce(self, operation, app_label): + if ( + isinstance(operation, RenameField) + and self.is_same_model_operation(operation) + and self.new_name_lower == operation.old_name_lower + ): + return [ + RenameField( + self.model_name, + self.old_name, + operation.new_name, + ), + ] + # Skip `FieldOperation.reduce` as we want to run `references_field` + # against self.old_name and self.new_name. 
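The branch above folds chained renames into one. A sketch (names hypothetical):

    first = RenameField("book", "title", "name")
    second = RenameField("book", "name", "headline")
    first.reduce(second, app_label="library")
    # -> [RenameField("book", "title", "headline")]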
+ return super(FieldOperation, self).reduce(operation, app_label) or not ( + operation.references_field(self.model_name, self.old_name, app_label) + or operation.references_field(self.model_name, self.new_name, app_label) + ) diff --git a/testbed/django__django/django/db/migrations/operations/models.py b/testbed/django__django/django/db/migrations/operations/models.py new file mode 100644 index 0000000000000000000000000000000000000000..b18ef553695ea9170913a80e720dcb805fc58688 --- /dev/null +++ b/testbed/django__django/django/db/migrations/operations/models.py @@ -0,0 +1,1219 @@ +from django.db import models +from django.db.migrations.operations.base import Operation +from django.db.migrations.state import ModelState +from django.db.migrations.utils import field_references, resolve_relation +from django.db.models.options import normalize_together +from django.utils.functional import cached_property + +from .fields import AddField, AlterField, FieldOperation, RemoveField, RenameField + + +def _check_for_duplicates(arg_name, objs): + used_vals = set() + for val in objs: + if val in used_vals: + raise ValueError( + "Found duplicate value %s in CreateModel %s argument." % (val, arg_name) + ) + used_vals.add(val) + + +class ModelOperation(Operation): + def __init__(self, name): + self.name = name + + @cached_property + def name_lower(self): + return self.name.lower() + + def references_model(self, name, app_label): + return name.lower() == self.name_lower + + def reduce(self, operation, app_label): + return super().reduce(operation, app_label) or self.can_reduce_through( + operation, app_label + ) + + def can_reduce_through(self, operation, app_label): + return not operation.references_model(self.name, app_label) + + +class CreateModel(ModelOperation): + """Create a model's table.""" + + serialization_expand_args = ["fields", "options", "managers"] + + def __init__(self, name, fields, options=None, bases=None, managers=None): + self.fields = fields + self.options = options or {} + self.bases = bases or (models.Model,) + self.managers = managers or [] + super().__init__(name) + # Sanity-check that there are no duplicated field names, bases, or + # manager names + _check_for_duplicates("fields", (name for name, _ in self.fields)) + _check_for_duplicates( + "bases", + ( + base._meta.label_lower + if hasattr(base, "_meta") + else base.lower() + if isinstance(base, str) + else base + for base in self.bases + ), + ) + _check_for_duplicates("managers", (name for name, _ in self.managers)) + + def deconstruct(self): + kwargs = { + "name": self.name, + "fields": self.fields, + } + if self.options: + kwargs["options"] = self.options + if self.bases and self.bases != (models.Model,): + kwargs["bases"] = self.bases + if self.managers and self.managers != [("objects", models.Manager())]: + kwargs["managers"] = self.managers + return (self.__class__.__qualname__, [], kwargs) + + def state_forwards(self, app_label, state): + state.add_model( + ModelState( + app_label, + self.name, + list(self.fields), + dict(self.options), + tuple(self.bases), + list(self.managers), + ) + ) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + model = to_state.apps.get_model(app_label, self.name) + if self.allow_migrate_model(schema_editor.connection.alias, model): + schema_editor.create_model(model) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + model = from_state.apps.get_model(app_label, self.name) + if self.allow_migrate_model(schema_editor.connection.alias, 
model): + schema_editor.delete_model(model) + + def describe(self): + return "Create %smodel %s" % ( + "proxy " if self.options.get("proxy", False) else "", + self.name, + ) + + @property + def migration_name_fragment(self): + return self.name_lower + + def references_model(self, name, app_label): + name_lower = name.lower() + if name_lower == self.name_lower: + return True + + # Check we didn't inherit from the model + reference_model_tuple = (app_label, name_lower) + for base in self.bases: + if ( + base is not models.Model + and isinstance(base, (models.base.ModelBase, str)) + and resolve_relation(base, app_label) == reference_model_tuple + ): + return True + + # Check we have no FKs/M2Ms with it + for _name, field in self.fields: + if field_references( + (app_label, self.name_lower), field, reference_model_tuple + ): + return True + return False + + def reduce(self, operation, app_label): + if ( + isinstance(operation, DeleteModel) + and self.name_lower == operation.name_lower + and not self.options.get("proxy", False) + ): + return [] + elif ( + isinstance(operation, RenameModel) + and self.name_lower == operation.old_name_lower + ): + return [ + CreateModel( + operation.new_name, + fields=self.fields, + options=self.options, + bases=self.bases, + managers=self.managers, + ), + ] + elif ( + isinstance(operation, AlterModelOptions) + and self.name_lower == operation.name_lower + ): + options = {**self.options, **operation.options} + for key in operation.ALTER_OPTION_KEYS: + if key not in operation.options: + options.pop(key, None) + return [ + CreateModel( + self.name, + fields=self.fields, + options=options, + bases=self.bases, + managers=self.managers, + ), + ] + elif ( + isinstance(operation, AlterModelManagers) + and self.name_lower == operation.name_lower + ): + return [ + CreateModel( + self.name, + fields=self.fields, + options=self.options, + bases=self.bases, + managers=operation.managers, + ), + ] + elif ( + isinstance(operation, AlterTogetherOptionOperation) + and self.name_lower == operation.name_lower + ): + return [ + CreateModel( + self.name, + fields=self.fields, + options={ + **self.options, + **{operation.option_name: operation.option_value}, + }, + bases=self.bases, + managers=self.managers, + ), + ] + elif ( + isinstance(operation, AlterOrderWithRespectTo) + and self.name_lower == operation.name_lower + ): + return [ + CreateModel( + self.name, + fields=self.fields, + options={ + **self.options, + "order_with_respect_to": operation.order_with_respect_to, + }, + bases=self.bases, + managers=self.managers, + ), + ] + elif ( + isinstance(operation, FieldOperation) + and self.name_lower == operation.model_name_lower + ): + if isinstance(operation, AddField): + return [ + CreateModel( + self.name, + fields=self.fields + [(operation.name, operation.field)], + options=self.options, + bases=self.bases, + managers=self.managers, + ), + ] + elif isinstance(operation, AlterField): + return [ + CreateModel( + self.name, + fields=[ + (n, operation.field if n == operation.name else v) + for n, v in self.fields + ], + options=self.options, + bases=self.bases, + managers=self.managers, + ), + ] + elif isinstance(operation, RemoveField): + options = self.options.copy() + for option_name in ("unique_together", "index_together"): + option = options.pop(option_name, None) + if option: + option = set( + filter( + bool, + ( + tuple( + f for f in fields if f != operation.name_lower + ) + for fields in option + ), + ) + ) + if option: + options[option_name] = option + order_with_respect_to 
= options.get("order_with_respect_to") + if order_with_respect_to == operation.name_lower: + del options["order_with_respect_to"] + return [ + CreateModel( + self.name, + fields=[ + (n, v) + for n, v in self.fields + if n.lower() != operation.name_lower + ], + options=options, + bases=self.bases, + managers=self.managers, + ), + ] + elif isinstance(operation, RenameField): + options = self.options.copy() + for option_name in ("unique_together", "index_together"): + option = options.get(option_name) + if option: + options[option_name] = { + tuple( + operation.new_name if f == operation.old_name else f + for f in fields + ) + for fields in option + } + order_with_respect_to = options.get("order_with_respect_to") + if order_with_respect_to == operation.old_name: + options["order_with_respect_to"] = operation.new_name + return [ + CreateModel( + self.name, + fields=[ + (operation.new_name if n == operation.old_name else n, v) + for n, v in self.fields + ], + options=options, + bases=self.bases, + managers=self.managers, + ), + ] + elif ( + isinstance(operation, IndexOperation) + and self.name_lower == operation.model_name_lower + ): + if isinstance(operation, AddIndex): + return [ + CreateModel( + self.name, + fields=self.fields, + options={ + **self.options, + "indexes": [ + *self.options.get("indexes", []), + operation.index, + ], + }, + bases=self.bases, + managers=self.managers, + ), + ] + elif isinstance(operation, RemoveIndex): + options_indexes = [ + index + for index in self.options.get("indexes", []) + if index.name != operation.name + ] + return [ + CreateModel( + self.name, + fields=self.fields, + options={ + **self.options, + "indexes": options_indexes, + }, + bases=self.bases, + managers=self.managers, + ), + ] + elif isinstance(operation, RenameIndex) and operation.old_fields: + options_index_together = { + fields + for fields in self.options.get("index_together", []) + if fields != operation.old_fields + } + if options_index_together: + self.options["index_together"] = options_index_together + else: + self.options.pop("index_together", None) + return [ + CreateModel( + self.name, + fields=self.fields, + options={ + **self.options, + "indexes": [ + *self.options.get("indexes", []), + models.Index( + fields=operation.old_fields, name=operation.new_name + ), + ], + }, + bases=self.bases, + managers=self.managers, + ), + ] + return super().reduce(operation, app_label) + + +class DeleteModel(ModelOperation): + """Drop a model's table.""" + + def deconstruct(self): + kwargs = { + "name": self.name, + } + return (self.__class__.__qualname__, [], kwargs) + + def state_forwards(self, app_label, state): + state.remove_model(app_label, self.name_lower) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + model = from_state.apps.get_model(app_label, self.name) + if self.allow_migrate_model(schema_editor.connection.alias, model): + schema_editor.delete_model(model) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + model = to_state.apps.get_model(app_label, self.name) + if self.allow_migrate_model(schema_editor.connection.alias, model): + schema_editor.create_model(model) + + def references_model(self, name, app_label): + # The deleted model could be referencing the specified model through + # related fields. 
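Hence the unconditional True just below: a false positive merely costs the optimizer a missed merge, while a false negative could yield a broken optimized migration. Sketch (names hypothetical):

    DeleteModel("Book").references_model("Author", "library")  # always True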
+ return True + + def describe(self): + return "Delete model %s" % self.name + + @property + def migration_name_fragment(self): + return "delete_%s" % self.name_lower + + +class RenameModel(ModelOperation): + """Rename a model.""" + + def __init__(self, old_name, new_name): + self.old_name = old_name + self.new_name = new_name + super().__init__(old_name) + + @cached_property + def old_name_lower(self): + return self.old_name.lower() + + @cached_property + def new_name_lower(self): + return self.new_name.lower() + + def deconstruct(self): + kwargs = { + "old_name": self.old_name, + "new_name": self.new_name, + } + return (self.__class__.__qualname__, [], kwargs) + + def state_forwards(self, app_label, state): + state.rename_model(app_label, self.old_name, self.new_name) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + new_model = to_state.apps.get_model(app_label, self.new_name) + if self.allow_migrate_model(schema_editor.connection.alias, new_model): + old_model = from_state.apps.get_model(app_label, self.old_name) + # Move the main table + schema_editor.alter_db_table( + new_model, + old_model._meta.db_table, + new_model._meta.db_table, + ) + # Alter the fields pointing to us + for related_object in old_model._meta.related_objects: + if related_object.related_model == old_model: + model = new_model + related_key = (app_label, self.new_name_lower) + else: + model = related_object.related_model + related_key = ( + related_object.related_model._meta.app_label, + related_object.related_model._meta.model_name, + ) + to_field = to_state.apps.get_model(*related_key)._meta.get_field( + related_object.field.name + ) + schema_editor.alter_field( + model, + related_object.field, + to_field, + ) + # Rename M2M fields whose name is based on this model's name. + fields = zip( + old_model._meta.local_many_to_many, new_model._meta.local_many_to_many + ) + for old_field, new_field in fields: + # Skip self-referential fields as these are renamed above. + if ( + new_field.model == new_field.related_model + or not new_field.remote_field.through._meta.auto_created + ): + continue + # Rename columns and the M2M table. + schema_editor._alter_many_to_many( + new_model, + old_field, + new_field, + strict=False, + ) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + self.new_name_lower, self.old_name_lower = ( + self.old_name_lower, + self.new_name_lower, + ) + self.new_name, self.old_name = self.old_name, self.new_name + + self.database_forwards(app_label, schema_editor, from_state, to_state) + + self.new_name_lower, self.old_name_lower = ( + self.old_name_lower, + self.new_name_lower, + ) + self.new_name, self.old_name = self.old_name, self.new_name + + def references_model(self, name, app_label): + return ( + name.lower() == self.old_name_lower or name.lower() == self.new_name_lower + ) + + def describe(self): + return "Rename model %s to %s" % (self.old_name, self.new_name) + + @property + def migration_name_fragment(self): + return "rename_%s_%s" % (self.old_name_lower, self.new_name_lower) + + def reduce(self, operation, app_label): + if ( + isinstance(operation, RenameModel) + and self.new_name_lower == operation.old_name_lower + ): + return [ + RenameModel( + self.old_name, + operation.new_name, + ), + ] + # Skip `ModelOperation.reduce` as we want to run `references_model` + # against self.new_name. 
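This mirrors RenameField.reduce at the model level. A sketch (names hypothetical):

    first = RenameModel("Author", "Writer")
    second = RenameModel("Writer", "Contributor")
    first.reduce(second, app_label="library")
    # -> [RenameModel("Author", "Contributor")]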
+ return super(ModelOperation, self).reduce( + operation, app_label + ) or not operation.references_model(self.new_name, app_label) + + +class ModelOptionOperation(ModelOperation): + def reduce(self, operation, app_label): + if ( + isinstance(operation, (self.__class__, DeleteModel)) + and self.name_lower == operation.name_lower + ): + return [operation] + return super().reduce(operation, app_label) + + +class AlterModelTable(ModelOptionOperation): + """Rename a model's table.""" + + def __init__(self, name, table): + self.table = table + super().__init__(name) + + def deconstruct(self): + kwargs = { + "name": self.name, + "table": self.table, + } + return (self.__class__.__qualname__, [], kwargs) + + def state_forwards(self, app_label, state): + state.alter_model_options(app_label, self.name_lower, {"db_table": self.table}) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + new_model = to_state.apps.get_model(app_label, self.name) + if self.allow_migrate_model(schema_editor.connection.alias, new_model): + old_model = from_state.apps.get_model(app_label, self.name) + schema_editor.alter_db_table( + new_model, + old_model._meta.db_table, + new_model._meta.db_table, + ) + # Rename M2M fields whose name is based on this model's db_table + for old_field, new_field in zip( + old_model._meta.local_many_to_many, new_model._meta.local_many_to_many + ): + if new_field.remote_field.through._meta.auto_created: + schema_editor.alter_db_table( + new_field.remote_field.through, + old_field.remote_field.through._meta.db_table, + new_field.remote_field.through._meta.db_table, + ) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + return self.database_forwards(app_label, schema_editor, from_state, to_state) + + def describe(self): + return "Rename table for %s to %s" % ( + self.name, + self.table if self.table is not None else "(default)", + ) + + @property + def migration_name_fragment(self): + return "alter_%s_table" % self.name_lower + + +class AlterModelTableComment(ModelOptionOperation): + def __init__(self, name, table_comment): + self.table_comment = table_comment + super().__init__(name) + + def deconstruct(self): + kwargs = { + "name": self.name, + "table_comment": self.table_comment, + } + return (self.__class__.__qualname__, [], kwargs) + + def state_forwards(self, app_label, state): + state.alter_model_options( + app_label, self.name_lower, {"db_table_comment": self.table_comment} + ) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + new_model = to_state.apps.get_model(app_label, self.name) + if self.allow_migrate_model(schema_editor.connection.alias, new_model): + old_model = from_state.apps.get_model(app_label, self.name) + schema_editor.alter_db_table_comment( + new_model, + old_model._meta.db_table_comment, + new_model._meta.db_table_comment, + ) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + return self.database_forwards(app_label, schema_editor, from_state, to_state) + + def describe(self): + return f"Alter {self.name} table comment" + + @property + def migration_name_fragment(self): + return f"alter_{self.name_lower}_table_comment" + + +class AlterTogetherOptionOperation(ModelOptionOperation): + option_name = None + + def __init__(self, name, option_value): + if option_value: + option_value = set(normalize_together(option_value)) + setattr(self, self.option_name, option_value) + super().__init__(name) + + @cached_property + def option_value(self): + 
return getattr(self, self.option_name) + + def deconstruct(self): + kwargs = { + "name": self.name, + self.option_name: self.option_value, + } + return (self.__class__.__qualname__, [], kwargs) + + def state_forwards(self, app_label, state): + state.alter_model_options( + app_label, + self.name_lower, + {self.option_name: self.option_value}, + ) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + new_model = to_state.apps.get_model(app_label, self.name) + if self.allow_migrate_model(schema_editor.connection.alias, new_model): + old_model = from_state.apps.get_model(app_label, self.name) + alter_together = getattr(schema_editor, "alter_%s" % self.option_name) + alter_together( + new_model, + getattr(old_model._meta, self.option_name, set()), + getattr(new_model._meta, self.option_name, set()), + ) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + return self.database_forwards(app_label, schema_editor, from_state, to_state) + + def references_field(self, model_name, name, app_label): + return self.references_model(model_name, app_label) and ( + not self.option_value + or any((name in fields) for fields in self.option_value) + ) + + def describe(self): + return "Alter %s for %s (%s constraint(s))" % ( + self.option_name, + self.name, + len(self.option_value or ""), + ) + + @property + def migration_name_fragment(self): + return "alter_%s_%s" % (self.name_lower, self.option_name) + + def can_reduce_through(self, operation, app_label): + return super().can_reduce_through(operation, app_label) or ( + isinstance(operation, AlterTogetherOptionOperation) + and type(operation) is not type(self) + ) + + +class AlterUniqueTogether(AlterTogetherOptionOperation): + """ + Change the value of unique_together to the target one. + Input value of unique_together must be a set of tuples. + """ + + option_name = "unique_together" + + def __init__(self, name, unique_together): + super().__init__(name, unique_together) + + +class AlterIndexTogether(AlterTogetherOptionOperation): + """ + Change the value of index_together to the target one. + Input value of index_together must be a set of tuples. 
+ """ + + option_name = "index_together" + + def __init__(self, name, index_together): + super().__init__(name, index_together) + + +class AlterOrderWithRespectTo(ModelOptionOperation): + """Represent a change with the order_with_respect_to option.""" + + option_name = "order_with_respect_to" + + def __init__(self, name, order_with_respect_to): + self.order_with_respect_to = order_with_respect_to + super().__init__(name) + + def deconstruct(self): + kwargs = { + "name": self.name, + "order_with_respect_to": self.order_with_respect_to, + } + return (self.__class__.__qualname__, [], kwargs) + + def state_forwards(self, app_label, state): + state.alter_model_options( + app_label, + self.name_lower, + {self.option_name: self.order_with_respect_to}, + ) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + to_model = to_state.apps.get_model(app_label, self.name) + if self.allow_migrate_model(schema_editor.connection.alias, to_model): + from_model = from_state.apps.get_model(app_label, self.name) + # Remove a field if we need to + if ( + from_model._meta.order_with_respect_to + and not to_model._meta.order_with_respect_to + ): + schema_editor.remove_field( + from_model, from_model._meta.get_field("_order") + ) + # Add a field if we need to (altering the column is untouched as + # it's likely a rename) + elif ( + to_model._meta.order_with_respect_to + and not from_model._meta.order_with_respect_to + ): + field = to_model._meta.get_field("_order") + if not field.has_default(): + field.default = 0 + schema_editor.add_field( + from_model, + field, + ) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + self.database_forwards(app_label, schema_editor, from_state, to_state) + + def references_field(self, model_name, name, app_label): + return self.references_model(model_name, app_label) and ( + self.order_with_respect_to is None or name == self.order_with_respect_to + ) + + def describe(self): + return "Set order_with_respect_to on %s to %s" % ( + self.name, + self.order_with_respect_to, + ) + + @property + def migration_name_fragment(self): + return "alter_%s_order_with_respect_to" % self.name_lower + + +class AlterModelOptions(ModelOptionOperation): + """ + Set new model options that don't directly affect the database schema + (like verbose_name, permissions, ordering). Python code in migrations + may still need them. 
+ """ + + # Model options we want to compare and preserve in an AlterModelOptions op + ALTER_OPTION_KEYS = [ + "base_manager_name", + "default_manager_name", + "default_related_name", + "get_latest_by", + "managed", + "ordering", + "permissions", + "default_permissions", + "select_on_save", + "verbose_name", + "verbose_name_plural", + ] + + def __init__(self, name, options): + self.options = options + super().__init__(name) + + def deconstruct(self): + kwargs = { + "name": self.name, + "options": self.options, + } + return (self.__class__.__qualname__, [], kwargs) + + def state_forwards(self, app_label, state): + state.alter_model_options( + app_label, + self.name_lower, + self.options, + self.ALTER_OPTION_KEYS, + ) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + pass + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + pass + + def describe(self): + return "Change Meta options on %s" % self.name + + @property + def migration_name_fragment(self): + return "alter_%s_options" % self.name_lower + + +class AlterModelManagers(ModelOptionOperation): + """Alter the model's managers.""" + + serialization_expand_args = ["managers"] + + def __init__(self, name, managers): + self.managers = managers + super().__init__(name) + + def deconstruct(self): + return (self.__class__.__qualname__, [self.name, self.managers], {}) + + def state_forwards(self, app_label, state): + state.alter_model_managers(app_label, self.name_lower, self.managers) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + pass + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + pass + + def describe(self): + return "Change managers on %s" % self.name + + @property + def migration_name_fragment(self): + return "alter_%s_managers" % self.name_lower + + +class IndexOperation(Operation): + option_name = "indexes" + + @cached_property + def model_name_lower(self): + return self.model_name.lower() + + +class AddIndex(IndexOperation): + """Add an index on a model.""" + + def __init__(self, model_name, index): + self.model_name = model_name + if not index.name: + raise ValueError( + "Indexes passed to AddIndex operations require a name " + "argument. %r doesn't have one." 
% index + ) + self.index = index + + def state_forwards(self, app_label, state): + state.add_index(app_label, self.model_name_lower, self.index) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + model = to_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, model): + schema_editor.add_index(model, self.index) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + model = from_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, model): + schema_editor.remove_index(model, self.index) + + def deconstruct(self): + kwargs = { + "model_name": self.model_name, + "index": self.index, + } + return ( + self.__class__.__qualname__, + [], + kwargs, + ) + + def describe(self): + if self.index.expressions: + return "Create index %s on %s on model %s" % ( + self.index.name, + ", ".join([str(expression) for expression in self.index.expressions]), + self.model_name, + ) + return "Create index %s on field(s) %s of model %s" % ( + self.index.name, + ", ".join(self.index.fields), + self.model_name, + ) + + @property + def migration_name_fragment(self): + return "%s_%s" % (self.model_name_lower, self.index.name.lower()) + + def reduce(self, operation, app_label): + if isinstance(operation, RemoveIndex) and self.index.name == operation.name: + return [] + if isinstance(operation, RenameIndex) and self.index.name == operation.old_name: + self.index.name = operation.new_name + return [AddIndex(model_name=self.model_name, index=self.index)] + return super().reduce(operation, app_label) + + +class RemoveIndex(IndexOperation): + """Remove an index from a model.""" + + def __init__(self, model_name, name): + self.model_name = model_name + self.name = name + + def state_forwards(self, app_label, state): + state.remove_index(app_label, self.model_name_lower, self.name) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + model = from_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, model): + from_model_state = from_state.models[app_label, self.model_name_lower] + index = from_model_state.get_index_by_name(self.name) + schema_editor.remove_index(model, index) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + model = to_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, model): + to_model_state = to_state.models[app_label, self.model_name_lower] + index = to_model_state.get_index_by_name(self.name) + schema_editor.add_index(model, index) + + def deconstruct(self): + kwargs = { + "model_name": self.model_name, + "name": self.name, + } + return ( + self.__class__.__qualname__, + [], + kwargs, + ) + + def describe(self): + return "Remove index %s from %s" % (self.name, self.model_name) + + @property + def migration_name_fragment(self): + return "remove_%s_%s" % (self.model_name_lower, self.name.lower()) + + +class RenameIndex(IndexOperation): + """Rename an index.""" + + def __init__(self, model_name, new_name, old_name=None, old_fields=None): + if not old_name and not old_fields: + raise ValueError( + "RenameIndex requires one of old_name and old_fields arguments to be " + "set." + ) + if old_name and old_fields: + raise ValueError( + "RenameIndex.old_name and old_fields are mutually exclusive." 
+ ) + self.model_name = model_name + self.new_name = new_name + self.old_name = old_name + self.old_fields = old_fields + + @cached_property + def old_name_lower(self): + return self.old_name.lower() + + @cached_property + def new_name_lower(self): + return self.new_name.lower() + + def deconstruct(self): + kwargs = { + "model_name": self.model_name, + "new_name": self.new_name, + } + if self.old_name: + kwargs["old_name"] = self.old_name + if self.old_fields: + kwargs["old_fields"] = self.old_fields + return (self.__class__.__qualname__, [], kwargs) + + def state_forwards(self, app_label, state): + if self.old_fields: + state.add_index( + app_label, + self.model_name_lower, + models.Index(fields=self.old_fields, name=self.new_name), + ) + state.remove_model_options( + app_label, + self.model_name_lower, + AlterIndexTogether.option_name, + self.old_fields, + ) + else: + state.rename_index( + app_label, self.model_name_lower, self.old_name, self.new_name + ) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + model = to_state.apps.get_model(app_label, self.model_name) + if not self.allow_migrate_model(schema_editor.connection.alias, model): + return + + if self.old_fields: + from_model = from_state.apps.get_model(app_label, self.model_name) + columns = [ + from_model._meta.get_field(field).column for field in self.old_fields + ] + matching_index_name = schema_editor._constraint_names( + from_model, column_names=columns, index=True + ) + if len(matching_index_name) != 1: + raise ValueError( + "Found wrong number (%s) of indexes for %s(%s)." + % ( + len(matching_index_name), + from_model._meta.db_table, + ", ".join(columns), + ) + ) + old_index = models.Index( + fields=self.old_fields, + name=matching_index_name[0], + ) + else: + from_model_state = from_state.models[app_label, self.model_name_lower] + old_index = from_model_state.get_index_by_name(self.old_name) + # Don't alter when the index name is not changed. + if old_index.name == self.new_name: + return + + to_model_state = to_state.models[app_label, self.model_name_lower] + new_index = to_model_state.get_index_by_name(self.new_name) + schema_editor.rename_index(model, old_index, new_index) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + if self.old_fields: + # Backward operation with unnamed index is a no-op. 
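Only the old_name form can be cleanly reversed; the old_fields form (used when replacing an index_together entry) has no stored old name, hence the no-op just below. The two mutually exclusive spellings, with hypothetical names:

    RenameIndex("book", new_name="book_title_idx", old_name="library_bo_title_1e4f51_idx")
    RenameIndex("book", new_name="book_title_idx", old_fields=("title",))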
+ return + + self.new_name_lower, self.old_name_lower = ( + self.old_name_lower, + self.new_name_lower, + ) + self.new_name, self.old_name = self.old_name, self.new_name + + self.database_forwards(app_label, schema_editor, from_state, to_state) + + self.new_name_lower, self.old_name_lower = ( + self.old_name_lower, + self.new_name_lower, + ) + self.new_name, self.old_name = self.old_name, self.new_name + + def describe(self): + if self.old_name: + return ( + f"Rename index {self.old_name} on {self.model_name} to {self.new_name}" + ) + return ( + f"Rename unnamed index for {self.old_fields} on {self.model_name} to " + f"{self.new_name}" + ) + + @property + def migration_name_fragment(self): + if self.old_name: + return "rename_%s_%s" % (self.old_name_lower, self.new_name_lower) + return "rename_%s_%s_%s" % ( + self.model_name_lower, + "_".join(self.old_fields), + self.new_name_lower, + ) + + def reduce(self, operation, app_label): + if ( + isinstance(operation, RenameIndex) + and self.model_name_lower == operation.model_name_lower + and operation.old_name + and self.new_name_lower == operation.old_name_lower + ): + return [ + RenameIndex( + self.model_name, + new_name=operation.new_name, + old_name=self.old_name, + old_fields=self.old_fields, + ) + ] + return super().reduce(operation, app_label) + + +class AddConstraint(IndexOperation): + option_name = "constraints" + + def __init__(self, model_name, constraint): + self.model_name = model_name + self.constraint = constraint + + def state_forwards(self, app_label, state): + state.add_constraint(app_label, self.model_name_lower, self.constraint) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + model = to_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, model): + schema_editor.add_constraint(model, self.constraint) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + model = to_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, model): + schema_editor.remove_constraint(model, self.constraint) + + def deconstruct(self): + return ( + self.__class__.__name__, + [], + { + "model_name": self.model_name, + "constraint": self.constraint, + }, + ) + + def describe(self): + return "Create constraint %s on model %s" % ( + self.constraint.name, + self.model_name, + ) + + @property + def migration_name_fragment(self): + return "%s_%s" % (self.model_name_lower, self.constraint.name.lower()) + + def reduce(self, operation, app_label): + if ( + isinstance(operation, RemoveConstraint) + and self.model_name_lower == operation.model_name_lower + and self.constraint.name == operation.name + ): + return [] + return super().reduce(operation, app_label) + + +class RemoveConstraint(IndexOperation): + option_name = "constraints" + + def __init__(self, model_name, name): + self.model_name = model_name + self.name = name + + def state_forwards(self, app_label, state): + state.remove_constraint(app_label, self.model_name_lower, self.name) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + model = to_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, model): + from_model_state = from_state.models[app_label, self.model_name_lower] + constraint = from_model_state.get_constraint_by_name(self.name) + schema_editor.remove_constraint(model, constraint) + + def database_backwards(self, app_label, 
schema_editor, from_state, to_state): + model = to_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, model): + to_model_state = to_state.models[app_label, self.model_name_lower] + constraint = to_model_state.get_constraint_by_name(self.name) + schema_editor.add_constraint(model, constraint) + + def deconstruct(self): + return ( + self.__class__.__name__, + [], + { + "model_name": self.model_name, + "name": self.name, + }, + ) + + def describe(self): + return "Remove constraint %s from model %s" % (self.name, self.model_name) + + @property + def migration_name_fragment(self): + return "remove_%s_%s" % (self.model_name_lower, self.name.lower()) diff --git a/testbed/django__django/django/db/migrations/operations/special.py b/testbed/django__django/django/db/migrations/operations/special.py new file mode 100644 index 0000000000000000000000000000000000000000..94a6ec72de9a2c062d4ced984d177df5d61e3f62 --- /dev/null +++ b/testbed/django__django/django/db/migrations/operations/special.py @@ -0,0 +1,208 @@ +from django.db import router + +from .base import Operation + + +class SeparateDatabaseAndState(Operation): + """ + Take two lists of operations - ones that will be used for the database, + and ones that will be used for the state change. This allows operations + that don't support state change to have it applied, or have operations + that affect the state or not the database, or so on. + """ + + serialization_expand_args = ["database_operations", "state_operations"] + + def __init__(self, database_operations=None, state_operations=None): + self.database_operations = database_operations or [] + self.state_operations = state_operations or [] + + def deconstruct(self): + kwargs = {} + if self.database_operations: + kwargs["database_operations"] = self.database_operations + if self.state_operations: + kwargs["state_operations"] = self.state_operations + return (self.__class__.__qualname__, [], kwargs) + + def state_forwards(self, app_label, state): + for state_operation in self.state_operations: + state_operation.state_forwards(app_label, state) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + # We calculate state separately in here since our state functions aren't useful + for database_operation in self.database_operations: + to_state = from_state.clone() + database_operation.state_forwards(app_label, to_state) + database_operation.database_forwards( + app_label, schema_editor, from_state, to_state + ) + from_state = to_state + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + # We calculate state separately in here since our state functions aren't useful + to_states = {} + for dbop in self.database_operations: + to_states[dbop] = to_state + to_state = to_state.clone() + dbop.state_forwards(app_label, to_state) + # to_state now has the states of all the database_operations applied + # which is the from_state for the backwards migration of the last + # operation. + for database_operation in reversed(self.database_operations): + from_state = to_state + to_state = to_states[database_operation] + database_operation.database_backwards( + app_label, schema_editor, from_state, to_state + ) + + def describe(self): + return "Custom state/database change combination" + + +class RunSQL(Operation): + """ + Run some raw SQL. A reverse SQL statement may be provided. 
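A typical forward/reverse pair, sketched with hypothetical table and index names:

    RunSQL(
        sql="CREATE INDEX library_book_title_idx ON library_book (title);",
        reverse_sql="DROP INDEX library_book_title_idx;",
    )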
+ + Also accept a list of operations that represent the state change effected + by this SQL change, in case it's custom column/table creation/deletion. + """ + + noop = "" + + def __init__( + self, sql, reverse_sql=None, state_operations=None, hints=None, elidable=False + ): + self.sql = sql + self.reverse_sql = reverse_sql + self.state_operations = state_operations or [] + self.hints = hints or {} + self.elidable = elidable + + def deconstruct(self): + kwargs = { + "sql": self.sql, + } + if self.reverse_sql is not None: + kwargs["reverse_sql"] = self.reverse_sql + if self.state_operations: + kwargs["state_operations"] = self.state_operations + if self.hints: + kwargs["hints"] = self.hints + return (self.__class__.__qualname__, [], kwargs) + + @property + def reversible(self): + return self.reverse_sql is not None + + def state_forwards(self, app_label, state): + for state_operation in self.state_operations: + state_operation.state_forwards(app_label, state) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + if router.allow_migrate( + schema_editor.connection.alias, app_label, **self.hints + ): + self._run_sql(schema_editor, self.sql) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + if self.reverse_sql is None: + raise NotImplementedError("You cannot reverse this operation") + if router.allow_migrate( + schema_editor.connection.alias, app_label, **self.hints + ): + self._run_sql(schema_editor, self.reverse_sql) + + def describe(self): + return "Raw SQL operation" + + def _run_sql(self, schema_editor, sqls): + if isinstance(sqls, (list, tuple)): + for sql in sqls: + params = None + if isinstance(sql, (list, tuple)): + elements = len(sql) + if elements == 2: + sql, params = sql + else: + raise ValueError("Expected a 2-tuple but got %d" % elements) + schema_editor.execute(sql, params=params) + elif sqls != RunSQL.noop: + statements = schema_editor.connection.ops.prepare_sql_script(sqls) + for statement in statements: + schema_editor.execute(statement, params=None) + + +class RunPython(Operation): + """ + Run Python code in a context suitable for doing versioned ORM operations. + """ + + reduces_to_sql = False + + def __init__( + self, code, reverse_code=None, atomic=None, hints=None, elidable=False + ): + self.atomic = atomic + # Forwards code + if not callable(code): + raise ValueError("RunPython must be supplied with a callable") + self.code = code + # Reverse code + if reverse_code is None: + self.reverse_code = None + else: + if not callable(reverse_code): + raise ValueError("RunPython must be supplied with callable arguments") + self.reverse_code = reverse_code + self.hints = hints or {} + self.elidable = elidable + + def deconstruct(self): + kwargs = { + "code": self.code, + } + if self.reverse_code is not None: + kwargs["reverse_code"] = self.reverse_code + if self.atomic is not None: + kwargs["atomic"] = self.atomic + if self.hints: + kwargs["hints"] = self.hints + return (self.__class__.__qualname__, [], kwargs) + + @property + def reversible(self): + return self.reverse_code is not None + + def state_forwards(self, app_label, state): + # RunPython objects have no state effect. To add some, combine this + # with SeparateDatabaseAndState. + pass + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + # RunPython has access to all models. Ensure that all models are + # reloaded in case any are delayed. 
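This is what enables the canonical data-migration pattern, sketched here with hypothetical app and model names; the callable receives the historical apps registry rather than direct model imports:

    def set_default_names(apps, schema_editor):
        Author = apps.get_model("library", "Author")
        Author.objects.filter(name="").update(name="Unknown")

    RunPython(set_default_names, RunPython.noop)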
+ from_state.clear_delayed_apps_cache() + if router.allow_migrate( + schema_editor.connection.alias, app_label, **self.hints + ): + # We now execute the Python code in a context that contains a 'models' + # object, representing the versioned models as an app registry. + # We could try to override the global cache, but then people will still + # use direct imports, so we go with a documentation approach instead. + self.code(from_state.apps, schema_editor) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + if self.reverse_code is None: + raise NotImplementedError("You cannot reverse this operation") + if router.allow_migrate( + schema_editor.connection.alias, app_label, **self.hints + ): + self.reverse_code(from_state.apps, schema_editor) + + def describe(self): + return "Raw Python operation" + + @staticmethod + def noop(apps, schema_editor): + return None diff --git a/testbed/django__django/django/db/migrations/optimizer.py b/testbed/django__django/django/db/migrations/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..7e5dea23774326fb95afdde051a58bc55b45ca25 --- /dev/null +++ b/testbed/django__django/django/db/migrations/optimizer.py @@ -0,0 +1,69 @@ +class MigrationOptimizer: + """ + Power the optimization process, where you provide a list of Operations + and you are returned a list of equal or shorter length - operations + are merged into one if possible. + + For example, a CreateModel and an AddField can be optimized into a + new CreateModel, and CreateModel and DeleteModel can be optimized into + nothing. + """ + + def optimize(self, operations, app_label): + """ + Main optimization entry point. Pass in a list of Operation instances, + get out a new list of Operation instances. + + Unfortunately, due to the scope of the optimization (two combinable + operations might be separated by several hundred others), this can't be + done as a peephole optimization with checks/output implemented on + the Operations themselves; instead, the optimizer looks at each + individual operation and scans forwards in the list to see if there + are any matches, stopping at boundaries - operations which can't + be optimized over (RunSQL, operations on the same field/model, etc.) + + The inner loop is run until the starting list is the same as the result + list, and then the result is returned. This means that operation + optimization must be stable and always return an equal or shorter list. + """ + # Internal tracking variable for test assertions about # of loops + if app_label is None: + raise TypeError("app_label must be a str.") + self._iterations = 0 + while True: + result = self.optimize_inner(operations, app_label) + self._iterations += 1 + if result == operations: + return result + operations = result + + def optimize_inner(self, operations, app_label): + """Inner optimization loop.""" + new_operations = [] + for i, operation in enumerate(operations): + right = True # Should we reduce on the right or on the left. + # Compare it to each operation after it + for j, other in enumerate(operations[i + 1 :]): + result = operation.reduce(other, app_label) + if isinstance(result, list): + in_between = operations[i + 1 : i + j + 1] + if right: + new_operations.extend(in_between) + new_operations.extend(result) + elif all(op.reduce(other, app_label) is True for op in in_between): + # Perform a left reduction if all of the in-between + # operations can optimize through other. 
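+ # e.g. in [AddField, RunPython, RemoveField] the outer pair can only be + # merged if the in-between RunPython optimizes through RemoveField; + # otherwise it acts as a barrier and the operation is kept (illustrative).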
+ new_operations.extend(result) + new_operations.extend(in_between) + else: + # Otherwise keep trying. + new_operations.append(operation) + break + new_operations.extend(operations[i + j + 2 :]) + return new_operations + elif not result: + # Can't perform a right reduction. + right = False + else: + new_operations.append(operation) + return new_operations diff --git a/testbed/django__django/django/db/migrations/questioner.py b/testbed/django__django/django/db/migrations/questioner.py new file mode 100644 index 0000000000000000000000000000000000000000..e1081ab70ac2f6eb45f7b94e3fc43c54a4308692 --- /dev/null +++ b/testbed/django__django/django/db/migrations/questioner.py @@ -0,0 +1,341 @@ +import datetime +import importlib +import os +import sys + +from django.apps import apps +from django.core.management.base import OutputWrapper +from django.db.models import NOT_PROVIDED +from django.utils import timezone +from django.utils.version import get_docs_version + +from .loader import MigrationLoader + + +class MigrationQuestioner: + """ + Give the autodetector responses to questions it might have. + This base class has a built-in noninteractive mode, but the + interactive subclass is what the command-line arguments will use. + """ + + def __init__(self, defaults=None, specified_apps=None, dry_run=None): + self.defaults = defaults or {} + self.specified_apps = specified_apps or set() + self.dry_run = dry_run + + def ask_initial(self, app_label): + """Should we create an initial migration for the app?""" + # If it was specified on the command line, definitely true + if app_label in self.specified_apps: + return True + # Otherwise, we look to see if it has a migrations module + # without any Python files in it, apart from __init__.py. + # Apps from the new app template will have these; the Python + # file check will ensure we skip South ones. + try: + app_config = apps.get_app_config(app_label) + except LookupError: # It's a fake app. + return self.defaults.get("ask_initial", False) + migrations_import_path, _ = MigrationLoader.migrations_module(app_config.label) + if migrations_import_path is None: + # It's an application with migrations disabled. 
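+ # (Migrations are disabled when settings.MIGRATION_MODULES maps the app + # label to None, e.g. MIGRATION_MODULES = {"myapp": None}.)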
+ return self.defaults.get("ask_initial", False) + try: + migrations_module = importlib.import_module(migrations_import_path) + except ImportError: + return self.defaults.get("ask_initial", False) + else: + if getattr(migrations_module, "__file__", None): + filenames = os.listdir(os.path.dirname(migrations_module.__file__)) + elif hasattr(migrations_module, "__path__"): + if len(migrations_module.__path__) > 1: + return False + filenames = os.listdir(list(migrations_module.__path__)[0]) + return not any(x.endswith(".py") for x in filenames if x != "__init__.py") + + def ask_not_null_addition(self, field_name, model_name): + """Adding a NOT NULL field to a model.""" + # None means quit + return None + + def ask_not_null_alteration(self, field_name, model_name): + """Changing a NULL field to NOT NULL.""" + # None means quit + return None + + def ask_rename(self, model_name, old_name, new_name, field_instance): + """Was this field really renamed?""" + return self.defaults.get("ask_rename", False) + + def ask_rename_model(self, old_model_state, new_model_state): + """Was this model really renamed?""" + return self.defaults.get("ask_rename_model", False) + + def ask_merge(self, app_label): + """Should these migrations really be merged?""" + return self.defaults.get("ask_merge", False) + + def ask_auto_now_add_addition(self, field_name, model_name): + """Adding an auto_now_add field to a model.""" + # None means quit + return None + + def ask_unique_callable_default_addition(self, field_name, model_name): + """Adding a unique field with a callable default.""" + # None means continue. + return None + + +class InteractiveMigrationQuestioner(MigrationQuestioner): + def __init__( + self, defaults=None, specified_apps=None, dry_run=None, prompt_output=None + ): + super().__init__( + defaults=defaults, specified_apps=specified_apps, dry_run=dry_run + ) + self.prompt_output = prompt_output or OutputWrapper(sys.stdout) + + def _boolean_input(self, question, default=None): + self.prompt_output.write(f"{question} ", ending="") + result = input() + if not result and default is not None: + return default + while not result or result[0].lower() not in "yn": + self.prompt_output.write("Please answer yes or no: ", ending="") + result = input() + return result[0].lower() == "y" + + def _choice_input(self, question, choices): + self.prompt_output.write(f"{question}") + for i, choice in enumerate(choices): + self.prompt_output.write(" %s) %s" % (i + 1, choice)) + self.prompt_output.write("Select an option: ", ending="") + result = input() + while True: + try: + value = int(result) + except ValueError: + pass + else: + if 0 < value <= len(choices): + return value + self.prompt_output.write("Please select a valid option: ", ending="") + result = input() + + def _ask_default(self, default=""): + """ + Prompt for a default value. + + The ``default`` argument allows providing a custom default value (as a + string) which will be shown to the user and used as the return value + if the user doesn't provide any other input. + """ + self.prompt_output.write("Please enter the default value as valid Python.") + if default: + self.prompt_output.write( + f"Accept the default '{default}' by pressing 'Enter' or " + f"provide another value." + ) + self.prompt_output.write( + "The datetime and django.utils.timezone modules are available, so " + "it is possible to provide e.g. timezone.now as a value." 
+ ) + self.prompt_output.write("Type 'exit' to exit this prompt") + while True: + if default: + prompt = "[default: {}] >>> ".format(default) + else: + prompt = ">>> " + self.prompt_output.write(prompt, ending="") + code = input() + if not code and default: + code = default + if not code: + self.prompt_output.write( + "Please enter some code, or 'exit' (without quotes) to exit." + ) + elif code == "exit": + sys.exit(1) + else: + try: + return eval(code, {}, {"datetime": datetime, "timezone": timezone}) + except (SyntaxError, NameError) as e: + self.prompt_output.write("Invalid input: %s" % e) + + def ask_not_null_addition(self, field_name, model_name): + """Adding a NOT NULL field to a model.""" + if not self.dry_run: + choice = self._choice_input( + f"It is impossible to add a non-nullable field '{field_name}' " + f"to {model_name} without specifying a default. This is " + f"because the database needs something to populate existing " + f"rows.\n" + f"Please select a fix:", + [ + ( + "Provide a one-off default now (will be set on all existing " + "rows with a null value for this column)" + ), + "Quit and manually define a default value in models.py.", + ], + ) + if choice == 2: + sys.exit(3) + else: + return self._ask_default() + return None + + def ask_not_null_alteration(self, field_name, model_name): + """Changing a NULL field to NOT NULL.""" + if not self.dry_run: + choice = self._choice_input( + f"It is impossible to change a nullable field '{field_name}' " + f"on {model_name} to non-nullable without providing a " + f"default. This is because the database needs something to " + f"populate existing rows.\n" + f"Please select a fix:", + [ + ( + "Provide a one-off default now (will be set on all existing " + "rows with a null value for this column)" + ), + "Ignore for now. Existing rows that contain NULL values " + "will have to be handled manually, for example with a " + "RunPython or RunSQL operation.", + "Quit and manually define a default value in models.py.", + ], + ) + if choice == 2: + return NOT_PROVIDED + elif choice == 3: + sys.exit(3) + else: + return self._ask_default() + return None + + def ask_rename(self, model_name, old_name, new_name, field_instance): + """Was this field really renamed?""" + msg = "Was %s.%s renamed to %s.%s (a %s)? [y/N]" + return self._boolean_input( + msg + % ( + model_name, + old_name, + model_name, + new_name, + field_instance.__class__.__name__, + ), + False, + ) + + def ask_rename_model(self, old_model_state, new_model_state): + """Was this model really renamed?""" + msg = "Was the model %s.%s renamed to %s? [y/N]" + return self._boolean_input( + msg + % (old_model_state.app_label, old_model_state.name, new_model_state.name), + False, + ) + + def ask_merge(self, app_label): + return self._boolean_input( + "\nMerging will only work if the operations printed above do not conflict\n" + + "with each other (working on different fields or models)\n" + + "Should these migration branches be merged? [y/N]", + False, + ) + + def ask_auto_now_add_addition(self, field_name, model_name): + """Adding an auto_now_add field to a model.""" + if not self.dry_run: + choice = self._choice_input( + f"It is impossible to add the field '{field_name}' with " + f"'auto_now_add=True' to {model_name} without providing a " + f"default. 
This is because the database needs something to " + f"populate existing rows.\n", + [ + "Provide a one-off default now which will be set on all " + "existing rows", + "Quit and manually define a default value in models.py.", + ], + ) + if choice == 2: + sys.exit(3) + else: + return self._ask_default(default="timezone.now") + return None + + def ask_unique_callable_default_addition(self, field_name, model_name): + """Adding a unique field with a callable default.""" + if not self.dry_run: + version = get_docs_version() + choice = self._choice_input( + f"Callable default on unique field {model_name}.{field_name} " + f"will not generate unique values upon migrating.\n" + f"Please choose how to proceed:\n", + [ + f"Continue making this migration as the first step in " + f"writing a manual migration to generate unique values " + f"described here: " + f"https://docs.djangoproject.com/en/{version}/howto/" + f"writing-migrations/#migrations-that-add-unique-fields.", + "Quit and edit field options in models.py.", + ], + ) + if choice == 2: + sys.exit(3) + return None + + +class NonInteractiveMigrationQuestioner(MigrationQuestioner): + def __init__( + self, + defaults=None, + specified_apps=None, + dry_run=None, + verbosity=1, + log=None, + ): + self.verbosity = verbosity + self.log = log + super().__init__( + defaults=defaults, + specified_apps=specified_apps, + dry_run=dry_run, + ) + + def log_lack_of_migration(self, field_name, model_name, reason): + if self.verbosity > 0: + self.log( + f"Field '{field_name}' on model '{model_name}' not migrated: " + f"{reason}." + ) + + def ask_not_null_addition(self, field_name, model_name): + # We can't ask the user, so act like the user aborted. + self.log_lack_of_migration( + field_name, + model_name, + "it is impossible to add a non-nullable field without specifying " + "a default", + ) + sys.exit(3) + + def ask_not_null_alteration(self, field_name, model_name): + # We can't ask the user, so set as not provided. + self.log( + f"Field '{field_name}' on model '{model_name}' given a default of " + f"NOT PROVIDED and must be corrected." + ) + return NOT_PROVIDED + + def ask_auto_now_add_addition(self, field_name, model_name): + # We can't ask the user, so act like the user aborted. + self.log_lack_of_migration( + field_name, + model_name, + "it is impossible to add a field with 'auto_now_add=True' without " + "specifying a default", + ) + sys.exit(3) diff --git a/testbed/django__django/django/db/migrations/recorder.py b/testbed/django__django/django/db/migrations/recorder.py new file mode 100644 index 0000000000000000000000000000000000000000..cf0bd4a2ce2021e53a873ce1ec85f8f4128d8e66 --- /dev/null +++ b/testbed/django__django/django/db/migrations/recorder.py @@ -0,0 +1,111 @@ +from django.apps.registry import Apps +from django.db import DatabaseError, models +from django.utils.functional import classproperty +from django.utils.timezone import now + +from .exceptions import MigrationSchemaMissing + + +class MigrationRecorder: + """ + Deal with storing migration records in the database. + + Because this table is actually itself used for dealing with model + creation, it's the one thing we can't do normally via migrations. + We manually handle table creation/schema updating (using schema backend) + and then have a floating model to do queries with. + + If a migration is unapplied its row is removed from the table. Having + a row in the table always means a migration is applied. 
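+ + A minimal usage sketch (assuming the default alias; connections comes + from django.db, and the app/migration names are hypothetical): + + recorder = MigrationRecorder(connections["default"]) + recorder.record_applied("myapp", "0001_initial") + ("myapp", "0001_initial") in recorder.applied_migrations() # True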
+ """ + + _migration_class = None + + @classproperty + def Migration(cls): + """ + Lazy load to avoid AppRegistryNotReady if installed apps import + MigrationRecorder. + """ + if cls._migration_class is None: + + class Migration(models.Model): + app = models.CharField(max_length=255) + name = models.CharField(max_length=255) + applied = models.DateTimeField(default=now) + + class Meta: + apps = Apps() + app_label = "migrations" + db_table = "django_migrations" + + def __str__(self): + return "Migration %s for %s" % (self.name, self.app) + + cls._migration_class = Migration + return cls._migration_class + + def __init__(self, connection): + self.connection = connection + self._has_table = False + + @property + def migration_qs(self): + return self.Migration.objects.using(self.connection.alias) + + def has_table(self): + """Return True if the django_migrations table exists.""" + # If the migrations table has already been confirmed to exist, don't + # recheck it's existence. + if self._has_table: + return True + # It hasn't been confirmed to exist, recheck. + with self.connection.cursor() as cursor: + tables = self.connection.introspection.table_names(cursor) + + self._has_table = self.Migration._meta.db_table in tables + return self._has_table + + def ensure_schema(self): + """Ensure the table exists and has the correct schema.""" + # If the table's there, that's fine - we've never changed its schema + # in the codebase. + if self.has_table(): + return + # Make the table + try: + with self.connection.schema_editor() as editor: + editor.create_model(self.Migration) + except DatabaseError as exc: + raise MigrationSchemaMissing( + "Unable to create the django_migrations table (%s)" % exc + ) + + def applied_migrations(self): + """ + Return a dict mapping (app_name, migration_name) to Migration instances + for all applied migrations. + """ + if self.has_table(): + return { + (migration.app, migration.name): migration + for migration in self.migration_qs + } + else: + # If the django_migrations table doesn't exist, then no migrations + # are applied. + return {} + + def record_applied(self, app, name): + """Record that a migration was applied.""" + self.ensure_schema() + self.migration_qs.create(app=app, name=name) + + def record_unapplied(self, app, name): + """Record that a migration was unapplied.""" + self.ensure_schema() + self.migration_qs.filter(app=app, name=name).delete() + + def flush(self): + """Delete all migration records. 
Useful for testing migrations.""" + self.migration_qs.all().delete() diff --git a/testbed/django__django/django/db/migrations/serializer.py b/testbed/django__django/django/db/migrations/serializer.py new file mode 100644 index 0000000000000000000000000000000000000000..454feaa8297126fbcfc70ee338cdef1efe51610b --- /dev/null +++ b/testbed/django__django/django/db/migrations/serializer.py @@ -0,0 +1,395 @@ +import builtins +import collections.abc +import datetime +import decimal +import enum +import functools +import math +import os +import pathlib +import re +import types +import uuid + +from django.conf import SettingsReference +from django.db import models +from django.db.migrations.operations.base import Operation +from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject +from django.utils.functional import LazyObject, Promise +from django.utils.version import PY311, get_docs_version + + +class BaseSerializer: + def __init__(self, value): + self.value = value + + def serialize(self): + raise NotImplementedError( + "Subclasses of BaseSerializer must implement the serialize() method." + ) + + +class BaseSequenceSerializer(BaseSerializer): + def _format(self): + raise NotImplementedError( + "Subclasses of BaseSequenceSerializer must implement the _format() method." + ) + + def serialize(self): + imports = set() + strings = [] + for item in self.value: + item_string, item_imports = serializer_factory(item).serialize() + imports.update(item_imports) + strings.append(item_string) + value = self._format() + return value % (", ".join(strings)), imports + + +class BaseSimpleSerializer(BaseSerializer): + def serialize(self): + return repr(self.value), set() + + +class ChoicesSerializer(BaseSerializer): + def serialize(self): + return serializer_factory(self.value.value).serialize() + + +class DateTimeSerializer(BaseSerializer): + """For datetime.*, except datetime.datetime.""" + + def serialize(self): + return repr(self.value), {"import datetime"} + + +class DatetimeDatetimeSerializer(BaseSerializer): + """For datetime.datetime.""" + + def serialize(self): + if self.value.tzinfo is not None and self.value.tzinfo != datetime.timezone.utc: + self.value = self.value.astimezone(datetime.timezone.utc) + imports = ["import datetime"] + return repr(self.value), set(imports) + + +class DecimalSerializer(BaseSerializer): + def serialize(self): + return repr(self.value), {"from decimal import Decimal"} + + +class DeconstructableSerializer(BaseSerializer): + @staticmethod + def serialize_deconstructed(path, args, kwargs): + name, imports = DeconstructableSerializer._serialize_path(path) + strings = [] + for arg in args: + arg_string, arg_imports = serializer_factory(arg).serialize() + strings.append(arg_string) + imports.update(arg_imports) + for kw, arg in sorted(kwargs.items()): + arg_string, arg_imports = serializer_factory(arg).serialize() + imports.update(arg_imports) + strings.append("%s=%s" % (kw, arg_string)) + return "%s(%s)" % (name, ", ".join(strings)), imports + + @staticmethod + def _serialize_path(path): + module, name = path.rsplit(".", 1) + if module == "django.db.models": + imports = {"from django.db import models"} + name = "models.%s" % name + else: + imports = {"import %s" % module} + name = path + return name, imports + + def serialize(self): + return self.serialize_deconstructed(*self.value.deconstruct()) + + +class DictionarySerializer(BaseSerializer): + def serialize(self): + imports = set() + strings = [] + for k, v in sorted(self.value.items()): + k_string, k_imports = 
serializer_factory(k).serialize() + v_string, v_imports = serializer_factory(v).serialize() + imports.update(k_imports) + imports.update(v_imports) + strings.append((k_string, v_string)) + return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports + + +class EnumSerializer(BaseSerializer): + def serialize(self): + enum_class = self.value.__class__ + module = enum_class.__module__ + if issubclass(enum_class, enum.Flag): + if PY311: + members = list(self.value) + else: + members, _ = enum._decompose(enum_class, self.value) + members = reversed(members) + else: + members = (self.value,) + return ( + " | ".join( + [ + f"{module}.{enum_class.__qualname__}[{item.name!r}]" + for item in members + ] + ), + {"import %s" % module}, + ) + + +class FloatSerializer(BaseSimpleSerializer): + def serialize(self): + if math.isnan(self.value) or math.isinf(self.value): + return 'float("{}")'.format(self.value), set() + return super().serialize() + + +class FrozensetSerializer(BaseSequenceSerializer): + def _format(self): + return "frozenset([%s])" + + +class FunctionTypeSerializer(BaseSerializer): + def serialize(self): + if getattr(self.value, "__self__", None) and isinstance( + self.value.__self__, type + ): + klass = self.value.__self__ + module = klass.__module__ + return "%s.%s.%s" % (module, klass.__name__, self.value.__name__), { + "import %s" % module + } + # Further error checking + if self.value.__name__ == "<lambda>": + raise ValueError("Cannot serialize function: lambda") + if self.value.__module__ is None: + raise ValueError("Cannot serialize function %r: No module" % self.value) + + module_name = self.value.__module__ + + if "<" not in self.value.__qualname__: # Qualname can include <locals> + return "%s.%s" % (module_name, self.value.__qualname__), { + "import %s" % self.value.__module__ + } + + raise ValueError( + "Could not find function %s in %s.\n" % (self.value.__name__, module_name) + ) + + +class FunctoolsPartialSerializer(BaseSerializer): + def serialize(self): + # Serialize functools.partial() arguments + func_string, func_imports = serializer_factory(self.value.func).serialize() + args_string, args_imports = serializer_factory(self.value.args).serialize() + keywords_string, keywords_imports = serializer_factory( + self.value.keywords + ).serialize() + # Add any imports needed by arguments + imports = {"import functools", *func_imports, *args_imports, *keywords_imports} + return ( + "functools.%s(%s, *%s, **%s)" + % ( + self.value.__class__.__name__, + func_string, + args_string, + keywords_string, + ), + imports, + ) + + +class IterableSerializer(BaseSerializer): + def serialize(self): + imports = set() + strings = [] + for item in self.value: + item_string, item_imports = serializer_factory(item).serialize() + imports.update(item_imports) + strings.append(item_string) + # When len(strings)==0, the empty iterable should be serialized as + # "()", not "(,)" because (,) is invalid Python syntax.
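+ # e.g. zero items -> "()", one item -> "(1,)", two items -> "(1, 2)".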
+ value = "(%s)" if len(strings) != 1 else "(%s,)" + return value % (", ".join(strings)), imports + + +class ModelFieldSerializer(DeconstructableSerializer): + def serialize(self): + attr_name, path, args, kwargs = self.value.deconstruct() + return self.serialize_deconstructed(path, args, kwargs) + + +class ModelManagerSerializer(DeconstructableSerializer): + def serialize(self): + as_manager, manager_path, qs_path, args, kwargs = self.value.deconstruct() + if as_manager: + name, imports = self._serialize_path(qs_path) + return "%s.as_manager()" % name, imports + else: + return self.serialize_deconstructed(manager_path, args, kwargs) + + +class OperationSerializer(BaseSerializer): + def serialize(self): + from django.db.migrations.writer import OperationWriter + + string, imports = OperationWriter(self.value, indentation=0).serialize() + # Nested operation, trailing comma is handled in upper OperationWriter._write() + return string.rstrip(","), imports + + +class PathLikeSerializer(BaseSerializer): + def serialize(self): + return repr(os.fspath(self.value)), {} + + +class PathSerializer(BaseSerializer): + def serialize(self): + # Convert concrete paths to pure paths to avoid issues with migrations + # generated on one platform being used on a different platform. + prefix = "Pure" if isinstance(self.value, pathlib.Path) else "" + return "pathlib.%s%r" % (prefix, self.value), {"import pathlib"} + + +class RegexSerializer(BaseSerializer): + def serialize(self): + regex_pattern, pattern_imports = serializer_factory( + self.value.pattern + ).serialize() + # Turn off default implicit flags (e.g. re.U) because regexes with the + # same implicit and explicit flags aren't equal. + flags = self.value.flags ^ re.compile("").flags + regex_flags, flag_imports = serializer_factory(flags).serialize() + imports = {"import re", *pattern_imports, *flag_imports} + args = [regex_pattern] + if flags: + args.append(regex_flags) + return "re.compile(%s)" % ", ".join(args), imports + + +class SequenceSerializer(BaseSequenceSerializer): + def _format(self): + return "[%s]" + + +class SetSerializer(BaseSequenceSerializer): + def _format(self): + # Serialize as a set literal except when value is empty because {} + # is an empty dict. + return "{%s}" if self.value else "set(%s)" + + +class SettingsReferenceSerializer(BaseSerializer): + def serialize(self): + return "settings.%s" % self.value.setting_name, { + "from django.conf import settings" + } + + +class TupleSerializer(BaseSequenceSerializer): + def _format(self): + # When len(value)==0, the empty tuple should be serialized as "()", + # not "(,)" because (,) is invalid Python syntax. + return "(%s)" if len(self.value) != 1 else "(%s,)" + + +class TypeSerializer(BaseSerializer): + def serialize(self): + special_cases = [ + (models.Model, "models.Model", ["from django.db import models"]), + (types.NoneType, "types.NoneType", ["import types"]), + ] + for case, string, imports in special_cases: + if case is self.value: + return string, set(imports) + if hasattr(self.value, "__module__"): + module = self.value.__module__ + if module == builtins.__name__: + return self.value.__name__, set() + else: + return "%s.%s" % (module, self.value.__qualname__), { + "import %s" % module + } + + +class UUIDSerializer(BaseSerializer): + def serialize(self): + return "uuid.%s" % repr(self.value), {"import uuid"} + + +class Serializer: + _registry = { + # Some of these are order-dependent. 
+ frozenset: FrozensetSerializer, + list: SequenceSerializer, + set: SetSerializer, + tuple: TupleSerializer, + dict: DictionarySerializer, + models.Choices: ChoicesSerializer, + enum.Enum: EnumSerializer, + datetime.datetime: DatetimeDatetimeSerializer, + (datetime.date, datetime.timedelta, datetime.time): DateTimeSerializer, + SettingsReference: SettingsReferenceSerializer, + float: FloatSerializer, + (bool, int, types.NoneType, bytes, str, range): BaseSimpleSerializer, + decimal.Decimal: DecimalSerializer, + (functools.partial, functools.partialmethod): FunctoolsPartialSerializer, + ( + types.FunctionType, + types.BuiltinFunctionType, + types.MethodType, + ): FunctionTypeSerializer, + collections.abc.Iterable: IterableSerializer, + (COMPILED_REGEX_TYPE, RegexObject): RegexSerializer, + uuid.UUID: UUIDSerializer, + pathlib.PurePath: PathSerializer, + os.PathLike: PathLikeSerializer, + } + + @classmethod + def register(cls, type_, serializer): + if not issubclass(serializer, BaseSerializer): + raise ValueError( + "'%s' must inherit from 'BaseSerializer'." % serializer.__name__ + ) + cls._registry[type_] = serializer + + @classmethod + def unregister(cls, type_): + cls._registry.pop(type_) + + +def serializer_factory(value): + if isinstance(value, Promise): + value = str(value) + elif isinstance(value, LazyObject): + # The unwrapped value is returned as the first item of the arguments + # tuple. + value = value.__reduce__()[1][0] + + if isinstance(value, models.Field): + return ModelFieldSerializer(value) + if isinstance(value, models.manager.BaseManager): + return ModelManagerSerializer(value) + if isinstance(value, Operation): + return OperationSerializer(value) + if isinstance(value, type): + return TypeSerializer(value) + # Anything that knows how to deconstruct itself. 
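+ # e.g. a class decorated with @django.utils.deconstruct.deconstructible + # gains a deconstruct() method and is handled here without registration.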
+ if hasattr(value, "deconstruct"): + return DeconstructableSerializer(value) + for type_, serializer_cls in Serializer._registry.items(): + if isinstance(value, type_): + return serializer_cls(value) + raise ValueError( + "Cannot serialize: %r\nThere are some values Django cannot serialize into " + "migration files.\nFor more, see https://docs.djangoproject.com/en/%s/" + "topics/migrations/#migration-serializing" % (value, get_docs_version()) + ) diff --git a/testbed/django__django/django/db/migrations/state.py b/testbed/django__django/django/db/migrations/state.py new file mode 100644 index 0000000000000000000000000000000000000000..ae55967383f147207d7047ca267a1e5cda1931b7 --- /dev/null +++ b/testbed/django__django/django/db/migrations/state.py @@ -0,0 +1,988 @@ +import copy +from collections import defaultdict +from contextlib import contextmanager +from functools import partial + +from django.apps import AppConfig +from django.apps.registry import Apps +from django.apps.registry import apps as global_apps +from django.conf import settings +from django.core.exceptions import FieldDoesNotExist +from django.db import models +from django.db.migrations.utils import field_is_referenced, get_references +from django.db.models import NOT_PROVIDED +from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT +from django.db.models.options import DEFAULT_NAMES, normalize_together +from django.db.models.utils import make_model_tuple +from django.utils.functional import cached_property +from django.utils.module_loading import import_string +from django.utils.version import get_docs_version + +from .exceptions import InvalidBasesError +from .utils import resolve_relation + + +def _get_app_label_and_model_name(model, app_label=""): + if isinstance(model, str): + split = model.split(".", 1) + return tuple(split) if len(split) == 2 else (app_label, split[0]) + else: + return model._meta.app_label, model._meta.model_name + + +def _get_related_models(m): + """Return all models that have a direct relationship to the given model.""" + related_models = [ + subclass + for subclass in m.__subclasses__() + if issubclass(subclass, models.Model) + ] + related_fields_models = set() + for f in m._meta.get_fields(include_parents=True, include_hidden=True): + if ( + f.is_relation + and f.related_model is not None + and not isinstance(f.related_model, str) + ): + related_fields_models.add(f.model) + related_models.append(f.related_model) + # Reverse accessors of foreign keys to proxy models are attached to their + # concrete proxied model. + opts = m._meta + if opts.proxy and m in related_fields_models: + related_models.append(opts.concrete_model) + return related_models + + +def get_related_models_tuples(model): + """ + Return a list of typical (app_label, model_name) tuples for all related + models for the given model. + """ + return { + (rel_mod._meta.app_label, rel_mod._meta.model_name) + for rel_mod in _get_related_models(model) + } + + +def get_related_models_recursive(model): + """ + Return all models that have a direct or indirect relationship + to the given model. + + Relationships are either defined by explicit relational fields, like + ForeignKey, ManyToManyField or OneToOneField, or by inheriting from another + model (a superclass is related to its subclasses, but not vice versa). Note, + however, that a model inheriting from a concrete model is also related to + its superclass through the implicit *_ptr OneToOneField on the subclass. 
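+ + For example (illustrative models): if Book has a ForeignKey to Author and + Novel subclasses Book, both Book and Novel are returned for Author.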
+ """ + seen = set() + queue = _get_related_models(model) + for rel_mod in queue: + rel_app_label, rel_model_name = ( + rel_mod._meta.app_label, + rel_mod._meta.model_name, + ) + if (rel_app_label, rel_model_name) in seen: + continue + seen.add((rel_app_label, rel_model_name)) + queue.extend(_get_related_models(rel_mod)) + return seen - {(model._meta.app_label, model._meta.model_name)} + + +class ProjectState: + """ + Represent the entire project's overall state. This is the item that is + passed around - do it here rather than at the app level so that cross-app + FKs/etc. resolve properly. + """ + + def __init__(self, models=None, real_apps=None): + self.models = models or {} + # Apps to include from main registry, usually unmigrated ones + if real_apps is None: + real_apps = set() + else: + assert isinstance(real_apps, set) + self.real_apps = real_apps + self.is_delayed = False + # {remote_model_key: {model_key: {field_name: field}}} + self._relations = None + + @property + def relations(self): + if self._relations is None: + self.resolve_fields_and_relations() + return self._relations + + def add_model(self, model_state): + model_key = model_state.app_label, model_state.name_lower + self.models[model_key] = model_state + if self._relations is not None: + self.resolve_model_relations(model_key) + if "apps" in self.__dict__: # hasattr would cache the property + self.reload_model(*model_key) + + def remove_model(self, app_label, model_name): + model_key = app_label, model_name + del self.models[model_key] + if self._relations is not None: + self._relations.pop(model_key, None) + # Call list() since _relations can change size during iteration. + for related_model_key, model_relations in list(self._relations.items()): + model_relations.pop(model_key, None) + if not model_relations: + del self._relations[related_model_key] + if "apps" in self.__dict__: # hasattr would cache the property + self.apps.unregister_model(*model_key) + # Need to do this explicitly since unregister_model() doesn't clear + # the cache automatically (#24513) + self.apps.clear_cache() + + def rename_model(self, app_label, old_name, new_name): + # Add a new model. + old_name_lower = old_name.lower() + new_name_lower = new_name.lower() + renamed_model = self.models[app_label, old_name_lower].clone() + renamed_model.name = new_name + self.models[app_label, new_name_lower] = renamed_model + # Repoint all fields pointing to the old model to the new one. + old_model_tuple = (app_label, old_name_lower) + new_remote_model = f"{app_label}.{new_name}" + to_reload = set() + for model_state, name, field, reference in get_references( + self, old_model_tuple + ): + changed_field = None + if reference.to: + changed_field = field.clone() + changed_field.remote_field.model = new_remote_model + if reference.through: + if changed_field is None: + changed_field = field.clone() + changed_field.remote_field.through = new_remote_model + if changed_field: + model_state.fields[name] = changed_field + to_reload.add((model_state.app_label, model_state.name_lower)) + if self._relations is not None: + old_name_key = app_label, old_name_lower + new_name_key = app_label, new_name_lower + if old_name_key in self._relations: + self._relations[new_name_key] = self._relations.pop(old_name_key) + for model_relations in self._relations.values(): + if old_name_key in model_relations: + model_relations[new_name_key] = model_relations.pop(old_name_key) + # Reload models related to old model before removing the old model. 
+ self.reload_models(to_reload, delay=True) + # Remove the old model. + self.remove_model(app_label, old_name_lower) + self.reload_model(app_label, new_name_lower, delay=True) + + def alter_model_options(self, app_label, model_name, options, option_keys=None): + model_state = self.models[app_label, model_name] + model_state.options = {**model_state.options, **options} + if option_keys: + for key in option_keys: + if key not in options: + model_state.options.pop(key, False) + self.reload_model(app_label, model_name, delay=True) + + def remove_model_options(self, app_label, model_name, option_name, value_to_remove): + model_state = self.models[app_label, model_name] + if objs := model_state.options.get(option_name): + model_state.options[option_name] = [ + obj for obj in objs if tuple(obj) != tuple(value_to_remove) + ] + self.reload_model(app_label, model_name, delay=True) + + def alter_model_managers(self, app_label, model_name, managers): + model_state = self.models[app_label, model_name] + model_state.managers = list(managers) + self.reload_model(app_label, model_name, delay=True) + + def _append_option(self, app_label, model_name, option_name, obj): + model_state = self.models[app_label, model_name] + model_state.options[option_name] = [*model_state.options[option_name], obj] + self.reload_model(app_label, model_name, delay=True) + + def _remove_option(self, app_label, model_name, option_name, obj_name): + model_state = self.models[app_label, model_name] + objs = model_state.options[option_name] + model_state.options[option_name] = [obj for obj in objs if obj.name != obj_name] + self.reload_model(app_label, model_name, delay=True) + + def add_index(self, app_label, model_name, index): + self._append_option(app_label, model_name, "indexes", index) + + def remove_index(self, app_label, model_name, index_name): + self._remove_option(app_label, model_name, "indexes", index_name) + + def rename_index(self, app_label, model_name, old_index_name, new_index_name): + model_state = self.models[app_label, model_name] + objs = model_state.options["indexes"] + + new_indexes = [] + for obj in objs: + if obj.name == old_index_name: + obj = obj.clone() + obj.name = new_index_name + new_indexes.append(obj) + + model_state.options["indexes"] = new_indexes + self.reload_model(app_label, model_name, delay=True) + + def add_constraint(self, app_label, model_name, constraint): + self._append_option(app_label, model_name, "constraints", constraint) + + def remove_constraint(self, app_label, model_name, constraint_name): + self._remove_option(app_label, model_name, "constraints", constraint_name) + + def add_field(self, app_label, model_name, name, field, preserve_default): + # If preserve default is off, don't use the default for future state. + if not preserve_default: + field = field.clone() + field.default = NOT_PROVIDED + else: + field = field + model_key = app_label, model_name + self.models[model_key].fields[name] = field + if self._relations is not None: + self.resolve_model_field_relations(model_key, name, field) + # Delay rendering of relationships if it's not a relational field. + delay = not field.is_relation + self.reload_model(*model_key, delay=delay) + + def remove_field(self, app_label, model_name, name): + model_key = app_label, model_name + model_state = self.models[model_key] + old_field = model_state.fields.pop(name) + if self._relations is not None: + self.resolve_model_field_relations(model_key, name, old_field) + # Delay rendering of relationships if it's not a relational field. 
+ delay = not old_field.is_relation + self.reload_model(*model_key, delay=delay) + + def alter_field(self, app_label, model_name, name, field, preserve_default): + if not preserve_default: + field = field.clone() + field.default = NOT_PROVIDED + else: + field = field + model_key = app_label, model_name + fields = self.models[model_key].fields + if self._relations is not None: + old_field = fields.pop(name) + if old_field.is_relation: + self.resolve_model_field_relations(model_key, name, old_field) + fields[name] = field + if field.is_relation: + self.resolve_model_field_relations(model_key, name, field) + else: + fields[name] = field + # TODO: investigate if old relational fields must be reloaded or if + # it's sufficient if the new field is (#27737). + # Delay rendering of relationships if it's not a relational field and + # not referenced by a foreign key. + delay = not field.is_relation and not field_is_referenced( + self, model_key, (name, field) + ) + self.reload_model(*model_key, delay=delay) + + def rename_field(self, app_label, model_name, old_name, new_name): + model_key = app_label, model_name + model_state = self.models[model_key] + # Rename the field. + fields = model_state.fields + try: + found = fields.pop(old_name) + except KeyError: + raise FieldDoesNotExist( + f"{app_label}.{model_name} has no field named '{old_name}'" + ) + fields[new_name] = found + for field in fields.values(): + # Fix from_fields to refer to the new field. + from_fields = getattr(field, "from_fields", None) + if from_fields: + field.from_fields = tuple( + [ + new_name if from_field_name == old_name else from_field_name + for from_field_name in from_fields + ] + ) + # Fix index/unique_together to refer to the new field. + options = model_state.options + for option in ("index_together", "unique_together"): + if option in options: + options[option] = [ + [new_name if n == old_name else n for n in together] + for together in options[option] + ] + # Fix to_fields to refer to the new field. + delay = True + references = get_references(self, model_key, (old_name, found)) + for *_, field, reference in references: + delay = False + if reference.to: + remote_field, to_fields = reference.to + if getattr(remote_field, "field_name", None) == old_name: + remote_field.field_name = new_name + if to_fields: + field.to_fields = tuple( + [ + new_name if to_field_name == old_name else to_field_name + for to_field_name in to_fields + ] + ) + if self._relations is not None: + old_name_lower = old_name.lower() + new_name_lower = new_name.lower() + for to_model in self._relations.values(): + if old_name_lower in to_model[model_key]: + field = to_model[model_key].pop(old_name_lower) + field.name = new_name_lower + to_model[model_key][new_name_lower] = field + self.reload_model(*model_key, delay=delay) + + def _find_reload_model(self, app_label, model_name, delay=False): + if delay: + self.is_delayed = True + + related_models = set() + + try: + old_model = self.apps.get_model(app_label, model_name) + except LookupError: + pass + else: + # Get all relations to and from the old model before reloading, + # as _meta.apps may change + if delay: + related_models = get_related_models_tuples(old_model) + else: + related_models = get_related_models_recursive(old_model) + + # Get all outgoing references from the model to be rendered + model_state = self.models[(app_label, model_name)] + # Directly related models are the models pointed to by ForeignKeys, + # OneToOneFields, and ManyToManyFields. 
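+ # e.g. a models.ForeignKey("authors.Author") field contributes the key + # ("authors", "author") below (hypothetical app/model names).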
+ direct_related_models = set() + for field in model_state.fields.values(): + if field.is_relation: + if field.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT: + continue + rel_app_label, rel_model_name = _get_app_label_and_model_name( + field.related_model, app_label + ) + direct_related_models.add((rel_app_label, rel_model_name.lower())) + + # For all direct related models recursively get all related models. + related_models.update(direct_related_models) + for rel_app_label, rel_model_name in direct_related_models: + try: + rel_model = self.apps.get_model(rel_app_label, rel_model_name) + except LookupError: + pass + else: + if delay: + related_models.update(get_related_models_tuples(rel_model)) + else: + related_models.update(get_related_models_recursive(rel_model)) + + # Include the model itself + related_models.add((app_label, model_name)) + + return related_models + + def reload_model(self, app_label, model_name, delay=False): + if "apps" in self.__dict__: # hasattr would cache the property + related_models = self._find_reload_model(app_label, model_name, delay) + self._reload(related_models) + + def reload_models(self, models, delay=True): + if "apps" in self.__dict__: # hasattr would cache the property + related_models = set() + for app_label, model_name in models: + related_models.update( + self._find_reload_model(app_label, model_name, delay) + ) + self._reload(related_models) + + def _reload(self, related_models): + # Unregister all related models + with self.apps.bulk_update(): + for rel_app_label, rel_model_name in related_models: + self.apps.unregister_model(rel_app_label, rel_model_name) + + states_to_be_rendered = [] + # Gather all models states of those models that will be rerendered. + # This includes: + # 1. All related models of unmigrated apps + for model_state in self.apps.real_models: + if (model_state.app_label, model_state.name_lower) in related_models: + states_to_be_rendered.append(model_state) + + # 2. All related models of migrated apps + for rel_app_label, rel_model_name in related_models: + try: + model_state = self.models[rel_app_label, rel_model_name] + except KeyError: + pass + else: + states_to_be_rendered.append(model_state) + + # Render all models + self.apps.render_multiple(states_to_be_rendered) + + def update_model_field_relation( + self, + model, + model_key, + field_name, + field, + concretes, + ): + remote_model_key = resolve_relation(model, *model_key) + if remote_model_key[0] not in self.real_apps and remote_model_key in concretes: + remote_model_key = concretes[remote_model_key] + relations_to_remote_model = self._relations[remote_model_key] + if field_name in self.models[model_key].fields: + # The assert holds because it's a new relation, or an altered + # relation, in which case references have been removed by + # alter_field(). 
+ assert field_name not in relations_to_remote_model[model_key] + relations_to_remote_model[model_key][field_name] = field + else: + del relations_to_remote_model[model_key][field_name] + if not relations_to_remote_model[model_key]: + del relations_to_remote_model[model_key] + + def resolve_model_field_relations( + self, + model_key, + field_name, + field, + concretes=None, + ): + remote_field = field.remote_field + if not remote_field: + return + if concretes is None: + concretes, _ = self._get_concrete_models_mapping_and_proxy_models() + + self.update_model_field_relation( + remote_field.model, + model_key, + field_name, + field, + concretes, + ) + + through = getattr(remote_field, "through", None) + if not through: + return + self.update_model_field_relation( + through, model_key, field_name, field, concretes + ) + + def resolve_model_relations(self, model_key, concretes=None): + if concretes is None: + concretes, _ = self._get_concrete_models_mapping_and_proxy_models() + + model_state = self.models[model_key] + for field_name, field in model_state.fields.items(): + self.resolve_model_field_relations(model_key, field_name, field, concretes) + + def resolve_fields_and_relations(self): + # Resolve fields. + for model_state in self.models.values(): + for field_name, field in model_state.fields.items(): + field.name = field_name + # Resolve relations. + # {remote_model_key: {model_key: {field_name: field}}} + self._relations = defaultdict(partial(defaultdict, dict)) + concretes, proxies = self._get_concrete_models_mapping_and_proxy_models() + + for model_key in concretes: + self.resolve_model_relations(model_key, concretes) + + for model_key in proxies: + self._relations[model_key] = self._relations[concretes[model_key]] + + def get_concrete_model_key(self, model): + ( + concrete_models_mapping, + _, + ) = self._get_concrete_models_mapping_and_proxy_models() + model_key = make_model_tuple(model) + return concrete_models_mapping[model_key] + + def _get_concrete_models_mapping_and_proxy_models(self): + concrete_models_mapping = {} + proxy_models = {} + # Split models to proxy and concrete models. + for model_key, model_state in self.models.items(): + if model_state.options.get("proxy"): + proxy_models[model_key] = model_state + # Find a concrete model for the proxy. + concrete_models_mapping[ + model_key + ] = self._find_concrete_model_from_proxy( + proxy_models, + model_state, + ) + else: + concrete_models_mapping[model_key] = model_key + return concrete_models_mapping, proxy_models + + def _find_concrete_model_from_proxy(self, proxy_models, model_state): + for base in model_state.bases: + if not (isinstance(base, str) or issubclass(base, models.Model)): + continue + base_key = make_model_tuple(base) + base_state = proxy_models.get(base_key) + if not base_state: + # Concrete model found, stop looking at bases. 
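+ # e.g. for an illustrative proxy chain AdminUser -> StaffUser -> User, + # recursion unwinds until it reaches User, which is not in proxy_models.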
+ return base_key + return self._find_concrete_model_from_proxy(proxy_models, base_state) + + def clone(self): + """Return an exact copy of this ProjectState.""" + new_state = ProjectState( + models={k: v.clone() for k, v in self.models.items()}, + real_apps=self.real_apps, + ) + if "apps" in self.__dict__: + new_state.apps = self.apps.clone() + new_state.is_delayed = self.is_delayed + return new_state + + def clear_delayed_apps_cache(self): + if self.is_delayed and "apps" in self.__dict__: + del self.__dict__["apps"] + + @cached_property + def apps(self): + return StateApps(self.real_apps, self.models) + + @classmethod + def from_apps(cls, apps): + """Take an Apps and return a ProjectState matching it.""" + app_models = {} + for model in apps.get_models(include_swapped=True): + model_state = ModelState.from_model(model) + app_models[(model_state.app_label, model_state.name_lower)] = model_state + return cls(app_models) + + def __eq__(self, other): + return self.models == other.models and self.real_apps == other.real_apps + + +class AppConfigStub(AppConfig): + """Stub of an AppConfig. Only provides a label and a dict of models.""" + + def __init__(self, label): + self.apps = None + self.models = {} + # App-label and app-name are not the same thing, so technically passing + # in the label here is wrong. In practice, migrations don't care about + # the app name, but we need something unique, and the label works fine. + self.label = label + self.name = label + + def import_models(self): + self.models = self.apps.all_models[self.label] + + +class StateApps(Apps): + """ + Subclass of the global Apps registry class to better handle dynamic model + additions and removals. + """ + + def __init__(self, real_apps, models, ignore_swappable=False): + # Any apps in self.real_apps should have all their models included + # in the render. We don't use the original model instances as there + # are some variables that refer to the Apps object. + # FKs/M2Ms from real apps are also not included as they just + # mess things up with partial states (due to lack of dependencies) + self.real_models = [] + for app_label in real_apps: + app = global_apps.get_app_config(app_label) + for model in app.get_models(): + self.real_models.append(ModelState.from_model(model, exclude_rels=True)) + # Populate the app registry with a stub for each application. + app_labels = {model_state.app_label for model_state in models.values()} + app_configs = [ + AppConfigStub(label) for label in sorted([*real_apps, *app_labels]) + ] + super().__init__(app_configs) + + # These locks get in the way of copying as implemented in clone(), + # which is called whenever Django duplicates a StateApps before + # updating it. + self._lock = None + self.ready_event = None + + self.render_multiple([*models.values(), *self.real_models]) + + # There shouldn't be any operations pending at this point. + from django.core.checks.model_checks import _check_lazy_references + + ignore = ( + {make_model_tuple(settings.AUTH_USER_MODEL)} if ignore_swappable else set() + ) + errors = _check_lazy_references(self, ignore=ignore) + if errors: + raise ValueError("\n".join(error.msg for error in errors)) + + @contextmanager + def bulk_update(self): + # Avoid clearing each model's cache for each change. Instead, clear + # all caches when we're finished updating the model instances. 
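+ # Intended usage sketch: + #     with state.apps.bulk_update(): + #         ...  # many register/unregister calls + # so clear_cache() runs once at the end instead of once per change.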
+ ready = self.ready + self.ready = False + try: + yield + finally: + self.ready = ready + self.clear_cache() + + def render_multiple(self, model_states): + # We keep trying to render the models in a loop, ignoring invalid + # base errors, until the size of the unrendered models doesn't + # decrease by at least one, meaning there's a base dependency loop/ + # missing base. + if not model_states: + return + # Prevent that all model caches are expired for each render. + with self.bulk_update(): + unrendered_models = model_states + while unrendered_models: + new_unrendered_models = [] + for model in unrendered_models: + try: + model.render(self) + except InvalidBasesError: + new_unrendered_models.append(model) + if len(new_unrendered_models) == len(unrendered_models): + raise InvalidBasesError( + "Cannot resolve bases for %r\nThis can happen if you are " + "inheriting models from an app with migrations (e.g. " + "contrib.auth)\n in an app with no migrations; see " + "https://docs.djangoproject.com/en/%s/topics/migrations/" + "#dependencies for more" + % (new_unrendered_models, get_docs_version()) + ) + unrendered_models = new_unrendered_models + + def clone(self): + """Return a clone of this registry.""" + clone = StateApps([], {}) + clone.all_models = copy.deepcopy(self.all_models) + + for app_label in self.app_configs: + app_config = AppConfigStub(app_label) + app_config.apps = clone + app_config.import_models() + clone.app_configs[app_label] = app_config + + # No need to actually clone them, they'll never change + clone.real_models = self.real_models + return clone + + def register_model(self, app_label, model): + self.all_models[app_label][model._meta.model_name] = model + if app_label not in self.app_configs: + self.app_configs[app_label] = AppConfigStub(app_label) + self.app_configs[app_label].apps = self + self.app_configs[app_label].models[model._meta.model_name] = model + self.do_pending_operations(model) + self.clear_cache() + + def unregister_model(self, app_label, model_name): + try: + del self.all_models[app_label][model_name] + del self.app_configs[app_label].models[model_name] + except KeyError: + pass + + +class ModelState: + """ + Represent a Django Model. Don't use the actual Model class as it's not + designed to have its options changed - instead, mutate this one and then + render it into a Model as required. + + Note that while you are allowed to mutate .fields, you are not allowed + to mutate the Field instances inside there themselves - you must instead + assign new ones, as these are not detached during a clone. + """ + + def __init__( + self, app_label, name, fields, options=None, bases=None, managers=None + ): + self.app_label = app_label + self.name = name + self.fields = dict(fields) + self.options = options or {} + self.options.setdefault("indexes", []) + self.options.setdefault("constraints", []) + self.bases = bases or (models.Model,) + self.managers = managers or [] + for name, field in self.fields.items(): + # Sanity-check that fields are NOT already bound to a model. + if hasattr(field, "model"): + raise ValueError( + 'ModelState.fields cannot be bound to a model - "%s" is.' % name + ) + # Sanity-check that relation fields are NOT referring to a model class. + if field.is_relation and hasattr(field.related_model, "_meta"): + raise ValueError( + 'ModelState.fields cannot refer to a model class - "%s.to" does. ' + "Use a string reference instead." 
% name + ) + if field.many_to_many and hasattr(field.remote_field.through, "_meta"): + raise ValueError( + 'ModelState.fields cannot refer to a model class - "%s.through" ' + "does. Use a string reference instead." % name + ) + # Sanity-check that indexes have their name set. + for index in self.options["indexes"]: + if not index.name: + raise ValueError( + "Indexes passed to ModelState require a name attribute. " + "%r doesn't have one." % index + ) + + @cached_property + def name_lower(self): + return self.name.lower() + + def get_field(self, field_name): + if field_name == "_order": + field_name = self.options.get("order_with_respect_to", field_name) + return self.fields[field_name] + + @classmethod + def from_model(cls, model, exclude_rels=False): + """Given a model, return a ModelState representing it.""" + # Deconstruct the fields + fields = [] + for field in model._meta.local_fields: + if getattr(field, "remote_field", None) and exclude_rels: + continue + if isinstance(field, models.OrderWrt): + continue + name = field.name + try: + fields.append((name, field.clone())) + except TypeError as e: + raise TypeError( + "Couldn't reconstruct field %s on %s: %s" + % ( + name, + model._meta.label, + e, + ) + ) + if not exclude_rels: + for field in model._meta.local_many_to_many: + name = field.name + try: + fields.append((name, field.clone())) + except TypeError as e: + raise TypeError( + "Couldn't reconstruct m2m field %s on %s: %s" + % ( + name, + model._meta.object_name, + e, + ) + ) + # Extract the options + options = {} + for name in DEFAULT_NAMES: + # Ignore some special options + if name in ["apps", "app_label"]: + continue + elif name in model._meta.original_attrs: + if name == "unique_together": + ut = model._meta.original_attrs["unique_together"] + options[name] = set(normalize_together(ut)) + elif name == "index_together": + it = model._meta.original_attrs["index_together"] + options[name] = set(normalize_together(it)) + elif name == "indexes": + indexes = [idx.clone() for idx in model._meta.indexes] + for index in indexes: + if not index.name: + index.set_name_with_model(model) + options["indexes"] = indexes + elif name == "constraints": + options["constraints"] = [ + con.clone() for con in model._meta.constraints + ] + else: + options[name] = model._meta.original_attrs[name] + # If we're ignoring relationships, remove all field-listing model + # options (that option basically just means "make a stub model") + if exclude_rels: + for key in ["unique_together", "index_together", "order_with_respect_to"]: + if key in options: + del options[key] + # Private fields are ignored, so remove options that refer to them. + elif options.get("order_with_respect_to") in { + field.name for field in model._meta.private_fields + }: + del options["order_with_respect_to"] + + def flatten_bases(model): + bases = [] + for base in model.__bases__: + if hasattr(base, "_meta") and base._meta.abstract: + bases.extend(flatten_bases(base)) + else: + bases.append(base) + return bases + + # We can't rely on __mro__ directly because we only want to flatten + # abstract models and not the whole tree. However by recursing on + # __bases__ we may end up with duplicates and ordering issues, we + # therefore discard any duplicates and reorder the bases according + # to their index in the MRO. 
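+ # e.g. models.Model reached via several abstract parents collapses to a + # single entry, ordered by its position in model.__mro__.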
+ flattened_bases = sorted( + set(flatten_bases(model)), key=lambda x: model.__mro__.index(x) + ) + + # Make our record + bases = tuple( + (base._meta.label_lower if hasattr(base, "_meta") else base) + for base in flattened_bases + ) + # Ensure at least one base inherits from models.Model + if not any( + (isinstance(base, str) or issubclass(base, models.Model)) for base in bases + ): + bases = (models.Model,) + + managers = [] + manager_names = set() + default_manager_shim = None + for manager in model._meta.managers: + if manager.name in manager_names: + # Skip overridden managers. + continue + elif manager.use_in_migrations: + # Copy managers usable in migrations. + new_manager = copy.copy(manager) + new_manager._set_creation_counter() + elif manager is model._base_manager or manager is model._default_manager: + # Shim custom managers used as default and base managers. + new_manager = models.Manager() + new_manager.model = manager.model + new_manager.name = manager.name + if manager is model._default_manager: + default_manager_shim = new_manager + else: + continue + manager_names.add(manager.name) + managers.append((manager.name, new_manager)) + + # Ignore a shimmed default manager called objects if it's the only one. + if managers == [("objects", default_manager_shim)]: + managers = [] + + # Construct the new ModelState + return cls( + model._meta.app_label, + model._meta.object_name, + fields, + options, + bases, + managers, + ) + + def construct_managers(self): + """Deep-clone the managers using deconstruction.""" + # Sort all managers by their creation counter + sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter) + for mgr_name, manager in sorted_managers: + as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct() + if as_manager: + qs_class = import_string(qs_path) + yield mgr_name, qs_class.as_manager() + else: + manager_class = import_string(manager_path) + yield mgr_name, manager_class(*args, **kwargs) + + def clone(self): + """Return an exact copy of this ModelState.""" + return self.__class__( + app_label=self.app_label, + name=self.name, + fields=dict(self.fields), + # Since options are shallow-copied here, operations such as + # AddIndex must replace their option (e.g 'indexes') rather + # than mutating it. + options=dict(self.options), + bases=self.bases, + managers=list(self.managers), + ) + + def render(self, apps): + """Create a Model object from our current state into the given apps.""" + # First, make a Meta object + meta_contents = {"app_label": self.app_label, "apps": apps, **self.options} + meta = type("Meta", (), meta_contents) + # Then, work out our bases + try: + bases = tuple( + (apps.get_model(base) if isinstance(base, str) else base) + for base in self.bases + ) + except LookupError: + raise InvalidBasesError( + "Cannot resolve one or more bases from %r" % (self.bases,) + ) + # Clone fields for the body, add other bits. 
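+        # Fields are cloned rather than reused so that rendering never binds
+        # the Field instances held by this ModelState (see the class
+        # docstring: fields are not detached during a clone). Illustratively,
+        # two successive render() calls on equal states share no Field
+        # objects with each other or with the state itself.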
+ body = {name: field.clone() for name, field in self.fields.items()} + body["Meta"] = meta + body["__module__"] = "__fake__" + + # Restore managers + body.update(self.construct_managers()) + # Then, make a Model object (apps.register_model is called in __new__) + return type(self.name, bases, body) + + def get_index_by_name(self, name): + for index in self.options["indexes"]: + if index.name == name: + return index + raise ValueError("No index named %s on model %s" % (name, self.name)) + + def get_constraint_by_name(self, name): + for constraint in self.options["constraints"]: + if constraint.name == name: + return constraint + raise ValueError("No constraint named %s on model %s" % (name, self.name)) + + def __repr__(self): + return "<%s: '%s.%s'>" % (self.__class__.__name__, self.app_label, self.name) + + def __eq__(self, other): + return ( + (self.app_label == other.app_label) + and (self.name == other.name) + and (len(self.fields) == len(other.fields)) + and all( + k1 == k2 and f1.deconstruct()[1:] == f2.deconstruct()[1:] + for (k1, f1), (k2, f2) in zip( + sorted(self.fields.items()), + sorted(other.fields.items()), + ) + ) + and (self.options == other.options) + and (self.bases == other.bases) + and (self.managers == other.managers) + ) diff --git a/testbed/django__django/django/db/migrations/utils.py b/testbed/django__django/django/db/migrations/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6eb5a4c68f400a257cc8a749f7acc4c87d842e0b --- /dev/null +++ b/testbed/django__django/django/db/migrations/utils.py @@ -0,0 +1,129 @@ +import datetime +import re +from collections import namedtuple + +from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT + +FieldReference = namedtuple("FieldReference", "to through") + +COMPILED_REGEX_TYPE = type(re.compile("")) + + +class RegexObject: + def __init__(self, obj): + self.pattern = obj.pattern + self.flags = obj.flags + + def __eq__(self, other): + if not isinstance(other, RegexObject): + return NotImplemented + return self.pattern == other.pattern and self.flags == other.flags + + +def get_migration_name_timestamp(): + return datetime.datetime.now().strftime("%Y%m%d_%H%M") + + +def resolve_relation(model, app_label=None, model_name=None): + """ + Turn a model class or model reference string and return a model tuple. + + app_label and model_name are used to resolve the scope of recursive and + unscoped model relationship. + """ + if isinstance(model, str): + if model == RECURSIVE_RELATIONSHIP_CONSTANT: + if app_label is None or model_name is None: + raise TypeError( + "app_label and model_name must be provided to resolve " + "recursive relationships." + ) + return app_label, model_name + if "." in model: + app_label, model_name = model.split(".", 1) + return app_label, model_name.lower() + if app_label is None: + raise TypeError( + "app_label must be provided to resolve unscoped model relationships." + ) + return app_label, model.lower() + return model._meta.app_label, model._meta.model_name + + +def field_references( + model_tuple, + field, + reference_model_tuple, + reference_field_name=None, + reference_field=None, +): + """ + Return either False or a FieldReference if `field` references provided + context. + + False positives can be returned if `reference_field_name` is provided + without `reference_field` because of the introspection limitation it + incurs. This should not be an issue when this function is used to determine + whether or not an optimization can take place. 
+ """ + remote_field = field.remote_field + if not remote_field: + return False + references_to = None + references_through = None + if resolve_relation(remote_field.model, *model_tuple) == reference_model_tuple: + to_fields = getattr(field, "to_fields", None) + if ( + reference_field_name is None + or + # Unspecified to_field(s). + to_fields is None + or + # Reference to primary key. + ( + None in to_fields + and (reference_field is None or reference_field.primary_key) + ) + or + # Reference to field. + reference_field_name in to_fields + ): + references_to = (remote_field, to_fields) + through = getattr(remote_field, "through", None) + if through and resolve_relation(through, *model_tuple) == reference_model_tuple: + through_fields = remote_field.through_fields + if ( + reference_field_name is None + or + # Unspecified through_fields. + through_fields is None + or + # Reference to field. + reference_field_name in through_fields + ): + references_through = (remote_field, through_fields) + if not (references_to or references_through): + return False + return FieldReference(references_to, references_through) + + +def get_references(state, model_tuple, field_tuple=()): + """ + Generator of (model_state, name, field, reference) referencing + provided context. + + If field_tuple is provided only references to this particular field of + model_tuple will be generated. + """ + for state_model_tuple, model_state in state.models.items(): + for name, field in model_state.fields.items(): + reference = field_references( + state_model_tuple, field, model_tuple, *field_tuple + ) + if reference: + yield model_state, name, field, reference + + +def field_is_referenced(state, model_tuple, field_tuple): + """Return whether `field_tuple` is referenced by any state models.""" + return next(get_references(state, model_tuple, field_tuple), None) is not None diff --git a/testbed/django__django/django/db/migrations/writer.py b/testbed/django__django/django/db/migrations/writer.py new file mode 100644 index 0000000000000000000000000000000000000000..79b89b269dbca09b7476075a34a6f1ed8cc283d2 --- /dev/null +++ b/testbed/django__django/django/db/migrations/writer.py @@ -0,0 +1,314 @@ +import os +import re +from importlib import import_module + +from django import get_version +from django.apps import apps + +# SettingsReference imported for backwards compatibility in Django 2.2. 
+from django.conf import SettingsReference # NOQA +from django.db import migrations +from django.db.migrations.loader import MigrationLoader +from django.db.migrations.serializer import Serializer, serializer_factory +from django.utils.inspect import get_func_args +from django.utils.module_loading import module_dir +from django.utils.timezone import now + + +class OperationWriter: + def __init__(self, operation, indentation=2): + self.operation = operation + self.buff = [] + self.indentation = indentation + + def serialize(self): + def _write(_arg_name, _arg_value): + if _arg_name in self.operation.serialization_expand_args and isinstance( + _arg_value, (list, tuple, dict) + ): + if isinstance(_arg_value, dict): + self.feed("%s={" % _arg_name) + self.indent() + for key, value in _arg_value.items(): + key_string, key_imports = MigrationWriter.serialize(key) + arg_string, arg_imports = MigrationWriter.serialize(value) + args = arg_string.splitlines() + if len(args) > 1: + self.feed("%s: %s" % (key_string, args[0])) + for arg in args[1:-1]: + self.feed(arg) + self.feed("%s," % args[-1]) + else: + self.feed("%s: %s," % (key_string, arg_string)) + imports.update(key_imports) + imports.update(arg_imports) + self.unindent() + self.feed("},") + else: + self.feed("%s=[" % _arg_name) + self.indent() + for item in _arg_value: + arg_string, arg_imports = MigrationWriter.serialize(item) + args = arg_string.splitlines() + if len(args) > 1: + for arg in args[:-1]: + self.feed(arg) + self.feed("%s," % args[-1]) + else: + self.feed("%s," % arg_string) + imports.update(arg_imports) + self.unindent() + self.feed("],") + else: + arg_string, arg_imports = MigrationWriter.serialize(_arg_value) + args = arg_string.splitlines() + if len(args) > 1: + self.feed("%s=%s" % (_arg_name, args[0])) + for arg in args[1:-1]: + self.feed(arg) + self.feed("%s," % args[-1]) + else: + self.feed("%s=%s," % (_arg_name, arg_string)) + imports.update(arg_imports) + + imports = set() + name, args, kwargs = self.operation.deconstruct() + operation_args = get_func_args(self.operation.__init__) + + # See if this operation is in django.db.migrations. If it is, + # We can just use the fact we already have that imported, + # otherwise, we need to add an import for the operation class. + if getattr(migrations, name, None) == self.operation.__class__: + self.feed("migrations.%s(" % name) + else: + imports.add("import %s" % (self.operation.__class__.__module__)) + self.feed("%s.%s(" % (self.operation.__class__.__module__, name)) + + self.indent() + + for i, arg in enumerate(args): + arg_value = arg + arg_name = operation_args[i] + _write(arg_name, arg_value) + + i = len(args) + # Only iterate over remaining arguments + for arg_name in operation_args[i:]: + if arg_name in kwargs: # Don't sort to maintain signature order + arg_value = kwargs[arg_name] + _write(arg_name, arg_value) + + self.unindent() + self.feed("),") + return self.render(), imports + + def indent(self): + self.indentation += 1 + + def unindent(self): + self.indentation -= 1 + + def feed(self, line): + self.buff.append(" " * (self.indentation * 4) + line) + + def render(self): + return "\n".join(self.buff) + + +class MigrationWriter: + """ + Take a Migration instance and is able to produce the contents + of the migration file from it. 
+ """ + + def __init__(self, migration, include_header=True): + self.migration = migration + self.include_header = include_header + self.needs_manual_porting = False + + def as_string(self): + """Return a string of the file contents.""" + items = { + "replaces_str": "", + "initial_str": "", + } + + imports = set() + + # Deconstruct operations + operations = [] + for operation in self.migration.operations: + operation_string, operation_imports = OperationWriter(operation).serialize() + imports.update(operation_imports) + operations.append(operation_string) + items["operations"] = "\n".join(operations) + "\n" if operations else "" + + # Format dependencies and write out swappable dependencies right + dependencies = [] + for dependency in self.migration.dependencies: + if dependency[0] == "__setting__": + dependencies.append( + " migrations.swappable_dependency(settings.%s)," + % dependency[1] + ) + imports.add("from django.conf import settings") + else: + dependencies.append(" %s," % self.serialize(dependency)[0]) + items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else "" + + # Format imports nicely, swapping imports of functions from migration files + # for comments + migration_imports = set() + for line in list(imports): + if re.match(r"^import (.*)\.\d+[^\s]*$", line): + migration_imports.add(line.split("import")[1].strip()) + imports.remove(line) + self.needs_manual_porting = True + + # django.db.migrations is always used, but models import may not be. + # If models import exists, merge it with migrations import. + if "from django.db import models" in imports: + imports.discard("from django.db import models") + imports.add("from django.db import migrations, models") + else: + imports.add("from django.db import migrations") + + # Sort imports by the package / module to be imported (the part after + # "from" in "from ... import ..." or after "import" in "import ..."). + # First group the "import" statements, then "from ... import ...". + sorted_imports = sorted( + imports, key=lambda i: (i.split()[0] == "from", i.split()[1]) + ) + items["imports"] = "\n".join(sorted_imports) + "\n" if imports else "" + if migration_imports: + items["imports"] += ( + "\n\n# Functions from the following migrations need manual " + "copying.\n# Move them and any dependencies into this file, " + "then update the\n# RunPython operations to refer to the local " + "versions:\n# %s" + ) % "\n# ".join(sorted(migration_imports)) + # If there's a replaces, make a string for it + if self.migration.replaces: + items["replaces_str"] = ( + "\n replaces = %s\n" % self.serialize(self.migration.replaces)[0] + ) + # Hinting that goes into comment + if self.include_header: + items["migration_header"] = MIGRATION_HEADER_TEMPLATE % { + "version": get_version(), + "timestamp": now().strftime("%Y-%m-%d %H:%M"), + } + else: + items["migration_header"] = "" + + if self.migration.initial: + items["initial_str"] = "\n initial = True\n" + + return MIGRATION_TEMPLATE % items + + @property + def basedir(self): + migrations_package_name, _ = MigrationLoader.migrations_module( + self.migration.app_label + ) + + if migrations_package_name is None: + raise ValueError( + "Django can't create migrations for app '%s' because " + "migrations have been disabled via the MIGRATION_MODULES " + "setting." 
% self.migration.app_label + ) + + # See if we can import the migrations module directly + try: + migrations_module = import_module(migrations_package_name) + except ImportError: + pass + else: + try: + return module_dir(migrations_module) + except ValueError: + pass + + # Alright, see if it's a direct submodule of the app + app_config = apps.get_app_config(self.migration.app_label) + ( + maybe_app_name, + _, + migrations_package_basename, + ) = migrations_package_name.rpartition(".") + if app_config.name == maybe_app_name: + return os.path.join(app_config.path, migrations_package_basename) + + # In case of using MIGRATION_MODULES setting and the custom package + # doesn't exist, create one, starting from an existing package + existing_dirs, missing_dirs = migrations_package_name.split("."), [] + while existing_dirs: + missing_dirs.insert(0, existing_dirs.pop(-1)) + try: + base_module = import_module(".".join(existing_dirs)) + except (ImportError, ValueError): + continue + else: + try: + base_dir = module_dir(base_module) + except ValueError: + continue + else: + break + else: + raise ValueError( + "Could not locate an appropriate location to create " + "migrations package %s. Make sure the toplevel " + "package exists and can be imported." % migrations_package_name + ) + + final_dir = os.path.join(base_dir, *missing_dirs) + os.makedirs(final_dir, exist_ok=True) + for missing_dir in missing_dirs: + base_dir = os.path.join(base_dir, missing_dir) + with open(os.path.join(base_dir, "__init__.py"), "w"): + pass + + return final_dir + + @property + def filename(self): + return "%s.py" % self.migration.name + + @property + def path(self): + return os.path.join(self.basedir, self.filename) + + @classmethod + def serialize(cls, value): + return serializer_factory(value).serialize() + + @classmethod + def register_serializer(cls, type_, serializer): + Serializer.register(type_, serializer) + + @classmethod + def unregister_serializer(cls, type_): + Serializer.unregister(type_) + + +MIGRATION_HEADER_TEMPLATE = """\ +# Generated by Django %(version)s on %(timestamp)s + +""" + + +MIGRATION_TEMPLATE = """\ +%(migration_header)s%(imports)s + +class Migration(migrations.Migration): +%(replaces_str)s%(initial_str)s + dependencies = [ +%(dependencies)s\ + ] + + operations = [ +%(operations)s\ + ] +""" diff --git a/testbed/django__django/django/db/models/__init__.py b/testbed/django__django/django/db/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ffca81de91b51c670c3b75c437ce8cdb3382bcd4 --- /dev/null +++ b/testbed/django__django/django/db/models/__init__.py @@ -0,0 +1,115 @@ +from django.core.exceptions import ObjectDoesNotExist +from django.db.models import signals +from django.db.models.aggregates import * # NOQA +from django.db.models.aggregates import __all__ as aggregates_all +from django.db.models.constraints import * # NOQA +from django.db.models.constraints import __all__ as constraints_all +from django.db.models.deletion import ( + CASCADE, + DO_NOTHING, + PROTECT, + RESTRICT, + SET, + SET_DEFAULT, + SET_NULL, + ProtectedError, + RestrictedError, +) +from django.db.models.enums import * # NOQA +from django.db.models.enums import __all__ as enums_all +from django.db.models.expressions import ( + Case, + Exists, + Expression, + ExpressionList, + ExpressionWrapper, + F, + Func, + OrderBy, + OuterRef, + RowRange, + Subquery, + Value, + ValueRange, + When, + Window, + WindowFrame, +) +from django.db.models.fields import * # NOQA +from django.db.models.fields 
import __all__ as fields_all +from django.db.models.fields.files import FileField, ImageField +from django.db.models.fields.json import JSONField +from django.db.models.fields.proxy import OrderWrt +from django.db.models.indexes import * # NOQA +from django.db.models.indexes import __all__ as indexes_all +from django.db.models.lookups import Lookup, Transform +from django.db.models.manager import Manager +from django.db.models.query import Prefetch, QuerySet, prefetch_related_objects +from django.db.models.query_utils import FilteredRelation, Q + +# Imports that would create circular imports if sorted +from django.db.models.base import DEFERRED, Model # isort:skip +from django.db.models.fields.related import ( # isort:skip + ForeignKey, + ForeignObject, + OneToOneField, + ManyToManyField, + ForeignObjectRel, + ManyToOneRel, + ManyToManyRel, + OneToOneRel, +) + + +__all__ = aggregates_all + constraints_all + enums_all + fields_all + indexes_all +__all__ += [ + "ObjectDoesNotExist", + "signals", + "CASCADE", + "DO_NOTHING", + "PROTECT", + "RESTRICT", + "SET", + "SET_DEFAULT", + "SET_NULL", + "ProtectedError", + "RestrictedError", + "Case", + "Exists", + "Expression", + "ExpressionList", + "ExpressionWrapper", + "F", + "Func", + "OrderBy", + "OuterRef", + "RowRange", + "Subquery", + "Value", + "ValueRange", + "When", + "Window", + "WindowFrame", + "FileField", + "ImageField", + "JSONField", + "OrderWrt", + "Lookup", + "Transform", + "Manager", + "Prefetch", + "Q", + "QuerySet", + "prefetch_related_objects", + "DEFERRED", + "Model", + "FilteredRelation", + "ForeignKey", + "ForeignObject", + "OneToOneField", + "ManyToManyField", + "ForeignObjectRel", + "ManyToOneRel", + "ManyToManyRel", + "OneToOneRel", +] diff --git a/testbed/django__django/django/db/models/aggregates.py b/testbed/django__django/django/db/models/aggregates.py new file mode 100644 index 0000000000000000000000000000000000000000..a778cd413b3e0c7179511c9781c295ed7881d23d --- /dev/null +++ b/testbed/django__django/django/db/models/aggregates.py @@ -0,0 +1,210 @@ +""" +Classes to represent the definitions of aggregate functions. +""" +from django.core.exceptions import FieldError, FullResultSet +from django.db.models.expressions import Case, Func, Star, Value, When +from django.db.models.fields import IntegerField +from django.db.models.functions.comparison import Coalesce +from django.db.models.functions.mixins import ( + FixDurationInputMixin, + NumericOutputFieldMixin, +) + +__all__ = [ + "Aggregate", + "Avg", + "Count", + "Max", + "Min", + "StdDev", + "Sum", + "Variance", +] + + +class Aggregate(Func): + template = "%(function)s(%(distinct)s%(expressions)s)" + contains_aggregate = True + name = None + filter_template = "%s FILTER (WHERE %%(filter)s)" + window_compatible = True + allow_distinct = False + empty_result_set_value = None + + def __init__( + self, *expressions, distinct=False, filter=None, default=None, **extra + ): + if distinct and not self.allow_distinct: + raise TypeError("%s does not allow distinct." % self.__class__.__name__) + if default is not None and self.empty_result_set_value is not None: + raise TypeError(f"{self.__class__.__name__} does not allow default.") + self.distinct = distinct + self.filter = filter + self.default = default + super().__init__(*expressions, **extra) + + def get_source_fields(self): + # Don't return the filter expression since it's not a source field. 
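+        # This class's get_source_expressions() (defined below) appends
+        # self.filter when one is set; calling super()'s version here
+        # bypasses it. Illustratively (not from the original source), for
+        #
+        #     Sum("amount", filter=Q(active=True))
+        #
+        # the output field is inferred from "amount" alone, never from the
+        # filter condition.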
+ return [e._output_field_or_none for e in super().get_source_expressions()] + + def get_source_expressions(self): + source_expressions = super().get_source_expressions() + if self.filter: + return source_expressions + [self.filter] + return source_expressions + + def set_source_expressions(self, exprs): + self.filter = self.filter and exprs.pop() + return super().set_source_expressions(exprs) + + def resolve_expression( + self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False + ): + # Aggregates are not allowed in UPDATE queries, so ignore for_save + c = super().resolve_expression(query, allow_joins, reuse, summarize) + c.filter = c.filter and c.filter.resolve_expression( + query, allow_joins, reuse, summarize + ) + if summarize: + # Summarized aggregates cannot refer to summarized aggregates. + for ref in c.get_refs(): + if query.annotations[ref].is_summary: + raise FieldError( + f"Cannot compute {c.name}('{ref}'): '{ref}' is an aggregate" + ) + elif not self.is_summary: + # Call Aggregate.get_source_expressions() to avoid + # returning self.filter and including that in this loop. + expressions = super(Aggregate, c).get_source_expressions() + for index, expr in enumerate(expressions): + if expr.contains_aggregate: + before_resolved = self.get_source_expressions()[index] + name = ( + before_resolved.name + if hasattr(before_resolved, "name") + else repr(before_resolved) + ) + raise FieldError( + "Cannot compute %s('%s'): '%s' is an aggregate" + % (c.name, name, name) + ) + if (default := c.default) is None: + return c + if hasattr(default, "resolve_expression"): + default = default.resolve_expression(query, allow_joins, reuse, summarize) + if default._output_field_or_none is None: + default.output_field = c._output_field_or_none + else: + default = Value(default, c._output_field_or_none) + c.default = None # Reset the default argument before wrapping. 
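+        # The wrapping below means that, illustratively (not from the
+        # original source),
+        #
+        #     Sum("amount", default=0)
+        #
+        # resolves to roughly Coalesce(Sum("amount"), Value(0)), so an
+        # empty result set yields 0 instead of None.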
+ coalesce = Coalesce(c, default, output_field=c._output_field_or_none) + coalesce.is_summary = c.is_summary + return coalesce + + @property + def default_alias(self): + expressions = self.get_source_expressions() + if len(expressions) == 1 and hasattr(expressions[0], "name"): + return "%s__%s" % (expressions[0].name, self.name.lower()) + raise TypeError("Complex expressions require an alias") + + def get_group_by_cols(self): + return [] + + def as_sql(self, compiler, connection, **extra_context): + extra_context["distinct"] = "DISTINCT " if self.distinct else "" + if self.filter: + if connection.features.supports_aggregate_filter_clause: + try: + filter_sql, filter_params = self.filter.as_sql(compiler, connection) + except FullResultSet: + pass + else: + template = self.filter_template % extra_context.get( + "template", self.template + ) + sql, params = super().as_sql( + compiler, + connection, + template=template, + filter=filter_sql, + **extra_context, + ) + return sql, (*params, *filter_params) + else: + copy = self.copy() + copy.filter = None + source_expressions = copy.get_source_expressions() + condition = When(self.filter, then=source_expressions[0]) + copy.set_source_expressions([Case(condition)] + source_expressions[1:]) + return super(Aggregate, copy).as_sql( + compiler, connection, **extra_context + ) + return super().as_sql(compiler, connection, **extra_context) + + def _get_repr_options(self): + options = super()._get_repr_options() + if self.distinct: + options["distinct"] = self.distinct + if self.filter: + options["filter"] = self.filter + return options + + +class Avg(FixDurationInputMixin, NumericOutputFieldMixin, Aggregate): + function = "AVG" + name = "Avg" + allow_distinct = True + + +class Count(Aggregate): + function = "COUNT" + name = "Count" + output_field = IntegerField() + allow_distinct = True + empty_result_set_value = 0 + + def __init__(self, expression, filter=None, **extra): + if expression == "*": + expression = Star() + if isinstance(expression, Star) and filter is not None: + raise ValueError("Star cannot be used with filter. 
Please specify a field.") + super().__init__(expression, filter=filter, **extra) + + +class Max(Aggregate): + function = "MAX" + name = "Max" + + +class Min(Aggregate): + function = "MIN" + name = "Min" + + +class StdDev(NumericOutputFieldMixin, Aggregate): + name = "StdDev" + + def __init__(self, expression, sample=False, **extra): + self.function = "STDDEV_SAMP" if sample else "STDDEV_POP" + super().__init__(expression, **extra) + + def _get_repr_options(self): + return {**super()._get_repr_options(), "sample": self.function == "STDDEV_SAMP"} + + +class Sum(FixDurationInputMixin, Aggregate): + function = "SUM" + name = "Sum" + allow_distinct = True + + +class Variance(NumericOutputFieldMixin, Aggregate): + name = "Variance" + + def __init__(self, expression, sample=False, **extra): + self.function = "VAR_SAMP" if sample else "VAR_POP" + super().__init__(expression, **extra) + + def _get_repr_options(self): + return {**super()._get_repr_options(), "sample": self.function == "VAR_SAMP"} diff --git a/testbed/django__django/django/db/models/base.py b/testbed/django__django/django/db/models/base.py new file mode 100644 index 0000000000000000000000000000000000000000..0711ec0d617dd7f02f88a39709d7e39da3c5fa58 --- /dev/null +++ b/testbed/django__django/django/db/models/base.py @@ -0,0 +1,2584 @@ +import copy +import inspect +import warnings +from functools import partialmethod +from itertools import chain + +from asgiref.sync import sync_to_async + +import django +from django.apps import apps +from django.conf import settings +from django.core import checks +from django.core.exceptions import ( + NON_FIELD_ERRORS, + FieldDoesNotExist, + FieldError, + MultipleObjectsReturned, + ObjectDoesNotExist, + ValidationError, +) +from django.db import ( + DJANGO_VERSION_PICKLE_KEY, + DatabaseError, + connection, + connections, + router, + transaction, +) +from django.db.models import NOT_PROVIDED, ExpressionWrapper, IntegerField, Max, Value +from django.db.models.constants import LOOKUP_SEP +from django.db.models.constraints import CheckConstraint, UniqueConstraint +from django.db.models.deletion import CASCADE, Collector +from django.db.models.expressions import RawSQL +from django.db.models.fields.related import ( + ForeignObjectRel, + OneToOneField, + lazy_related_operation, + resolve_relation, +) +from django.db.models.functions import Coalesce +from django.db.models.manager import Manager +from django.db.models.options import Options +from django.db.models.query import F, Q +from django.db.models.signals import ( + class_prepared, + post_init, + post_save, + pre_init, + pre_save, +) +from django.db.models.utils import AltersData, make_model_tuple +from django.utils.encoding import force_str +from django.utils.hashable import make_hashable +from django.utils.text import capfirst, get_text_list +from django.utils.translation import gettext_lazy as _ + + +class Deferred: + def __repr__(self): + return "" + + def __str__(self): + return "" + + +DEFERRED = Deferred() + + +def subclass_exception(name, bases, module, attached_to): + """ + Create exception subclass. Used by ModelBase below. + + The exception is created in a way that allows it to be pickled, assuming + that the returned exception class will be added as an attribute to the + 'attached_to' class. + """ + return type( + name, + bases, + { + "__module__": module, + "__qualname__": "%s.%s" % (attached_to.__qualname__, name), + }, + ) + + +def _has_contribute_to_class(value): + # Only call contribute_to_class() if it's bound. 
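+    # Illustratively (not from the original source): a models.CharField()
+    # instance exposes a *bound* contribute_to_class() and is routed through
+    # ModelBase.add_to_class(), while an attribute that is itself a class is
+    # assigned with plain setattr() even if it defines contribute_to_class.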
+ return not inspect.isclass(value) and hasattr(value, "contribute_to_class") + + +class ModelBase(type): + """Metaclass for all models.""" + + def __new__(cls, name, bases, attrs, **kwargs): + super_new = super().__new__ + + # Also ensure initialization is only performed for subclasses of Model + # (excluding Model class itself). + parents = [b for b in bases if isinstance(b, ModelBase)] + if not parents: + return super_new(cls, name, bases, attrs) + + # Create the class. + module = attrs.pop("__module__") + new_attrs = {"__module__": module} + classcell = attrs.pop("__classcell__", None) + if classcell is not None: + new_attrs["__classcell__"] = classcell + attr_meta = attrs.pop("Meta", None) + # Pass all attrs without a (Django-specific) contribute_to_class() + # method to type.__new__() so that they're properly initialized + # (i.e. __set_name__()). + contributable_attrs = {} + for obj_name, obj in attrs.items(): + if _has_contribute_to_class(obj): + contributable_attrs[obj_name] = obj + else: + new_attrs[obj_name] = obj + new_class = super_new(cls, name, bases, new_attrs, **kwargs) + + abstract = getattr(attr_meta, "abstract", False) + meta = attr_meta or getattr(new_class, "Meta", None) + base_meta = getattr(new_class, "_meta", None) + + app_label = None + + # Look for an application configuration to attach the model to. + app_config = apps.get_containing_app_config(module) + + if getattr(meta, "app_label", None) is None: + if app_config is None: + if not abstract: + raise RuntimeError( + "Model class %s.%s doesn't declare an explicit " + "app_label and isn't in an application in " + "INSTALLED_APPS." % (module, name) + ) + + else: + app_label = app_config.label + + new_class.add_to_class("_meta", Options(meta, app_label)) + if not abstract: + new_class.add_to_class( + "DoesNotExist", + subclass_exception( + "DoesNotExist", + tuple( + x.DoesNotExist + for x in parents + if hasattr(x, "_meta") and not x._meta.abstract + ) + or (ObjectDoesNotExist,), + module, + attached_to=new_class, + ), + ) + new_class.add_to_class( + "MultipleObjectsReturned", + subclass_exception( + "MultipleObjectsReturned", + tuple( + x.MultipleObjectsReturned + for x in parents + if hasattr(x, "_meta") and not x._meta.abstract + ) + or (MultipleObjectsReturned,), + module, + attached_to=new_class, + ), + ) + if base_meta and not base_meta.abstract: + # Non-abstract child classes inherit some attributes from their + # non-abstract parent (unless an ABC comes before it in the + # method resolution order). + if not hasattr(meta, "ordering"): + new_class._meta.ordering = base_meta.ordering + if not hasattr(meta, "get_latest_by"): + new_class._meta.get_latest_by = base_meta.get_latest_by + + is_proxy = new_class._meta.proxy + + # If the model is a proxy, ensure that the base class + # hasn't been swapped out. + if is_proxy and base_meta and base_meta.swapped: + raise TypeError( + "%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped) + ) + + # Add remaining attributes (those with a contribute_to_class() method) + # to the class. + for obj_name, obj in contributable_attrs.items(): + new_class.add_to_class(obj_name, obj) + + # All the fields of any type declared on this model + new_fields = chain( + new_class._meta.local_fields, + new_class._meta.local_many_to_many, + new_class._meta.private_fields, + ) + field_names = {f.name for f in new_fields} + + # Basic setup for proxy models. 
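+        # Illustrative example (not part of the original source): a proxy
+        # such as
+        #
+        #     class OrderedUser(User):
+        #         class Meta:
+        #             proxy = True
+        #
+        # adds no fields of its own; the block below verifies that exactly
+        # one non-abstract base exists and reuses its _meta.concrete_model.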
+ if is_proxy: + base = None + for parent in [kls for kls in parents if hasattr(kls, "_meta")]: + if parent._meta.abstract: + if parent._meta.fields: + raise TypeError( + "Abstract base class containing model fields not " + "permitted for proxy model '%s'." % name + ) + else: + continue + if base is None: + base = parent + elif parent._meta.concrete_model is not base._meta.concrete_model: + raise TypeError( + "Proxy model '%s' has more than one non-abstract model base " + "class." % name + ) + if base is None: + raise TypeError( + "Proxy model '%s' has no non-abstract model base class." % name + ) + new_class._meta.setup_proxy(base) + new_class._meta.concrete_model = base._meta.concrete_model + else: + new_class._meta.concrete_model = new_class + + # Collect the parent links for multi-table inheritance. + parent_links = {} + for base in reversed([new_class] + parents): + # Conceptually equivalent to `if base is Model`. + if not hasattr(base, "_meta"): + continue + # Skip concrete parent classes. + if base != new_class and not base._meta.abstract: + continue + # Locate OneToOneField instances. + for field in base._meta.local_fields: + if isinstance(field, OneToOneField) and field.remote_field.parent_link: + related = resolve_relation(new_class, field.remote_field.model) + parent_links[make_model_tuple(related)] = field + + # Track fields inherited from base models. + inherited_attributes = set() + # Do the appropriate setup for any model parents. + for base in new_class.mro(): + if base not in parents or not hasattr(base, "_meta"): + # Things without _meta aren't functional models, so they're + # uninteresting parents. + inherited_attributes.update(base.__dict__) + continue + + parent_fields = base._meta.local_fields + base._meta.local_many_to_many + if not base._meta.abstract: + # Check for clashes between locally declared fields and those + # on the base classes. + for field in parent_fields: + if field.name in field_names: + raise FieldError( + "Local field %r in class %r clashes with field of " + "the same name from base class %r." + % ( + field.name, + name, + base.__name__, + ) + ) + else: + inherited_attributes.add(field.name) + + # Concrete classes... + base = base._meta.concrete_model + base_key = make_model_tuple(base) + if base_key in parent_links: + field = parent_links[base_key] + elif not is_proxy: + attr_name = "%s_ptr" % base._meta.model_name + field = OneToOneField( + base, + on_delete=CASCADE, + name=attr_name, + auto_created=True, + parent_link=True, + ) + + if attr_name in field_names: + raise FieldError( + "Auto-generated field '%s' in class %r for " + "parent_link to base class %r clashes with " + "declared field of the same name." + % ( + attr_name, + name, + base.__name__, + ) + ) + + # Only add the ptr field if it's not already present; + # e.g. migrations will already have it specified + if not hasattr(new_class, attr_name): + new_class.add_to_class(attr_name, field) + else: + field = None + new_class._meta.parents[base] = field + else: + base_parents = base._meta.parents.copy() + + # Add fields from abstract base class if it wasn't overridden. + for field in parent_fields: + if ( + field.name not in field_names + and field.name not in new_class.__dict__ + and field.name not in inherited_attributes + ): + new_field = copy.deepcopy(field) + new_class.add_to_class(field.name, new_field) + # Replace parent links defined on this base by the new + # field. It will be appropriately resolved if required. 
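+                        # Illustratively (not from the original source): if
+                        # the abstract base declared
+                        #
+                        #     place_ptr = models.OneToOneField(
+                        #         "Place",
+                        #         models.CASCADE,
+                        #         parent_link=True,
+                        #     )
+                        #
+                        # the deep copy (new_field) is swapped into
+                        # base_parents so MTI bookkeeping references the
+                        # copy rather than the original field.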
+ if field.one_to_one: + for parent, parent_link in base_parents.items(): + if field == parent_link: + base_parents[parent] = new_field + + # Pass any non-abstract parent classes onto child. + new_class._meta.parents.update(base_parents) + + # Inherit private fields (like GenericForeignKey) from the parent + # class + for field in base._meta.private_fields: + if field.name in field_names: + if not base._meta.abstract: + raise FieldError( + "Local field %r in class %r clashes with field of " + "the same name from base class %r." + % ( + field.name, + name, + base.__name__, + ) + ) + else: + field = copy.deepcopy(field) + if not base._meta.abstract: + field.mti_inherited = True + new_class.add_to_class(field.name, field) + + # Copy indexes so that index names are unique when models extend an + # abstract model. + new_class._meta.indexes = [ + copy.deepcopy(idx) for idx in new_class._meta.indexes + ] + + if abstract: + # Abstract base models can't be instantiated and don't appear in + # the list of models for an app. We do the final setup for them a + # little differently from normal models. + attr_meta.abstract = False + new_class.Meta = attr_meta + return new_class + + new_class._prepare() + new_class._meta.apps.register_model(new_class._meta.app_label, new_class) + return new_class + + def add_to_class(cls, name, value): + if _has_contribute_to_class(value): + value.contribute_to_class(cls, name) + else: + setattr(cls, name, value) + + def _prepare(cls): + """Create some methods once self._meta has been populated.""" + opts = cls._meta + opts._prepare(cls) + + if opts.order_with_respect_to: + cls.get_next_in_order = partialmethod( + cls._get_next_or_previous_in_order, is_next=True + ) + cls.get_previous_in_order = partialmethod( + cls._get_next_or_previous_in_order, is_next=False + ) + + # Defer creating accessors on the foreign class until it has been + # created and registered. If remote_field is None, we're ordering + # with respect to a GenericForeignKey and don't know what the + # foreign class is - we'll add those accessors later in + # contribute_to_class(). + if opts.order_with_respect_to.remote_field: + wrt = opts.order_with_respect_to + remote = wrt.remote_field.model + lazy_related_operation(make_foreign_order_accessors, cls, remote) + + # Give the class a docstring -- its definition. + if cls.__doc__ is None: + cls.__doc__ = "%s(%s)" % ( + cls.__name__, + ", ".join(f.name for f in opts.fields), + ) + + get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get( + opts.label_lower + ) + if get_absolute_url_override: + setattr(cls, "get_absolute_url", get_absolute_url_override) + + if not opts.managers: + if any(f.name == "objects" for f in opts.fields): + raise ValueError( + "Model %s must specify a custom Manager, because it has a " + "field named 'objects'." % cls.__name__ + ) + manager = Manager() + manager.auto_created = True + cls.add_to_class("objects", manager) + + # Set the name of _meta.indexes. This can't be done in + # Options.contribute_to_class() because fields haven't been added to + # the model at that point. 
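+        # Illustratively (not from the original source): an unnamed
+        # models.Index(fields=["title"]) declared in Meta.indexes receives
+        # its generated name here, derived from the table and field names by
+        # Index.set_name_with_model().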
+ for index in cls._meta.indexes: + if not index.name: + index.set_name_with_model(cls) + + class_prepared.send(sender=cls) + + @property + def _base_manager(cls): + return cls._meta.base_manager + + @property + def _default_manager(cls): + return cls._meta.default_manager + + +class ModelStateFieldsCacheDescriptor: + def __get__(self, instance, cls=None): + if instance is None: + return self + res = instance.fields_cache = {} + return res + + +class ModelState: + """Store model instance state.""" + + db = None + # If true, uniqueness validation checks will consider this a new, unsaved + # object. Necessary for correct validation of new instances of objects with + # explicit (non-auto) PKs. This impacts validation only; it has no effect + # on the actual save. + adding = True + fields_cache = ModelStateFieldsCacheDescriptor() + + +class Model(AltersData, metaclass=ModelBase): + def __init__(self, *args, **kwargs): + # Alias some things as locals to avoid repeat global lookups + cls = self.__class__ + opts = self._meta + _setattr = setattr + _DEFERRED = DEFERRED + if opts.abstract: + raise TypeError("Abstract models cannot be instantiated.") + + pre_init.send(sender=cls, args=args, kwargs=kwargs) + + # Set up the storage for instance state + self._state = ModelState() + + # There is a rather weird disparity here; if kwargs, it's set, then args + # overrides it. It should be one or the other; don't duplicate the work + # The reason for the kwargs check is that standard iterator passes in by + # args, and instantiation for iteration is 33% faster. + if len(args) > len(opts.concrete_fields): + # Daft, but matches old exception sans the err msg. + raise IndexError("Number of args exceeds number of fields") + + if not kwargs: + fields_iter = iter(opts.concrete_fields) + # The ordering of the zip calls matter - zip throws StopIteration + # when an iter throws it. So if the first iter throws it, the second + # is *not* consumed. We rely on this, so don't change the order + # without changing the logic. + for val, field in zip(args, fields_iter): + if val is _DEFERRED: + continue + _setattr(self, field.attname, val) + else: + # Slower, kwargs-ready version. + fields_iter = iter(opts.fields) + for val, field in zip(args, fields_iter): + if val is _DEFERRED: + continue + _setattr(self, field.attname, val) + if kwargs.pop(field.name, NOT_PROVIDED) is not NOT_PROVIDED: + raise TypeError( + f"{cls.__qualname__}() got both positional and " + f"keyword arguments for field '{field.name}'." + ) + + # Now we're left with the unprocessed fields that *must* come from + # keywords, or default. + + for field in fields_iter: + is_related_object = False + # Virtual field + if field.attname not in kwargs and field.column is None: + continue + if kwargs: + if isinstance(field.remote_field, ForeignObjectRel): + try: + # Assume object instance was passed in. + rel_obj = kwargs.pop(field.name) + is_related_object = True + except KeyError: + try: + # Object instance wasn't passed in -- must be an ID. + val = kwargs.pop(field.attname) + except KeyError: + val = field.get_default() + else: + try: + val = kwargs.pop(field.attname) + except KeyError: + # This is done with an exception rather than the + # default argument on pop because we don't want + # get_default() to be evaluated, and then not used. + # Refs #12057. + val = field.get_default() + else: + val = field.get_default() + + if is_related_object: + # If we are passed a related instance, set it using the + # field.name instead of field.attname (e.g. 
"user" instead of + # "user_id") so that the object gets properly cached (and type + # checked) by the RelatedObjectDescriptor. + if rel_obj is not _DEFERRED: + _setattr(self, field.name, rel_obj) + else: + if val is not _DEFERRED: + _setattr(self, field.attname, val) + + if kwargs: + property_names = opts._property_names + unexpected = () + for prop, value in kwargs.items(): + # Any remaining kwargs must correspond to properties or virtual + # fields. + if prop in property_names: + if value is not _DEFERRED: + _setattr(self, prop, value) + else: + try: + opts.get_field(prop) + except FieldDoesNotExist: + unexpected += (prop,) + else: + if value is not _DEFERRED: + _setattr(self, prop, value) + if unexpected: + unexpected_names = ", ".join(repr(n) for n in unexpected) + raise TypeError( + f"{cls.__name__}() got unexpected keyword arguments: " + f"{unexpected_names}" + ) + super().__init__() + post_init.send(sender=cls, instance=self) + + @classmethod + def from_db(cls, db, field_names, values): + if len(values) != len(cls._meta.concrete_fields): + values_iter = iter(values) + values = [ + next(values_iter) if f.attname in field_names else DEFERRED + for f in cls._meta.concrete_fields + ] + new = cls(*values) + new._state.adding = False + new._state.db = db + return new + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self) + + def __str__(self): + return "%s object (%s)" % (self.__class__.__name__, self.pk) + + def __eq__(self, other): + if not isinstance(other, Model): + return NotImplemented + if self._meta.concrete_model != other._meta.concrete_model: + return False + my_pk = self.pk + if my_pk is None: + return self is other + return my_pk == other.pk + + def __hash__(self): + if self.pk is None: + raise TypeError("Model instances without primary key value are unhashable") + return hash(self.pk) + + def __reduce__(self): + data = self.__getstate__() + data[DJANGO_VERSION_PICKLE_KEY] = django.__version__ + class_id = self._meta.app_label, self._meta.object_name + return model_unpickle, (class_id,), data + + def __getstate__(self): + """Hook to allow choosing the attributes to pickle.""" + state = self.__dict__.copy() + state["_state"] = copy.copy(state["_state"]) + state["_state"].fields_cache = state["_state"].fields_cache.copy() + # memoryview cannot be pickled, so cast it to bytes and store + # separately. + _memoryview_attrs = [] + for attr, value in state.items(): + if isinstance(value, memoryview): + _memoryview_attrs.append((attr, bytes(value))) + if _memoryview_attrs: + state["_memoryview_attrs"] = _memoryview_attrs + for attr, value in _memoryview_attrs: + state.pop(attr) + return state + + def __setstate__(self, state): + pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) + if pickled_version: + if pickled_version != django.__version__: + warnings.warn( + "Pickled model instance's Django version %s does not " + "match the current version %s." 
+ % (pickled_version, django.__version__), + RuntimeWarning, + stacklevel=2, + ) + else: + warnings.warn( + "Pickled model instance's Django version is not specified.", + RuntimeWarning, + stacklevel=2, + ) + if "_memoryview_attrs" in state: + for attr, value in state.pop("_memoryview_attrs"): + state[attr] = memoryview(value) + self.__dict__.update(state) + + def _get_pk_val(self, meta=None): + meta = meta or self._meta + return getattr(self, meta.pk.attname) + + def _set_pk_val(self, value): + for parent_link in self._meta.parents.values(): + if parent_link and parent_link != self._meta.pk: + setattr(self, parent_link.target_field.attname, value) + return setattr(self, self._meta.pk.attname, value) + + pk = property(_get_pk_val, _set_pk_val) + + def get_deferred_fields(self): + """ + Return a set containing names of deferred fields for this instance. + """ + return { + f.attname + for f in self._meta.concrete_fields + if f.attname not in self.__dict__ + } + + def refresh_from_db(self, using=None, fields=None): + """ + Reload field values from the database. + + By default, the reloading happens from the database this instance was + loaded from, or by the read router if this instance wasn't loaded from + any database. The using parameter will override the default. + + Fields can be used to specify which fields to reload. The fields + should be an iterable of field attnames. If fields is None, then + all non-deferred fields are reloaded. + + When accessing deferred fields of an instance, the deferred loading + of the field will call this method. + """ + if fields is None: + self._prefetched_objects_cache = {} + else: + prefetched_objects_cache = getattr(self, "_prefetched_objects_cache", ()) + for field in fields: + if field in prefetched_objects_cache: + del prefetched_objects_cache[field] + fields.remove(field) + if not fields: + return + if any(LOOKUP_SEP in f for f in fields): + raise ValueError( + 'Found "%s" in fields argument. Relations and transforms ' + "are not allowed in fields." % LOOKUP_SEP + ) + + hints = {"instance": self} + db_instance_qs = self.__class__._base_manager.db_manager( + using, hints=hints + ).filter(pk=self.pk) + + # Use provided fields, if not set then reload all non-deferred fields. + deferred_fields = self.get_deferred_fields() + if fields is not None: + fields = list(fields) + db_instance_qs = db_instance_qs.only(*fields) + elif deferred_fields: + fields = [ + f.attname + for f in self._meta.concrete_fields + if f.attname not in deferred_fields + ] + db_instance_qs = db_instance_qs.only(*fields) + + db_instance = db_instance_qs.get() + non_loaded_fields = db_instance.get_deferred_fields() + for field in self._meta.concrete_fields: + if field.attname in non_loaded_fields: + # This field wasn't refreshed - skip ahead. + continue + setattr(self, field.attname, getattr(db_instance, field.attname)) + # Clear cached foreign keys. + if field.is_relation and field.is_cached(self): + field.delete_cached_value(self) + + # Clear cached relations. + for field in self._meta.related_objects: + if field.is_cached(self): + field.delete_cached_value(self) + + # Clear cached private relations. 
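+        # e.g. a cached GenericForeignKey target (illustrative; generic
+        # relations are the typical private fields) is dropped so the next
+        # access re-fetches it for the refreshed instance.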
+ for field in self._meta.private_fields: + if field.is_relation and field.is_cached(self): + field.delete_cached_value(self) + + self._state.db = db_instance._state.db + + async def arefresh_from_db(self, using=None, fields=None): + return await sync_to_async(self.refresh_from_db)(using=using, fields=fields) + + def serializable_value(self, field_name): + """ + Return the value of the field name for this instance. If the field is + a foreign key, return the id value instead of the object. If there's + no Field object with this name on the model, return the model + attribute's value. + + Used to serialize a field's value (in the serializer, or form output, + for example). Normally, you would just access the attribute directly + and not use this method. + """ + try: + field = self._meta.get_field(field_name) + except FieldDoesNotExist: + return getattr(self, field_name) + return getattr(self, field.attname) + + def save( + self, force_insert=False, force_update=False, using=None, update_fields=None + ): + """ + Save the current instance. Override this in a subclass if you want to + control the saving process. + + The 'force_insert' and 'force_update' parameters can be used to insist + that the "save" must be an SQL insert or update (or equivalent for + non-SQL backends), respectively. Normally, they should not be set. + """ + self._prepare_related_fields_for_save(operation_name="save") + + using = using or router.db_for_write(self.__class__, instance=self) + if force_insert and (force_update or update_fields): + raise ValueError("Cannot force both insert and updating in model saving.") + + deferred_fields = self.get_deferred_fields() + if update_fields is not None: + # If update_fields is empty, skip the save. We do also check for + # no-op saves later on for inheritance cases. This bailout is + # still needed for skipping signal sending. + if not update_fields: + return + + update_fields = frozenset(update_fields) + field_names = self._meta._non_pk_concrete_field_names + non_model_fields = update_fields.difference(field_names) + + if non_model_fields: + raise ValueError( + "The following fields do not exist in this model, are m2m " + "fields, or are non-concrete fields: %s" + % ", ".join(non_model_fields) + ) + + # If saving to the same database, and this model is deferred, then + # automatically do an "update_fields" save on the loaded fields. + elif not force_insert and deferred_fields and using == self._state.db: + field_names = set() + for field in self._meta.concrete_fields: + if not field.primary_key and not hasattr(field, "through"): + field_names.add(field.attname) + loaded_fields = field_names.difference(deferred_fields) + if loaded_fields: + update_fields = frozenset(loaded_fields) + + self.save_base( + using=using, + force_insert=force_insert, + force_update=force_update, + update_fields=update_fields, + ) + + save.alters_data = True + + async def asave( + self, force_insert=False, force_update=False, using=None, update_fields=None + ): + return await sync_to_async(self.save)( + force_insert=force_insert, + force_update=force_update, + using=using, + update_fields=update_fields, + ) + + asave.alters_data = True + + @classmethod + def _validate_force_insert(cls, force_insert): + if force_insert is False: + return () + if force_insert is True: + return (cls,) + if not isinstance(force_insert, tuple): + raise TypeError("force_insert must be a bool or tuple.") + for member in force_insert: + if not isinstance(member, ModelBase): + raise TypeError( + f"Invalid force_insert member. 
{member!r} must be a model subclass." + ) + if not issubclass(cls, member): + raise TypeError( + f"Invalid force_insert member. {member.__qualname__} must be a " + f"base of {cls.__qualname__}." + ) + return force_insert + + def save_base( + self, + raw=False, + force_insert=False, + force_update=False, + using=None, + update_fields=None, + ): + """ + Handle the parts of saving which should be done only once per save, + yet need to be done in raw saves, too. This includes some sanity + checks and signal sending. + + The 'raw' argument is telling save_base not to save any parent + models and not to do any changes to the values before save. This + is used by fixture loading. + """ + using = using or router.db_for_write(self.__class__, instance=self) + assert not (force_insert and (force_update or update_fields)) + assert update_fields is None or update_fields + cls = origin = self.__class__ + # Skip proxies, but keep the origin as the proxy model. + if cls._meta.proxy: + cls = cls._meta.concrete_model + meta = cls._meta + if not meta.auto_created: + pre_save.send( + sender=origin, + instance=self, + raw=raw, + using=using, + update_fields=update_fields, + ) + # A transaction isn't needed if one query is issued. + if meta.parents: + context_manager = transaction.atomic(using=using, savepoint=False) + else: + context_manager = transaction.mark_for_rollback_on_error(using=using) + with context_manager: + parent_inserted = False + if not raw: + # Validate force insert only when parents are inserted. + force_insert = self._validate_force_insert(force_insert) + parent_inserted = self._save_parents( + cls, using, update_fields, force_insert + ) + updated = self._save_table( + raw, + cls, + force_insert or parent_inserted, + force_update, + using, + update_fields, + ) + # Store the database on which the object was saved + self._state.db = using + # Once saved, this is no longer a to-be-added instance. + self._state.adding = False + + # Signal that the save is complete + if not meta.auto_created: + post_save.send( + sender=origin, + instance=self, + created=(not updated), + update_fields=update_fields, + raw=raw, + using=using, + ) + + save_base.alters_data = True + + def _save_parents( + self, cls, using, update_fields, force_insert, updated_parents=None + ): + """Save all the parents of cls using values from self.""" + meta = cls._meta + inserted = False + if updated_parents is None: + updated_parents = {} + for parent, field in meta.parents.items(): + # Make sure the link fields are synced between parent and self. + if ( + field + and getattr(self, parent._meta.pk.attname) is None + and getattr(self, field.attname) is not None + ): + setattr(self, parent._meta.pk.attname, getattr(self, field.attname)) + if (parent_updated := updated_parents.get(parent)) is None: + parent_inserted = self._save_parents( + cls=parent, + using=using, + update_fields=update_fields, + force_insert=force_insert, + updated_parents=updated_parents, + ) + updated = self._save_table( + cls=parent, + using=using, + update_fields=update_fields, + force_insert=parent_inserted or issubclass(parent, force_insert), + ) + if not updated: + inserted = True + updated_parents[parent] = updated + elif not parent_updated: + inserted = True + # Set the parent's PK value to self. + if field: + setattr(self, field.attname, self._get_pk_val(parent._meta)) + # Since we didn't have an instance of the parent handy set + # attname directly, bypassing the descriptor. 
Invalidate + # the related object cache, in case it's been accidentally + # populated. A fresh instance will be re-built from the + # database if necessary. + if field.is_cached(self): + field.delete_cached_value(self) + return inserted + + def _save_table( + self, + raw=False, + cls=None, + force_insert=False, + force_update=False, + using=None, + update_fields=None, + ): + """ + Do the heavy-lifting involved in saving. Update or insert the data + for a single table. + """ + meta = cls._meta + non_pks = [f for f in meta.local_concrete_fields if not f.primary_key] + + if update_fields: + non_pks = [ + f + for f in non_pks + if f.name in update_fields or f.attname in update_fields + ] + + pk_val = self._get_pk_val(meta) + if pk_val is None: + pk_val = meta.pk.get_pk_value_on_save(self) + setattr(self, meta.pk.attname, pk_val) + pk_set = pk_val is not None + if not pk_set and (force_update or update_fields): + raise ValueError("Cannot force an update in save() with no primary key.") + updated = False + # Skip an UPDATE when adding an instance and primary key has a default. + if ( + not raw + and not force_insert + and self._state.adding + and ( + (meta.pk.default and meta.pk.default is not NOT_PROVIDED) + or (meta.pk.db_default and meta.pk.db_default is not NOT_PROVIDED) + ) + ): + force_insert = True + # If possible, try an UPDATE. If that doesn't update anything, do an INSERT. + if pk_set and not force_insert: + base_qs = cls._base_manager.using(using) + values = [ + ( + f, + None, + (getattr(self, f.attname) if raw else f.pre_save(self, False)), + ) + for f in non_pks + ] + forced_update = update_fields or force_update + updated = self._do_update( + base_qs, using, pk_val, values, update_fields, forced_update + ) + if force_update and not updated: + raise DatabaseError("Forced update did not affect any rows.") + if update_fields and not updated: + raise DatabaseError("Save with update_fields did not affect any rows.") + if not updated: + if meta.order_with_respect_to: + # If this is a model with an order_with_respect_to + # autopopulate the _order field + field = meta.order_with_respect_to + filter_args = field.get_filter_kwargs_for_object(self) + self._order = ( + cls._base_manager.using(using) + .filter(**filter_args) + .aggregate( + _order__max=Coalesce( + ExpressionWrapper( + Max("_order") + Value(1), output_field=IntegerField() + ), + Value(0), + ), + )["_order__max"] + ) + fields = meta.local_concrete_fields + if not pk_set: + fields = [f for f in fields if f is not meta.auto_field] + + returning_fields = meta.db_returning_fields + results = self._do_insert( + cls._base_manager, using, fields, returning_fields, raw + ) + if results: + for value, field in zip(results[0], returning_fields): + setattr(self, field.attname, value) + return updated + + def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update): + """ + Try to update the model. Return True if the model was updated (if an + update query was done and a matching row was found in the DB). + """ + filtered = base_qs.filter(pk=pk_val) + if not values: + # We can end up here when saving a model in inheritance chain where + # update_fields doesn't target any field in current model. In that + # case we just say the update succeeded. Another case ending up here + # is a model with just PK - in that case check that the PK still + # exists. 
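+            # Illustratively (not from the original source): an MTI child
+            # saved with update_fields naming only child-table columns
+            # reaches this point for the parent table with no values and
+            # must still report success.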
+ return update_fields is not None or filtered.exists() + if self._meta.select_on_save and not forced_update: + return ( + filtered.exists() + and + # It may happen that the object is deleted from the DB right after + # this check, causing the subsequent UPDATE to return zero matching + # rows. The same result can occur in some rare cases when the + # database returns zero despite the UPDATE being executed + # successfully (a row is matched and updated). In order to + # distinguish these two cases, the object's existence in the + # database is again checked for if the UPDATE query returns 0. + (filtered._update(values) > 0 or filtered.exists()) + ) + return filtered._update(values) > 0 + + def _do_insert(self, manager, using, fields, returning_fields, raw): + """ + Do an INSERT. If returning_fields is defined then this method should + return the newly created data for the model. + """ + return manager._insert( + [self], + fields=fields, + returning_fields=returning_fields, + using=using, + raw=raw, + ) + + def _prepare_related_fields_for_save(self, operation_name, fields=None): + # Ensure that a model instance without a PK hasn't been assigned to + # a ForeignKey, GenericForeignKey or OneToOneField on this model. If + # the field is nullable, allowing the save would result in silent data + # loss. + for field in self._meta.concrete_fields: + if fields and field not in fields: + continue + # If the related field isn't cached, then an instance hasn't been + # assigned and there's no need to worry about this check. + if field.is_relation and field.is_cached(self): + obj = getattr(self, field.name, None) + if not obj: + continue + # A pk may have been assigned manually to a model instance not + # saved to the database (or auto-generated in a case like + # UUIDField), but we allow the save to proceed and rely on the + # database to raise an IntegrityError if applicable. If + # constraints aren't supported by the database, there's the + # unavoidable risk of data corruption. + if obj.pk is None: + # Remove the object from a related instance cache. + if not field.remote_field.multiple: + field.remote_field.delete_cached_value(obj) + raise ValueError( + "%s() prohibited to prevent data loss due to unsaved " + "related object '%s'." % (operation_name, field.name) + ) + elif getattr(self, field.attname) in field.empty_values: + # Set related object if it has been saved after an + # assignment. + setattr(self, field.name, obj) + # If the relationship's pk/to_field was changed, clear the + # cached relationship. + if getattr(obj, field.target_field.attname) != getattr( + self, field.attname + ): + field.delete_cached_value(self) + # GenericForeignKeys are private. + for field in self._meta.private_fields: + if fields and field not in fields: + continue + if ( + field.is_relation + and field.is_cached(self) + and hasattr(field, "fk_field") + ): + obj = field.get_cached_value(self, default=None) + if obj and obj.pk is None: + raise ValueError( + f"{operation_name}() prohibited to prevent data loss due to " + f"unsaved related object '{field.name}'." + ) + + def delete(self, using=None, keep_parents=False): + if self.pk is None: + raise ValueError( + "%s object can't be deleted because its %s attribute is set " + "to None." 
% (self._meta.object_name, self._meta.pk.attname) + ) + using = using or router.db_for_write(self.__class__, instance=self) + collector = Collector(using=using, origin=self) + collector.collect([self], keep_parents=keep_parents) + return collector.delete() + + delete.alters_data = True + + async def adelete(self, using=None, keep_parents=False): + return await sync_to_async(self.delete)( + using=using, + keep_parents=keep_parents, + ) + + adelete.alters_data = True + + def _get_FIELD_display(self, field): + value = getattr(self, field.attname) + choices_dict = dict(make_hashable(field.flatchoices)) + # force_str() to coerce lazy strings. + return force_str( + choices_dict.get(make_hashable(value), value), strings_only=True + ) + + def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): + if not self.pk: + raise ValueError("get_next/get_previous cannot be used on unsaved objects.") + op = "gt" if is_next else "lt" + order = "" if is_next else "-" + param = getattr(self, field.attname) + q = Q.create([(field.name, param), (f"pk__{op}", self.pk)], connector=Q.AND) + q = Q.create([q, (f"{field.name}__{op}", param)], connector=Q.OR) + qs = ( + self.__class__._default_manager.using(self._state.db) + .filter(**kwargs) + .filter(q) + .order_by("%s%s" % (order, field.name), "%spk" % order) + ) + try: + return qs[0] + except IndexError: + raise self.DoesNotExist( + "%s matching query does not exist." % self.__class__._meta.object_name + ) + + def _get_next_or_previous_in_order(self, is_next): + cachename = "__%s_order_cache" % is_next + if not hasattr(self, cachename): + op = "gt" if is_next else "lt" + order = "_order" if is_next else "-_order" + order_field = self._meta.order_with_respect_to + filter_args = order_field.get_filter_kwargs_for_object(self) + obj = ( + self.__class__._default_manager.filter(**filter_args) + .filter( + **{ + "_order__%s" + % op: self.__class__._default_manager.values("_order").filter( + **{self._meta.pk.name: self.pk} + ) + } + ) + .order_by(order)[:1] + .get() + ) + setattr(self, cachename, obj) + return getattr(self, cachename) + + def _get_field_value_map(self, meta, exclude=None): + if exclude is None: + exclude = set() + meta = meta or self._meta + return { + field.name: Value(getattr(self, field.attname), field) + for field in meta.local_concrete_fields + if field.name not in exclude + } + + def prepare_database_save(self, field): + if self.pk is None: + raise ValueError( + "Unsaved model instance %r cannot be used in an ORM query." % self + ) + return getattr(self, field.remote_field.get_related_field().attname) + + def clean(self): + """ + Hook for doing any extra model-wide validation after clean() has been + called on every field by self.clean_fields. Any ValidationError raised + by this method will not be associated with a particular field; it will + have a special-case association with the field defined by NON_FIELD_ERRORS. + """ + pass + + def validate_unique(self, exclude=None): + """ + Check unique constraints on the model and raise ValidationError if any + failed. + """ + unique_checks, date_checks = self._get_unique_checks(exclude=exclude) + + errors = self._perform_unique_checks(unique_checks) + date_errors = self._perform_date_checks(date_checks) + + for k, v in date_errors.items(): + errors.setdefault(k, []).extend(v) + + if errors: + raise ValidationError(errors) + + def _get_unique_checks(self, exclude=None, include_meta_constraints=False): + """ + Return a list of checks to perform. 
Since validate_unique() could be + called from a ModelForm, some fields may have been excluded; we can't + perform a unique check on a model that is missing fields involved + in that check. Fields that did not validate should also be excluded, + but they need to be passed in via the exclude argument. + """ + if exclude is None: + exclude = set() + unique_checks = [] + + unique_togethers = [(self.__class__, self._meta.unique_together)] + constraints = [] + if include_meta_constraints: + constraints = [(self.__class__, self._meta.total_unique_constraints)] + for parent_class in self._meta.get_parent_list(): + if parent_class._meta.unique_together: + unique_togethers.append( + (parent_class, parent_class._meta.unique_together) + ) + if include_meta_constraints and parent_class._meta.total_unique_constraints: + constraints.append( + (parent_class, parent_class._meta.total_unique_constraints) + ) + + for model_class, unique_together in unique_togethers: + for check in unique_together: + if not any(name in exclude for name in check): + # Add the check if the field isn't excluded. + unique_checks.append((model_class, tuple(check))) + + if include_meta_constraints: + for model_class, model_constraints in constraints: + for constraint in model_constraints: + if not any(name in exclude for name in constraint.fields): + unique_checks.append((model_class, constraint.fields)) + + # These are checks for the unique_for_. + date_checks = [] + + # Gather a list of checks for fields declared as unique and add them to + # the list of checks. + + fields_with_class = [(self.__class__, self._meta.local_fields)] + for parent_class in self._meta.get_parent_list(): + fields_with_class.append((parent_class, parent_class._meta.local_fields)) + + for model_class, fields in fields_with_class: + for f in fields: + name = f.name + if name in exclude: + continue + if f.unique: + unique_checks.append((model_class, (name,))) + if f.unique_for_date and f.unique_for_date not in exclude: + date_checks.append((model_class, "date", name, f.unique_for_date)) + if f.unique_for_year and f.unique_for_year not in exclude: + date_checks.append((model_class, "year", name, f.unique_for_year)) + if f.unique_for_month and f.unique_for_month not in exclude: + date_checks.append((model_class, "month", name, f.unique_for_month)) + return unique_checks, date_checks + + def _perform_unique_checks(self, unique_checks): + errors = {} + + for model_class, unique_check in unique_checks: + # Try to look up an existing object with the same values as this + # object's values for all the unique field. + + lookup_kwargs = {} + for field_name in unique_check: + f = self._meta.get_field(field_name) + lookup_value = getattr(self, f.attname) + # TODO: Handle multiple backends with different feature flags. + if lookup_value is None or ( + lookup_value == "" + and connection.features.interprets_empty_strings_as_nulls + ): + # no value, skip the lookup + continue + if f.primary_key and not self._state.adding: + # no need to check for unique primary key when editing + continue + lookup_kwargs[str(field_name)] = lookup_value + + # some fields were skipped, no reason to do the check + if len(unique_check) != len(lookup_kwargs): + continue + + qs = model_class._default_manager.filter(**lookup_kwargs) + + # Exclude the current object from the query if we are editing an + # instance (as opposed to creating a new one) + # Note that we need to use the pk as defined by model_class, not + # self.pk. 
These can be different fields because model inheritance + # allows single model to have effectively multiple primary keys. + # Refs #17615. + model_class_pk = self._get_pk_val(model_class._meta) + if not self._state.adding and model_class_pk is not None: + qs = qs.exclude(pk=model_class_pk) + if qs.exists(): + if len(unique_check) == 1: + key = unique_check[0] + else: + key = NON_FIELD_ERRORS + errors.setdefault(key, []).append( + self.unique_error_message(model_class, unique_check) + ) + + return errors + + def _perform_date_checks(self, date_checks): + errors = {} + for model_class, lookup_type, field, unique_for in date_checks: + lookup_kwargs = {} + # there's a ticket to add a date lookup, we can remove this special + # case if that makes it's way in + date = getattr(self, unique_for) + if date is None: + continue + if lookup_type == "date": + lookup_kwargs["%s__day" % unique_for] = date.day + lookup_kwargs["%s__month" % unique_for] = date.month + lookup_kwargs["%s__year" % unique_for] = date.year + else: + lookup_kwargs["%s__%s" % (unique_for, lookup_type)] = getattr( + date, lookup_type + ) + lookup_kwargs[field] = getattr(self, field) + + qs = model_class._default_manager.filter(**lookup_kwargs) + # Exclude the current object from the query if we are editing an + # instance (as opposed to creating a new one) + if not self._state.adding and self.pk is not None: + qs = qs.exclude(pk=self.pk) + + if qs.exists(): + errors.setdefault(field, []).append( + self.date_error_message(lookup_type, field, unique_for) + ) + return errors + + def date_error_message(self, lookup_type, field_name, unique_for): + opts = self._meta + field = opts.get_field(field_name) + return ValidationError( + message=field.error_messages["unique_for_date"], + code="unique_for_date", + params={ + "model": self, + "model_name": capfirst(opts.verbose_name), + "lookup_type": lookup_type, + "field": field_name, + "field_label": capfirst(field.verbose_name), + "date_field": unique_for, + "date_field_label": capfirst(opts.get_field(unique_for).verbose_name), + }, + ) + + def unique_error_message(self, model_class, unique_check): + opts = model_class._meta + + params = { + "model": self, + "model_class": model_class, + "model_name": capfirst(opts.verbose_name), + "unique_check": unique_check, + } + + # A unique field + if len(unique_check) == 1: + field = opts.get_field(unique_check[0]) + params["field_label"] = capfirst(field.verbose_name) + return ValidationError( + message=field.error_messages["unique"], + code="unique", + params=params, + ) + + # unique_together + else: + field_labels = [ + capfirst(opts.get_field(f).verbose_name) for f in unique_check + ] + params["field_labels"] = get_text_list(field_labels, _("and")) + return ValidationError( + message=_("%(model_name)s with this %(field_labels)s already exists."), + code="unique_together", + params=params, + ) + + def get_constraints(self): + constraints = [(self.__class__, self._meta.constraints)] + for parent_class in self._meta.get_parent_list(): + if parent_class._meta.constraints: + constraints.append((parent_class, parent_class._meta.constraints)) + return constraints + + def validate_constraints(self, exclude=None): + constraints = self.get_constraints() + using = router.db_for_write(self.__class__, instance=self) + + errors = {} + for model_class, model_constraints in constraints: + for constraint in model_constraints: + try: + constraint.validate(model_class, self, exclude=exclude, using=using) + except ValidationError as e: + if ( + getattr(e, "code", 
None) == "unique" + and len(constraint.fields) == 1 + ): + errors.setdefault(constraint.fields[0], []).append(e) + else: + errors = e.update_error_dict(errors) + if errors: + raise ValidationError(errors) + + def full_clean(self, exclude=None, validate_unique=True, validate_constraints=True): + """ + Call clean_fields(), clean(), validate_unique(), and + validate_constraints() on the model. Raise a ValidationError for any + errors that occur. + """ + errors = {} + if exclude is None: + exclude = set() + else: + exclude = set(exclude) + + try: + self.clean_fields(exclude=exclude) + except ValidationError as e: + errors = e.update_error_dict(errors) + + # Form.clean() is run even if other validation fails, so do the + # same with Model.clean() for consistency. + try: + self.clean() + except ValidationError as e: + errors = e.update_error_dict(errors) + + # Run unique checks, but only for fields that passed validation. + if validate_unique: + for name in errors: + if name != NON_FIELD_ERRORS and name not in exclude: + exclude.add(name) + try: + self.validate_unique(exclude=exclude) + except ValidationError as e: + errors = e.update_error_dict(errors) + + # Run constraints checks, but only for fields that passed validation. + if validate_constraints: + for name in errors: + if name != NON_FIELD_ERRORS and name not in exclude: + exclude.add(name) + try: + self.validate_constraints(exclude=exclude) + except ValidationError as e: + errors = e.update_error_dict(errors) + + if errors: + raise ValidationError(errors) + + def clean_fields(self, exclude=None): + """ + Clean all fields and raise a ValidationError containing a dict + of all validation errors if any occur. + """ + if exclude is None: + exclude = set() + + errors = {} + for f in self._meta.fields: + if f.name in exclude: + continue + # Skip validation for empty fields with blank=True. The developer + # is responsible for making sure they have a valid value. + raw_value = getattr(self, f.attname) + if f.blank and raw_value in f.empty_values: + continue + try: + setattr(self, f.attname, f.clean(raw_value, self)) + except ValidationError as e: + errors[f.name] = e.error_list + + if errors: + raise ValidationError(errors) + + @classmethod + def check(cls, **kwargs): + errors = [ + *cls._check_swappable(), + *cls._check_model(), + *cls._check_managers(**kwargs), + ] + if not cls._meta.swapped: + databases = kwargs.get("databases") or [] + errors += [ + *cls._check_fields(**kwargs), + *cls._check_m2m_through_same_relationship(), + *cls._check_long_column_names(databases), + ] + clash_errors = ( + *cls._check_id_field(), + *cls._check_field_name_clashes(), + *cls._check_model_name_db_lookup_clashes(), + *cls._check_property_name_related_field_accessor_clashes(), + *cls._check_single_primary_key(), + ) + errors.extend(clash_errors) + # If there are field name clashes, hide consequent column name + # clashes. + if not clash_errors: + errors.extend(cls._check_column_name_clashes()) + errors += [ + *cls._check_index_together(), + *cls._check_unique_together(), + *cls._check_indexes(databases), + *cls._check_ordering(), + *cls._check_constraints(databases), + *cls._check_default_pk(), + *cls._check_db_table_comment(databases), + ] + + return errors + + @classmethod + def _check_default_pk(cls): + if ( + not cls._meta.abstract + and cls._meta.pk.auto_created + and + # Inherited PKs are checked in parents models. 
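+            # Illustrative note added in review, not part of the original
+            # patch: in multi-table inheritance the child's pk is the
+            # auto-created parent_link OneToOneField matched just below, so
+            # models.W042 is only reported against the parent model.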
+ not ( + isinstance(cls._meta.pk, OneToOneField) + and cls._meta.pk.remote_field.parent_link + ) + and not settings.is_overridden("DEFAULT_AUTO_FIELD") + and cls._meta.app_config + and not cls._meta.app_config._is_default_auto_field_overridden + ): + return [ + checks.Warning( + f"Auto-created primary key used when not defining a " + f"primary key type, by default " + f"'{settings.DEFAULT_AUTO_FIELD}'.", + hint=( + f"Configure the DEFAULT_AUTO_FIELD setting or the " + f"{cls._meta.app_config.__class__.__qualname__}." + f"default_auto_field attribute to point to a subclass " + f"of AutoField, e.g. 'django.db.models.BigAutoField'." + ), + obj=cls, + id="models.W042", + ), + ] + return [] + + @classmethod + def _check_db_table_comment(cls, databases): + if not cls._meta.db_table_comment: + return [] + errors = [] + for db in databases: + if not router.allow_migrate_model(db, cls): + continue + connection = connections[db] + if not ( + connection.features.supports_comments + or "supports_comments" in cls._meta.required_db_features + ): + errors.append( + checks.Warning( + f"{connection.display_name} does not support comments on " + f"tables (db_table_comment).", + obj=cls, + id="models.W046", + ) + ) + return errors + + @classmethod + def _check_swappable(cls): + """Check if the swapped model exists.""" + errors = [] + if cls._meta.swapped: + try: + apps.get_model(cls._meta.swapped) + except ValueError: + errors.append( + checks.Error( + "'%s' is not of the form 'app_label.app_name'." + % cls._meta.swappable, + id="models.E001", + ) + ) + except LookupError: + app_label, model_name = cls._meta.swapped.split(".") + errors.append( + checks.Error( + "'%s' references '%s.%s', which has not been " + "installed, or is abstract." + % (cls._meta.swappable, app_label, model_name), + id="models.E002", + ) + ) + return errors + + @classmethod + def _check_model(cls): + errors = [] + if cls._meta.proxy: + if cls._meta.local_fields or cls._meta.local_many_to_many: + errors.append( + checks.Error( + "Proxy model '%s' contains model fields." % cls.__name__, + id="models.E017", + ) + ) + return errors + + @classmethod + def _check_managers(cls, **kwargs): + """Perform all manager checks.""" + errors = [] + for manager in cls._meta.managers: + errors.extend(manager.check(**kwargs)) + return errors + + @classmethod + def _check_fields(cls, **kwargs): + """Perform all field checks.""" + errors = [] + for field in cls._meta.local_fields: + errors.extend(field.check(**kwargs)) + for field in cls._meta.local_many_to_many: + errors.extend(field.check(from_model=cls, **kwargs)) + return errors + + @classmethod + def _check_m2m_through_same_relationship(cls): + """Check if no relationship model is used by more than one m2m field.""" + + errors = [] + seen_intermediary_signatures = [] + + fields = cls._meta.local_many_to_many + + # Skip when the target model wasn't found. + fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) + + # Skip when the relationship model wasn't found. + fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) + + for f in fields: + signature = ( + f.remote_field.model, + cls, + f.remote_field.through, + f.remote_field.through_fields, + ) + if signature in seen_intermediary_signatures: + errors.append( + checks.Error( + "The model has two identical many-to-many relations " + "through the intermediate model '%s'." 
+ % f.remote_field.through._meta.label, + obj=cls, + id="models.E003", + ) + ) + else: + seen_intermediary_signatures.append(signature) + return errors + + @classmethod + def _check_id_field(cls): + """Check if `id` field is a primary key.""" + fields = [ + f for f in cls._meta.local_fields if f.name == "id" and f != cls._meta.pk + ] + # fields is empty or consists of the invalid "id" field + if fields and not fields[0].primary_key and cls._meta.pk.name == "id": + return [ + checks.Error( + "'id' can only be used as a field name if the field also " + "sets 'primary_key=True'.", + obj=cls, + id="models.E004", + ) + ] + else: + return [] + + @classmethod + def _check_field_name_clashes(cls): + """Forbid field shadowing in multi-table inheritance.""" + errors = [] + used_fields = {} # name or attname -> field + + # Check that multi-inheritance doesn't cause field name shadowing. + for parent in cls._meta.get_parent_list(): + for f in parent._meta.local_fields: + clash = used_fields.get(f.name) or used_fields.get(f.attname) or None + if clash: + errors.append( + checks.Error( + "The field '%s' from parent model " + "'%s' clashes with the field '%s' " + "from parent model '%s'." + % (clash.name, clash.model._meta, f.name, f.model._meta), + obj=cls, + id="models.E005", + ) + ) + used_fields[f.name] = f + used_fields[f.attname] = f + + # Check that fields defined in the model don't clash with fields from + # parents, including auto-generated fields like multi-table inheritance + # child accessors. + for parent in cls._meta.get_parent_list(): + for f in parent._meta.get_fields(): + if f not in used_fields: + used_fields[f.name] = f + + # Check that parent links in diamond-shaped MTI models don't clash. + for parent_link in cls._meta.parents.values(): + if not parent_link: + continue + clash = used_fields.get(parent_link.name) or None + if clash: + errors.append( + checks.Error( + f"The field '{parent_link.name}' clashes with the field " + f"'{clash.name}' from model '{clash.model._meta}'.", + obj=cls, + id="models.E006", + ) + ) + + for f in cls._meta.local_fields: + clash = used_fields.get(f.name) or used_fields.get(f.attname) or None + # Note that we may detect clash between user-defined non-unique + # field "id" and automatically added unique field "id", both + # defined at the same model. This special case is considered in + # _check_id_field and here we ignore it. + id_conflict = ( + f.name == "id" and clash and clash.name == "id" and clash.model == cls + ) + if clash and not id_conflict: + errors.append( + checks.Error( + "The field '%s' clashes with the field '%s' " + "from model '%s'." % (f.name, clash.name, clash.model._meta), + obj=f, + id="models.E006", + ) + ) + used_fields[f.name] = f + used_fields[f.attname] = f + + return errors + + @classmethod + def _check_column_name_clashes(cls): + # Store a list of column names which have already been used by other fields. + used_column_names = [] + errors = [] + + for f in cls._meta.local_fields: + _, column_name = f.get_attname_column() + + # Ensure the column name is not already in use. + if column_name and column_name in used_column_names: + errors.append( + checks.Error( + "Field '%s' has column name '%s' that is used by " + "another field." 
% (f.name, column_name), + hint="Specify a 'db_column' for the field.", + obj=cls, + id="models.E007", + ) + ) + else: + used_column_names.append(column_name) + + return errors + + @classmethod + def _check_model_name_db_lookup_clashes(cls): + errors = [] + model_name = cls.__name__ + if model_name.startswith("_") or model_name.endswith("_"): + errors.append( + checks.Error( + "The model name '%s' cannot start or end with an underscore " + "as it collides with the query lookup syntax." % model_name, + obj=cls, + id="models.E023", + ) + ) + elif LOOKUP_SEP in model_name: + errors.append( + checks.Error( + "The model name '%s' cannot contain double underscores as " + "it collides with the query lookup syntax." % model_name, + obj=cls, + id="models.E024", + ) + ) + return errors + + @classmethod + def _check_property_name_related_field_accessor_clashes(cls): + errors = [] + property_names = cls._meta._property_names + related_field_accessors = ( + f.get_attname() + for f in cls._meta._get_fields(reverse=False) + if f.is_relation and f.related_model is not None + ) + for accessor in related_field_accessors: + if accessor in property_names: + errors.append( + checks.Error( + "The property '%s' clashes with a related field " + "accessor." % accessor, + obj=cls, + id="models.E025", + ) + ) + return errors + + @classmethod + def _check_single_primary_key(cls): + errors = [] + if sum(1 for f in cls._meta.local_fields if f.primary_key) > 1: + errors.append( + checks.Error( + "The model cannot have more than one field with " + "'primary_key=True'.", + obj=cls, + id="models.E026", + ) + ) + return errors + + # RemovedInDjango51Warning. + @classmethod + def _check_index_together(cls): + """Check the value of "index_together" option.""" + if not isinstance(cls._meta.index_together, (tuple, list)): + return [ + checks.Error( + "'index_together' must be a list or tuple.", + obj=cls, + id="models.E008", + ) + ] + + elif any( + not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together + ): + return [ + checks.Error( + "All 'index_together' elements must be lists or tuples.", + obj=cls, + id="models.E009", + ) + ] + + else: + errors = [] + for fields in cls._meta.index_together: + errors.extend(cls._check_local_fields(fields, "index_together")) + return errors + + @classmethod + def _check_unique_together(cls): + """Check the value of "unique_together" option.""" + if not isinstance(cls._meta.unique_together, (tuple, list)): + return [ + checks.Error( + "'unique_together' must be a list or tuple.", + obj=cls, + id="models.E010", + ) + ] + + elif any( + not isinstance(fields, (tuple, list)) + for fields in cls._meta.unique_together + ): + return [ + checks.Error( + "All 'unique_together' elements must be lists or tuples.", + obj=cls, + id="models.E011", + ) + ] + + else: + errors = [] + for fields in cls._meta.unique_together: + errors.extend(cls._check_local_fields(fields, "unique_together")) + return errors + + @classmethod + def _check_indexes(cls, databases): + """Check fields, names, and conditions of indexes.""" + errors = [] + references = set() + for index in cls._meta.indexes: + # Index name can't start with an underscore or a number, restricted + # for cross-database compatibility with Oracle. + if index.name[0] == "_" or index.name[0].isdigit(): + errors.append( + checks.Error( + "The index name '%s' cannot start with an underscore " + "or a number." 
% index.name, + obj=cls, + id="models.E033", + ), + ) + if len(index.name) > index.max_name_length: + errors.append( + checks.Error( + "The index name '%s' cannot be longer than %d " + "characters." % (index.name, index.max_name_length), + obj=cls, + id="models.E034", + ), + ) + if index.contains_expressions: + for expression in index.expressions: + references.update( + ref[0] for ref in cls._get_expr_references(expression) + ) + for db in databases: + if not router.allow_migrate_model(db, cls): + continue + connection = connections[db] + if not ( + connection.features.supports_partial_indexes + or "supports_partial_indexes" in cls._meta.required_db_features + ) and any(index.condition is not None for index in cls._meta.indexes): + errors.append( + checks.Warning( + "%s does not support indexes with conditions." + % connection.display_name, + hint=( + "Conditions will be ignored. Silence this warning " + "if you don't care about it." + ), + obj=cls, + id="models.W037", + ) + ) + if not ( + connection.features.supports_covering_indexes + or "supports_covering_indexes" in cls._meta.required_db_features + ) and any(index.include for index in cls._meta.indexes): + errors.append( + checks.Warning( + "%s does not support indexes with non-key columns." + % connection.display_name, + hint=( + "Non-key columns will be ignored. Silence this " + "warning if you don't care about it." + ), + obj=cls, + id="models.W040", + ) + ) + if not ( + connection.features.supports_expression_indexes + or "supports_expression_indexes" in cls._meta.required_db_features + ) and any(index.contains_expressions for index in cls._meta.indexes): + errors.append( + checks.Warning( + "%s does not support indexes on expressions." + % connection.display_name, + hint=( + "An index won't be created. Silence this warning " + "if you don't care about it." + ), + obj=cls, + id="models.W043", + ) + ) + fields = [ + field for index in cls._meta.indexes for field, _ in index.fields_orders + ] + fields += [include for index in cls._meta.indexes for include in index.include] + fields += references + errors.extend(cls._check_local_fields(fields, "indexes")) + return errors + + @classmethod + def _check_local_fields(cls, fields, option): + from django.db import models + + # In order to avoid hitting the relation tree prematurely, we use our + # own fields_map instead of using get_field() + forward_fields_map = {} + for field in cls._meta._get_fields(reverse=False): + forward_fields_map[field.name] = field + if hasattr(field, "attname"): + forward_fields_map[field.attname] = field + + errors = [] + for field_name in fields: + try: + field = forward_fields_map[field_name] + except KeyError: + errors.append( + checks.Error( + "'%s' refers to the nonexistent field '%s'." + % ( + option, + field_name, + ), + obj=cls, + id="models.E012", + ) + ) + else: + if isinstance(field.remote_field, models.ManyToManyRel): + errors.append( + checks.Error( + "'%s' refers to a ManyToManyField '%s', but " + "ManyToManyFields are not permitted in '%s'." + % ( + option, + field_name, + option, + ), + obj=cls, + id="models.E013", + ) + ) + elif field not in cls._meta.local_fields: + errors.append( + checks.Error( + "'%s' refers to field '%s' which is not local to model " + "'%s'." 
% (option, field_name, cls._meta.object_name), + hint="This issue may be caused by multi-table inheritance.", + obj=cls, + id="models.E016", + ) + ) + return errors + + @classmethod + def _check_ordering(cls): + """ + Check "ordering" option -- is it a list of strings and do all fields + exist? + """ + if cls._meta._ordering_clash: + return [ + checks.Error( + "'ordering' and 'order_with_respect_to' cannot be used together.", + obj=cls, + id="models.E021", + ), + ] + + if cls._meta.order_with_respect_to or not cls._meta.ordering: + return [] + + if not isinstance(cls._meta.ordering, (list, tuple)): + return [ + checks.Error( + "'ordering' must be a tuple or list (even if you want to order by " + "only one field).", + obj=cls, + id="models.E014", + ) + ] + + errors = [] + fields = cls._meta.ordering + + # Skip expressions and '?' fields. + fields = (f for f in fields if isinstance(f, str) and f != "?") + + # Convert "-field" to "field". + fields = (f.removeprefix("-") for f in fields) + + # Separate related fields and non-related fields. + _fields = [] + related_fields = [] + for f in fields: + if LOOKUP_SEP in f: + related_fields.append(f) + else: + _fields.append(f) + fields = _fields + + # Check related fields. + for field in related_fields: + _cls = cls + fld = None + for part in field.split(LOOKUP_SEP): + try: + # pk is an alias that won't be found by opts.get_field. + if part == "pk": + fld = _cls._meta.pk + else: + fld = _cls._meta.get_field(part) + if fld.is_relation: + _cls = fld.path_infos[-1].to_opts.model + else: + _cls = None + except (FieldDoesNotExist, AttributeError): + if fld is None or ( + fld.get_transform(part) is None and fld.get_lookup(part) is None + ): + errors.append( + checks.Error( + "'ordering' refers to the nonexistent field, " + "related field, or lookup '%s'." % field, + obj=cls, + id="models.E015", + ) + ) + + # Skip ordering on pk. This is always a valid order_by field + # but is an alias and therefore won't be found by opts.get_field. + fields = {f for f in fields if f != "pk"} + + # Check for invalid or nonexistent fields in ordering. + invalid_fields = [] + + # Any field name that is not present in field_names does not exist. + # Also, ordering by m2m fields is not allowed. + opts = cls._meta + valid_fields = set( + chain.from_iterable( + (f.name, f.attname) + if not (f.auto_created and not f.concrete) + else (f.field.related_query_name(),) + for f in chain(opts.fields, opts.related_objects) + ) + ) + + invalid_fields.extend(fields - valid_fields) + + for invalid_field in invalid_fields: + errors.append( + checks.Error( + "'ordering' refers to the nonexistent field, related " + "field, or lookup '%s'." % invalid_field, + obj=cls, + id="models.E015", + ) + ) + return errors + + @classmethod + def _check_long_column_names(cls, databases): + """ + Check that any auto-generated column names are shorter than the limits + for each database in which the model will be created. + """ + if not databases: + return [] + errors = [] + allowed_len = None + db_alias = None + + # Find the minimum max allowed length among all specified db_aliases. 
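+        # Illustrative note added in review, not part of the original patch:
+        # if a router sends this model both to PostgreSQL (63-character
+        # identifier limit) and to an older Oracle database (30 characters),
+        # the loop below settles on allowed_len=30 with db_alias naming the
+        # Oracle connection, so the strictest backend drives E018/E019.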
+ for db in databases: + # skip databases where the model won't be created + if not router.allow_migrate_model(db, cls): + continue + connection = connections[db] + max_name_length = connection.ops.max_name_length() + if max_name_length is None or connection.features.truncates_names: + continue + else: + if allowed_len is None: + allowed_len = max_name_length + db_alias = db + elif max_name_length < allowed_len: + allowed_len = max_name_length + db_alias = db + + if allowed_len is None: + return errors + + for f in cls._meta.local_fields: + _, column_name = f.get_attname_column() + + # Check if auto-generated name for the field is too long + # for the database. + if ( + f.db_column is None + and column_name is not None + and len(column_name) > allowed_len + ): + errors.append( + checks.Error( + 'Autogenerated column name too long for field "%s". ' + 'Maximum length is "%s" for database "%s".' + % (column_name, allowed_len, db_alias), + hint="Set the column name manually using 'db_column'.", + obj=cls, + id="models.E018", + ) + ) + + for f in cls._meta.local_many_to_many: + # Skip nonexistent models. + if isinstance(f.remote_field.through, str): + continue + + # Check if auto-generated name for the M2M field is too long + # for the database. + for m2m in f.remote_field.through._meta.local_fields: + _, rel_name = m2m.get_attname_column() + if ( + m2m.db_column is None + and rel_name is not None + and len(rel_name) > allowed_len + ): + errors.append( + checks.Error( + "Autogenerated column name too long for M2M field " + '"%s". Maximum length is "%s" for database "%s".' + % (rel_name, allowed_len, db_alias), + hint=( + "Use 'through' to create a separate model for " + "M2M and then set column_name using 'db_column'." + ), + obj=cls, + id="models.E019", + ) + ) + + return errors + + @classmethod + def _get_expr_references(cls, expr): + if isinstance(expr, Q): + for child in expr.children: + if isinstance(child, tuple): + lookup, value = child + yield tuple(lookup.split(LOOKUP_SEP)) + yield from cls._get_expr_references(value) + else: + yield from cls._get_expr_references(child) + elif isinstance(expr, F): + yield tuple(expr.name.split(LOOKUP_SEP)) + elif hasattr(expr, "get_source_expressions"): + for src_expr in expr.get_source_expressions(): + yield from cls._get_expr_references(src_expr) + + @classmethod + def _check_constraints(cls, databases): + errors = [] + for db in databases: + if not router.allow_migrate_model(db, cls): + continue + connection = connections[db] + if not ( + connection.features.supports_table_check_constraints + or "supports_table_check_constraints" in cls._meta.required_db_features + ) and any( + isinstance(constraint, CheckConstraint) + for constraint in cls._meta.constraints + ): + errors.append( + checks.Warning( + "%s does not support check constraints." + % connection.display_name, + hint=( + "A constraint won't be created. Silence this " + "warning if you don't care about it." + ), + obj=cls, + id="models.W027", + ) + ) + if not ( + connection.features.supports_partial_indexes + or "supports_partial_indexes" in cls._meta.required_db_features + ) and any( + isinstance(constraint, UniqueConstraint) + and constraint.condition is not None + for constraint in cls._meta.constraints + ): + errors.append( + checks.Warning( + "%s does not support unique constraints with " + "conditions." % connection.display_name, + hint=( + "A constraint won't be created. Silence this " + "warning if you don't care about it." 
+ ), + obj=cls, + id="models.W036", + ) + ) + if not ( + connection.features.supports_deferrable_unique_constraints + or "supports_deferrable_unique_constraints" + in cls._meta.required_db_features + ) and any( + isinstance(constraint, UniqueConstraint) + and constraint.deferrable is not None + for constraint in cls._meta.constraints + ): + errors.append( + checks.Warning( + "%s does not support deferrable unique constraints." + % connection.display_name, + hint=( + "A constraint won't be created. Silence this " + "warning if you don't care about it." + ), + obj=cls, + id="models.W038", + ) + ) + if not ( + connection.features.supports_covering_indexes + or "supports_covering_indexes" in cls._meta.required_db_features + ) and any( + isinstance(constraint, UniqueConstraint) and constraint.include + for constraint in cls._meta.constraints + ): + errors.append( + checks.Warning( + "%s does not support unique constraints with non-key " + "columns." % connection.display_name, + hint=( + "A constraint won't be created. Silence this " + "warning if you don't care about it." + ), + obj=cls, + id="models.W039", + ) + ) + if not ( + connection.features.supports_expression_indexes + or "supports_expression_indexes" in cls._meta.required_db_features + ) and any( + isinstance(constraint, UniqueConstraint) + and constraint.contains_expressions + for constraint in cls._meta.constraints + ): + errors.append( + checks.Warning( + "%s does not support unique constraints on " + "expressions." % connection.display_name, + hint=( + "A constraint won't be created. Silence this " + "warning if you don't care about it." + ), + obj=cls, + id="models.W044", + ) + ) + fields = set( + chain.from_iterable( + (*constraint.fields, *constraint.include) + for constraint in cls._meta.constraints + if isinstance(constraint, UniqueConstraint) + ) + ) + references = set() + for constraint in cls._meta.constraints: + if isinstance(constraint, UniqueConstraint): + if ( + connection.features.supports_partial_indexes + or "supports_partial_indexes" + not in cls._meta.required_db_features + ) and isinstance(constraint.condition, Q): + references.update( + cls._get_expr_references(constraint.condition) + ) + if ( + connection.features.supports_expression_indexes + or "supports_expression_indexes" + not in cls._meta.required_db_features + ) and constraint.contains_expressions: + for expression in constraint.expressions: + references.update(cls._get_expr_references(expression)) + elif isinstance(constraint, CheckConstraint): + if ( + connection.features.supports_table_check_constraints + or "supports_table_check_constraints" + not in cls._meta.required_db_features + ): + if isinstance(constraint.check, Q): + references.update( + cls._get_expr_references(constraint.check) + ) + if any( + isinstance(expr, RawSQL) + for expr in constraint.check.flatten() + ): + errors.append( + checks.Warning( + f"Check constraint {constraint.name!r} contains " + f"RawSQL() expression and won't be validated " + f"during the model full_clean().", + hint=( + "Silence this warning if you don't care about " + "it." + ), + obj=cls, + id="models.W045", + ), + ) + for field_name, *lookups in references: + # pk is an alias that won't be found by opts.get_field. + if field_name != "pk": + fields.add(field_name) + if not lookups: + # If it has no lookups it cannot result in a JOIN. 
+ continue + try: + if field_name == "pk": + field = cls._meta.pk + else: + field = cls._meta.get_field(field_name) + if not field.is_relation or field.many_to_many or field.one_to_many: + continue + except FieldDoesNotExist: + continue + # JOIN must happen at the first lookup. + first_lookup = lookups[0] + if ( + hasattr(field, "get_transform") + and hasattr(field, "get_lookup") + and field.get_transform(first_lookup) is None + and field.get_lookup(first_lookup) is None + ): + errors.append( + checks.Error( + "'constraints' refers to the joined field '%s'." + % LOOKUP_SEP.join([field_name] + lookups), + obj=cls, + id="models.E041", + ) + ) + errors.extend(cls._check_local_fields(fields, "constraints")) + return errors + + +############################################ +# HELPER FUNCTIONS (CURRIED MODEL METHODS) # +############################################ + +# ORDERING METHODS ######################### + + +def method_set_order(self, ordered_obj, id_list, using=None): + order_wrt = ordered_obj._meta.order_with_respect_to + filter_args = order_wrt.get_forward_related_filter(self) + ordered_obj.objects.db_manager(using).filter(**filter_args).bulk_update( + [ordered_obj(pk=pk, _order=order) for order, pk in enumerate(id_list)], + ["_order"], + ) + + +def method_get_order(self, ordered_obj): + order_wrt = ordered_obj._meta.order_with_respect_to + filter_args = order_wrt.get_forward_related_filter(self) + pk_name = ordered_obj._meta.pk.name + return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True) + + +def make_foreign_order_accessors(model, related_model): + setattr( + related_model, + "get_%s_order" % model.__name__.lower(), + partialmethod(method_get_order, model), + ) + setattr( + related_model, + "set_%s_order" % model.__name__.lower(), + partialmethod(method_set_order, model), + ) + + +######## +# MISC # +######## + + +def model_unpickle(model_id): + """Used to unpickle Model subclasses with deferred fields.""" + if isinstance(model_id, tuple): + model = apps.get_model(*model_id) + else: + # Backwards compat - the model was cached directly in earlier versions. + model = model_id + return model.__new__(model) + + +model_unpickle.__safe_for_unpickle__ = True diff --git a/testbed/django__django/django/db/models/constants.py b/testbed/django__django/django/db/models/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..a0c99c95fc7d33bbbc965323e9d5a961b7199e1e --- /dev/null +++ b/testbed/django__django/django/db/models/constants.py @@ -0,0 +1,12 @@ +""" +Constants used across the ORM in general. +""" +from enum import Enum + +# Separator used to split filter strings apart. 
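+# For example, Book.objects.filter(author__name="Ada") splits on this
+# separator into the relation "author" and the lookup "name". (Model and
+# field names here are illustrative, not part of the original patch.)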
+LOOKUP_SEP = "__" + + +class OnConflict(Enum): + IGNORE = "ignore" + UPDATE = "update" diff --git a/testbed/django__django/django/db/models/constraints.py b/testbed/django__django/django/db/models/constraints.py new file mode 100644 index 0000000000000000000000000000000000000000..0df0782b6f1f4881fb67c97ce69bbd78a9c98daf --- /dev/null +++ b/testbed/django__django/django/db/models/constraints.py @@ -0,0 +1,445 @@ +import warnings +from enum import Enum +from types import NoneType + +from django.core.exceptions import FieldError, ValidationError +from django.db import connections +from django.db.models.expressions import Exists, ExpressionList, F, OrderBy +from django.db.models.indexes import IndexExpression +from django.db.models.lookups import Exact +from django.db.models.query_utils import Q +from django.db.models.sql.query import Query +from django.db.utils import DEFAULT_DB_ALIAS +from django.utils.deprecation import RemovedInDjango60Warning +from django.utils.translation import gettext_lazy as _ + +__all__ = ["BaseConstraint", "CheckConstraint", "Deferrable", "UniqueConstraint"] + + +class BaseConstraint: + default_violation_error_message = _("Constraint “%(name)s” is violated.") + violation_error_code = None + violation_error_message = None + + # RemovedInDjango60Warning: When the deprecation ends, replace with: + # def __init__( + # self, *, name, violation_error_code=None, violation_error_message=None + # ): + def __init__( + self, *args, name=None, violation_error_code=None, violation_error_message=None + ): + # RemovedInDjango60Warning. + if name is None and not args: + raise TypeError( + f"{self.__class__.__name__}.__init__() missing 1 required keyword-only " + f"argument: 'name'" + ) + self.name = name + if violation_error_code is not None: + self.violation_error_code = violation_error_code + if violation_error_message is not None: + self.violation_error_message = violation_error_message + else: + self.violation_error_message = self.default_violation_error_message + # RemovedInDjango60Warning. 
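+        # Illustrative note added in review, not part of the original patch:
+        # the legacy positional form BaseConstraint("some_name", "message")
+        # is remapped below onto the keyword attributes, behaving like
+        # BaseConstraint(name="some_name",
+        # violation_error_message="message") while emitting the deprecation
+        # warning.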
+ if args: + warnings.warn( + f"Passing positional arguments to {self.__class__.__name__} is " + f"deprecated.", + RemovedInDjango60Warning, + stacklevel=2, + ) + for arg, attr in zip(args, ["name", "violation_error_message"]): + if arg: + setattr(self, attr, arg) + + @property + def contains_expressions(self): + return False + + def constraint_sql(self, model, schema_editor): + raise NotImplementedError("This method must be implemented by a subclass.") + + def create_sql(self, model, schema_editor): + raise NotImplementedError("This method must be implemented by a subclass.") + + def remove_sql(self, model, schema_editor): + raise NotImplementedError("This method must be implemented by a subclass.") + + def validate(self, model, instance, exclude=None, using=DEFAULT_DB_ALIAS): + raise NotImplementedError("This method must be implemented by a subclass.") + + def get_violation_error_message(self): + return self.violation_error_message % {"name": self.name} + + def deconstruct(self): + path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__) + path = path.replace("django.db.models.constraints", "django.db.models") + kwargs = {"name": self.name} + if ( + self.violation_error_message is not None + and self.violation_error_message != self.default_violation_error_message + ): + kwargs["violation_error_message"] = self.violation_error_message + if self.violation_error_code is not None: + kwargs["violation_error_code"] = self.violation_error_code + return (path, (), kwargs) + + def clone(self): + _, args, kwargs = self.deconstruct() + return self.__class__(*args, **kwargs) + + +class CheckConstraint(BaseConstraint): + def __init__( + self, *, check, name, violation_error_code=None, violation_error_message=None + ): + self.check = check + if not getattr(check, "conditional", False): + raise TypeError( + "CheckConstraint.check must be a Q instance or boolean expression." 
+ ) + super().__init__( + name=name, + violation_error_code=violation_error_code, + violation_error_message=violation_error_message, + ) + + def _get_check_sql(self, model, schema_editor): + query = Query(model=model, alias_cols=False) + where = query.build_where(self.check) + compiler = query.get_compiler(connection=schema_editor.connection) + sql, params = where.as_sql(compiler, schema_editor.connection) + return sql % tuple(schema_editor.quote_value(p) for p in params) + + def constraint_sql(self, model, schema_editor): + check = self._get_check_sql(model, schema_editor) + return schema_editor._check_sql(self.name, check) + + def create_sql(self, model, schema_editor): + check = self._get_check_sql(model, schema_editor) + return schema_editor._create_check_sql(model, self.name, check) + + def remove_sql(self, model, schema_editor): + return schema_editor._delete_check_sql(model, self.name) + + def validate(self, model, instance, exclude=None, using=DEFAULT_DB_ALIAS): + against = instance._get_field_value_map(meta=model._meta, exclude=exclude) + try: + if not Q(self.check).check(against, using=using): + raise ValidationError( + self.get_violation_error_message(), code=self.violation_error_code + ) + except FieldError: + pass + + def __repr__(self): + return "<%s: check=%s name=%s%s%s>" % ( + self.__class__.__qualname__, + self.check, + repr(self.name), + ( + "" + if self.violation_error_code is None + else " violation_error_code=%r" % self.violation_error_code + ), + ( + "" + if self.violation_error_message is None + or self.violation_error_message == self.default_violation_error_message + else " violation_error_message=%r" % self.violation_error_message + ), + ) + + def __eq__(self, other): + if isinstance(other, CheckConstraint): + return ( + self.name == other.name + and self.check == other.check + and self.violation_error_code == other.violation_error_code + and self.violation_error_message == other.violation_error_message + ) + return super().__eq__(other) + + def deconstruct(self): + path, args, kwargs = super().deconstruct() + kwargs["check"] = self.check + return path, args, kwargs + + +class Deferrable(Enum): + DEFERRED = "deferred" + IMMEDIATE = "immediate" + + # A similar format was proposed for Python 3.10. + def __repr__(self): + return f"{self.__class__.__qualname__}.{self._name_}" + + +class UniqueConstraint(BaseConstraint): + def __init__( + self, + *expressions, + fields=(), + name=None, + condition=None, + deferrable=None, + include=None, + opclasses=(), + violation_error_code=None, + violation_error_message=None, + ): + if not name: + raise ValueError("A unique constraint must be named.") + if not expressions and not fields: + raise ValueError( + "At least one field or expression is required to define a " + "unique constraint." + ) + if expressions and fields: + raise ValueError( + "UniqueConstraint.fields and expressions are mutually exclusive." 
+ ) + if not isinstance(condition, (NoneType, Q)): + raise ValueError("UniqueConstraint.condition must be a Q instance.") + if condition and deferrable: + raise ValueError("UniqueConstraint with conditions cannot be deferred.") + if include and deferrable: + raise ValueError("UniqueConstraint with include fields cannot be deferred.") + if opclasses and deferrable: + raise ValueError("UniqueConstraint with opclasses cannot be deferred.") + if expressions and deferrable: + raise ValueError("UniqueConstraint with expressions cannot be deferred.") + if expressions and opclasses: + raise ValueError( + "UniqueConstraint.opclasses cannot be used with expressions. " + "Use django.contrib.postgres.indexes.OpClass() instead." + ) + if not isinstance(deferrable, (NoneType, Deferrable)): + raise ValueError( + "UniqueConstraint.deferrable must be a Deferrable instance." + ) + if not isinstance(include, (NoneType, list, tuple)): + raise ValueError("UniqueConstraint.include must be a list or tuple.") + if not isinstance(opclasses, (list, tuple)): + raise ValueError("UniqueConstraint.opclasses must be a list or tuple.") + if opclasses and len(fields) != len(opclasses): + raise ValueError( + "UniqueConstraint.fields and UniqueConstraint.opclasses must " + "have the same number of elements." + ) + self.fields = tuple(fields) + self.condition = condition + self.deferrable = deferrable + self.include = tuple(include) if include else () + self.opclasses = opclasses + self.expressions = tuple( + F(expression) if isinstance(expression, str) else expression + for expression in expressions + ) + super().__init__( + name=name, + violation_error_code=violation_error_code, + violation_error_message=violation_error_message, + ) + + @property + def contains_expressions(self): + return bool(self.expressions) + + def _get_condition_sql(self, model, schema_editor): + if self.condition is None: + return None + query = Query(model=model, alias_cols=False) + where = query.build_where(self.condition) + compiler = query.get_compiler(connection=schema_editor.connection) + sql, params = where.as_sql(compiler, schema_editor.connection) + return sql % tuple(schema_editor.quote_value(p) for p in params) + + def _get_index_expressions(self, model, schema_editor): + if not self.expressions: + return None + index_expressions = [] + for expression in self.expressions: + index_expression = IndexExpression(expression) + index_expression.set_wrapper_classes(schema_editor.connection) + index_expressions.append(index_expression) + return ExpressionList(*index_expressions).resolve_expression( + Query(model, alias_cols=False), + ) + + def constraint_sql(self, model, schema_editor): + fields = [model._meta.get_field(field_name) for field_name in self.fields] + include = [ + model._meta.get_field(field_name).column for field_name in self.include + ] + condition = self._get_condition_sql(model, schema_editor) + expressions = self._get_index_expressions(model, schema_editor) + return schema_editor._unique_sql( + model, + fields, + self.name, + condition=condition, + deferrable=self.deferrable, + include=include, + opclasses=self.opclasses, + expressions=expressions, + ) + + def create_sql(self, model, schema_editor): + fields = [model._meta.get_field(field_name) for field_name in self.fields] + include = [ + model._meta.get_field(field_name).column for field_name in self.include + ] + condition = self._get_condition_sql(model, schema_editor) + expressions = self._get_index_expressions(model, schema_editor) + return 
schema_editor._create_unique_sql( + model, + fields, + self.name, + condition=condition, + deferrable=self.deferrable, + include=include, + opclasses=self.opclasses, + expressions=expressions, + ) + + def remove_sql(self, model, schema_editor): + condition = self._get_condition_sql(model, schema_editor) + include = [ + model._meta.get_field(field_name).column for field_name in self.include + ] + expressions = self._get_index_expressions(model, schema_editor) + return schema_editor._delete_unique_sql( + model, + self.name, + condition=condition, + deferrable=self.deferrable, + include=include, + opclasses=self.opclasses, + expressions=expressions, + ) + + def __repr__(self): + return "<%s:%s%s%s%s%s%s%s%s%s>" % ( + self.__class__.__qualname__, + "" if not self.fields else " fields=%s" % repr(self.fields), + "" if not self.expressions else " expressions=%s" % repr(self.expressions), + " name=%s" % repr(self.name), + "" if self.condition is None else " condition=%s" % self.condition, + "" if self.deferrable is None else " deferrable=%r" % self.deferrable, + "" if not self.include else " include=%s" % repr(self.include), + "" if not self.opclasses else " opclasses=%s" % repr(self.opclasses), + ( + "" + if self.violation_error_code is None + else " violation_error_code=%r" % self.violation_error_code + ), + ( + "" + if self.violation_error_message is None + or self.violation_error_message == self.default_violation_error_message + else " violation_error_message=%r" % self.violation_error_message + ), + ) + + def __eq__(self, other): + if isinstance(other, UniqueConstraint): + return ( + self.name == other.name + and self.fields == other.fields + and self.condition == other.condition + and self.deferrable == other.deferrable + and self.include == other.include + and self.opclasses == other.opclasses + and self.expressions == other.expressions + and self.violation_error_code == other.violation_error_code + and self.violation_error_message == other.violation_error_message + ) + return super().__eq__(other) + + def deconstruct(self): + path, args, kwargs = super().deconstruct() + if self.fields: + kwargs["fields"] = self.fields + if self.condition: + kwargs["condition"] = self.condition + if self.deferrable: + kwargs["deferrable"] = self.deferrable + if self.include: + kwargs["include"] = self.include + if self.opclasses: + kwargs["opclasses"] = self.opclasses + return path, self.expressions, kwargs + + def validate(self, model, instance, exclude=None, using=DEFAULT_DB_ALIAS): + queryset = model._default_manager.using(using) + if self.fields: + lookup_kwargs = {} + for field_name in self.fields: + if exclude and field_name in exclude: + return + field = model._meta.get_field(field_name) + lookup_value = getattr(instance, field.attname) + if lookup_value is None or ( + lookup_value == "" + and connections[using].features.interprets_empty_strings_as_nulls + ): + # A composite constraint containing NULL value cannot cause + # a violation since NULL != NULL in SQL. + return + lookup_kwargs[field.name] = lookup_value + queryset = queryset.filter(**lookup_kwargs) + else: + # Ignore constraints with excluded fields. 
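+            # Illustrative note added in review, not part of the original
+            # patch: for UniqueConstraint(Lower("email"), name="uniq_email"),
+            # validating with exclude={"email"} finds F("email") when the
+            # Lower() expression is flattened and returns early, skipping
+            # the uniqueness query entirely.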
+ if exclude: + for expression in self.expressions: + if hasattr(expression, "flatten"): + for expr in expression.flatten(): + if isinstance(expr, F) and expr.name in exclude: + return + elif isinstance(expression, F) and expression.name in exclude: + return + replacements = { + F(field): value + for field, value in instance._get_field_value_map( + meta=model._meta, exclude=exclude + ).items() + } + expressions = [] + for expr in self.expressions: + # Ignore ordering. + if isinstance(expr, OrderBy): + expr = expr.expression + expressions.append(Exact(expr, expr.replace_expressions(replacements))) + queryset = queryset.filter(*expressions) + model_class_pk = instance._get_pk_val(model._meta) + if not instance._state.adding and model_class_pk is not None: + queryset = queryset.exclude(pk=model_class_pk) + if not self.condition: + if queryset.exists(): + if self.expressions: + raise ValidationError( + self.get_violation_error_message(), + code=self.violation_error_code, + ) + # When fields are defined, use the unique_error_message() for + # backward compatibility. + for model, constraints in instance.get_constraints(): + for constraint in constraints: + if constraint is self: + raise ValidationError( + instance.unique_error_message(model, self.fields), + ) + else: + against = instance._get_field_value_map(meta=model._meta, exclude=exclude) + try: + if (self.condition & Exists(queryset.filter(self.condition))).check( + against, using=using + ): + raise ValidationError( + self.get_violation_error_message(), + code=self.violation_error_code, + ) + except FieldError: + pass diff --git a/testbed/django__django/django/db/models/deletion.py b/testbed/django__django/django/db/models/deletion.py new file mode 100644 index 0000000000000000000000000000000000000000..bc26d82e934c95e9ec441603be5cc436b8fa2cbd --- /dev/null +++ b/testbed/django__django/django/db/models/deletion.py @@ -0,0 +1,522 @@ +from collections import Counter, defaultdict +from functools import partial, reduce +from itertools import chain +from operator import attrgetter, or_ + +from django.db import IntegrityError, connections, models, transaction +from django.db.models import query_utils, signals, sql + + +class ProtectedError(IntegrityError): + def __init__(self, msg, protected_objects): + self.protected_objects = protected_objects + super().__init__(msg, protected_objects) + + +class RestrictedError(IntegrityError): + def __init__(self, msg, restricted_objects): + self.restricted_objects = restricted_objects + super().__init__(msg, restricted_objects) + + +def CASCADE(collector, field, sub_objs, using): + collector.collect( + sub_objs, + source=field.remote_field.model, + source_attr=field.name, + nullable=field.null, + fail_on_restricted=False, + ) + if field.null and not connections[using].features.can_defer_constraint_checks: + collector.add_field_update(field, None, sub_objs) + + +def PROTECT(collector, field, sub_objs, using): + raise ProtectedError( + "Cannot delete some instances of model '%s' because they are " + "referenced through a protected foreign key: '%s.%s'" + % ( + field.remote_field.model.__name__, + sub_objs[0].__class__.__name__, + field.name, + ), + sub_objs, + ) + + +def RESTRICT(collector, field, sub_objs, using): + collector.add_restricted_objects(field, sub_objs) + collector.add_dependency(field.remote_field.model, field.model) + + +def SET(value): + if callable(value): + + def set_on_delete(collector, field, sub_objs, using): + collector.add_field_update(field, value(), sub_objs) + + else: + + def 
set_on_delete(collector, field, sub_objs, using): + collector.add_field_update(field, value, sub_objs) + + set_on_delete.deconstruct = lambda: ("django.db.models.SET", (value,), {}) + set_on_delete.lazy_sub_objs = True + return set_on_delete + + +def SET_NULL(collector, field, sub_objs, using): + collector.add_field_update(field, None, sub_objs) + + +SET_NULL.lazy_sub_objs = True + + +def SET_DEFAULT(collector, field, sub_objs, using): + collector.add_field_update(field, field.get_default(), sub_objs) + + +SET_DEFAULT.lazy_sub_objs = True + + +def DO_NOTHING(collector, field, sub_objs, using): + pass + + +def get_candidate_relations_to_delete(opts): + # The candidate relations are the ones that come from N-1 and 1-1 relations. + # N-N (i.e., many-to-many) relations aren't candidates for deletion. + return ( + f + for f in opts.get_fields(include_hidden=True) + if f.auto_created and not f.concrete and (f.one_to_one or f.one_to_many) + ) + + +class Collector: + def __init__(self, using, origin=None): + self.using = using + # A Model or QuerySet object. + self.origin = origin + # Initially, {model: {instances}}, later values become lists. + self.data = defaultdict(set) + # {(field, value): [instances, …]} + self.field_updates = defaultdict(list) + # {model: {field: {instances}}} + self.restricted_objects = defaultdict(partial(defaultdict, set)) + # fast_deletes is a list of queryset-likes that can be deleted without + # fetching the objects into memory. + self.fast_deletes = [] + + # Tracks deletion-order dependency for databases without transactions + # or ability to defer constraint checks. Only concrete model classes + # should be included, as the dependencies exist only between actual + # database tables; proxy models are represented here by their concrete + # parent. + self.dependencies = defaultdict(set) # {model: {models}} + + def add(self, objs, source=None, nullable=False, reverse_dependency=False): + """ + Add 'objs' to the collection of objects to be deleted. If the call is + the result of a cascade, 'source' should be the model that caused it, + and 'nullable' should be set to True if the relation can be null. + + Return a list of all objects that were not already collected. + """ + if not objs: + return [] + new_objs = [] + model = objs[0].__class__ + instances = self.data[model] + for obj in objs: + if obj not in instances: + new_objs.append(obj) + instances.update(new_objs) + # Nullable relationships can be ignored -- they are nulled out before + # deleting, and therefore do not affect the order in which objects have + # to be deleted. + if source is not None and not nullable: + self.add_dependency(source, model, reverse_dependency=reverse_dependency) + return new_objs + + def add_dependency(self, model, dependency, reverse_dependency=False): + if reverse_dependency: + model, dependency = dependency, model + self.dependencies[model._meta.concrete_model].add( + dependency._meta.concrete_model + ) + self.data.setdefault(dependency, self.data.default_factory()) + + def add_field_update(self, field, value, objs): + """ + Schedule a field update. 'objs' must be a homogeneous iterable + collection of model instances (e.g. a QuerySet). 
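+
+        For example, SET_NULL above schedules its update with a single call:
+        collector.add_field_update(field, None, sub_objs).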
+ """ + self.field_updates[field, value].append(objs) + + def add_restricted_objects(self, field, objs): + if objs: + model = objs[0].__class__ + self.restricted_objects[model][field].update(objs) + + def clear_restricted_objects_from_set(self, model, objs): + if model in self.restricted_objects: + self.restricted_objects[model] = { + field: items - objs + for field, items in self.restricted_objects[model].items() + } + + def clear_restricted_objects_from_queryset(self, model, qs): + if model in self.restricted_objects: + objs = set( + qs.filter( + pk__in=[ + obj.pk + for objs in self.restricted_objects[model].values() + for obj in objs + ] + ) + ) + self.clear_restricted_objects_from_set(model, objs) + + def _has_signal_listeners(self, model): + return signals.pre_delete.has_listeners( + model + ) or signals.post_delete.has_listeners(model) + + def can_fast_delete(self, objs, from_field=None): + """ + Determine if the objects in the given queryset-like or single object + can be fast-deleted. This can be done if there are no cascades, no + parents and no signal listeners for the object class. + + The 'from_field' tells where we are coming from - we need this to + determine if the objects are in fact to be deleted. Allow also + skipping parent -> child -> parent chain preventing fast delete of + the child. + """ + if from_field and from_field.remote_field.on_delete is not CASCADE: + return False + if hasattr(objs, "_meta"): + model = objs._meta.model + elif hasattr(objs, "model") and hasattr(objs, "_raw_delete"): + model = objs.model + else: + return False + if self._has_signal_listeners(model): + return False + # The use of from_field comes from the need to avoid cascade back to + # parent when parent delete is cascading to child. + opts = model._meta + return ( + all( + link == from_field + for link in opts.concrete_model._meta.parents.values() + ) + and + # Foreign keys pointing to this model. + all( + related.field.remote_field.on_delete is DO_NOTHING + for related in get_candidate_relations_to_delete(opts) + ) + and ( + # Something like generic foreign key. + not any( + hasattr(field, "bulk_related_objects") + for field in opts.private_fields + ) + ) + ) + + def get_del_batches(self, objs, fields): + """ + Return the objs in suitably sized batches for the used connection. + """ + field_names = [field.name for field in fields] + conn_batch_size = max( + connections[self.using].ops.bulk_batch_size(field_names, objs), 1 + ) + if len(objs) > conn_batch_size: + return [ + objs[i : i + conn_batch_size] + for i in range(0, len(objs), conn_batch_size) + ] + else: + return [objs] + + def collect( + self, + objs, + source=None, + nullable=False, + collect_related=True, + source_attr=None, + reverse_dependency=False, + keep_parents=False, + fail_on_restricted=True, + ): + """ + Add 'objs' to the collection of objects to be deleted as well as all + parent instances. 'objs' must be a homogeneous iterable collection of + model instances (e.g. a QuerySet). If 'collect_related' is True, + related objects will be handled by their respective on_delete handler. + + If the call is the result of a cascade, 'source' should be the model + that caused it and 'nullable' should be set to True, if the relation + can be null. + + If 'reverse_dependency' is True, 'source' will be deleted before the + current model, rather than after. (Needed for cascading to parent + models, the one case in which the cascade follows the forwards + direction of an FK rather than the reverse direction.) 
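+
+        For example, with multi-table inheritance the child row holds the
+        foreign key to its parent row, so the parent row must be deleted
+        after the child row even though collection cascades from child to
+        parent.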
+
+        If 'keep_parents' is True, the data of parent models will not be
+        deleted.
+
+        If 'fail_on_restricted' is False, no error is raised even if deleting
+        some of the objects is prohibited by RESTRICT; this defers the
+        restricted-object check to recursive calls, where the top-level call
+        may still need to collect more objects to determine whether the
+        restricted ones can be deleted.
+        """
+        if self.can_fast_delete(objs):
+            self.fast_deletes.append(objs)
+            return
+        new_objs = self.add(
+            objs, source, nullable, reverse_dependency=reverse_dependency
+        )
+        if not new_objs:
+            return
+
+        model = new_objs[0].__class__
+
+        if not keep_parents:
+            # Recursively collect concrete model's parent models, but not their
+            # related objects. These will be found by meta.get_fields().
+            concrete_model = model._meta.concrete_model
+            for ptr in concrete_model._meta.parents.values():
+                if ptr:
+                    parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
+                    self.collect(
+                        parent_objs,
+                        source=model,
+                        source_attr=ptr.remote_field.related_name,
+                        collect_related=False,
+                        reverse_dependency=True,
+                        fail_on_restricted=False,
+                    )
+        if not collect_related:
+            return
+
+        if keep_parents:
+            parents = set(model._meta.get_parent_list())
+        model_fast_deletes = defaultdict(list)
+        protected_objects = defaultdict(list)
+        for related in get_candidate_relations_to_delete(model._meta):
+            # Preserve parent reverse relationships if keep_parents=True.
+            if keep_parents and related.model in parents:
+                continue
+            field = related.field
+            on_delete = field.remote_field.on_delete
+            if on_delete == DO_NOTHING:
+                continue
+            related_model = related.related_model
+            if self.can_fast_delete(related_model, from_field=field):
+                model_fast_deletes[related_model].append(field)
+                continue
+            batches = self.get_del_batches(new_objs, [field])
+            for batch in batches:
+                sub_objs = self.related_objects(related_model, [field], batch)
+                # Non-referenced fields can be deferred if no signal receivers
+                # are connected for the related model as they'll never be
+                # exposed to the user. Skip field deferring when some
+                # relationships are select_related as interactions between both
+                # features are hard to get right. This should only happen in
+                # the rare cases where .related_objects is overridden anyway.
+                if not (
+                    sub_objs.query.select_related
+                    or self._has_signal_listeners(related_model)
+                ):
+                    referenced_fields = set(
+                        chain.from_iterable(
+                            (rf.attname for rf in rel.field.foreign_related_fields)
+                            for rel in get_candidate_relations_to_delete(
+                                related_model._meta
+                            )
+                        )
+                    )
+                    sub_objs = sub_objs.only(*tuple(referenced_fields))
+                if getattr(on_delete, "lazy_sub_objs", False) or sub_objs:
+                    try:
+                        on_delete(self, field, sub_objs, self.using)
+                    except ProtectedError as error:
+                        key = "'%s.%s'" % (field.model.__name__, field.name)
+                        protected_objects[key] += error.protected_objects
+        if protected_objects:
+            raise ProtectedError(
+                "Cannot delete some instances of model %r because they are "
+                "referenced through protected foreign keys: %s."
+                % (
+                    model.__name__,
+                    ", ".join(protected_objects),
+                ),
+                set(chain.from_iterable(protected_objects.values())),
+            )
+        for related_model, related_fields in model_fast_deletes.items():
+            batches = self.get_del_batches(new_objs, related_fields)
+            for batch in batches:
+                sub_objs = self.related_objects(related_model, related_fields, batch)
+                self.fast_deletes.append(sub_objs)
+        for field in model._meta.private_fields:
+            if hasattr(field, "bulk_related_objects"):
+                # It's something like generic foreign key.
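+                # (GenericRelation in django.contrib.contenttypes defines
+                # bulk_related_objects(); its cascade isn't discoverable via
+                # get_candidate_relations_to_delete(), so the field supplies
+                # the related objects directly.)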
+ sub_objs = field.bulk_related_objects(new_objs, self.using) + self.collect( + sub_objs, source=model, nullable=True, fail_on_restricted=False + ) + + if fail_on_restricted: + # Raise an error if collected restricted objects (RESTRICT) aren't + # candidates for deletion also collected via CASCADE. + for related_model, instances in self.data.items(): + self.clear_restricted_objects_from_set(related_model, instances) + for qs in self.fast_deletes: + self.clear_restricted_objects_from_queryset(qs.model, qs) + if self.restricted_objects.values(): + restricted_objects = defaultdict(list) + for related_model, fields in self.restricted_objects.items(): + for field, objs in fields.items(): + if objs: + key = "'%s.%s'" % (related_model.__name__, field.name) + restricted_objects[key] += objs + if restricted_objects: + raise RestrictedError( + "Cannot delete some instances of model %r because " + "they are referenced through restricted foreign keys: " + "%s." + % ( + model.__name__, + ", ".join(restricted_objects), + ), + set(chain.from_iterable(restricted_objects.values())), + ) + + def related_objects(self, related_model, related_fields, objs): + """ + Get a QuerySet of the related model to objs via related fields. + """ + predicate = query_utils.Q.create( + [(f"{related_field.name}__in", objs) for related_field in related_fields], + connector=query_utils.Q.OR, + ) + return related_model._base_manager.using(self.using).filter(predicate) + + def instances_with_model(self): + for model, instances in self.data.items(): + for obj in instances: + yield model, obj + + def sort(self): + sorted_models = [] + concrete_models = set() + models = list(self.data) + while len(sorted_models) < len(models): + found = False + for model in models: + if model in sorted_models: + continue + dependencies = self.dependencies.get(model._meta.concrete_model) + if not (dependencies and dependencies.difference(concrete_models)): + sorted_models.append(model) + concrete_models.add(model._meta.concrete_model) + found = True + if not found: + return + self.data = {model: self.data[model] for model in sorted_models} + + def delete(self): + # sort instance collections + for model, instances in self.data.items(): + self.data[model] = sorted(instances, key=attrgetter("pk")) + + # if possible, bring the models in an order suitable for databases that + # don't support transactions or cannot defer constraint checks until the + # end of a transaction. 
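+        # sort() is a simple topological sort over self.dependencies; if it
+        # detects a dependency cycle it gives up and keeps the existing
+        # ordering rather than raising.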
+        self.sort()
+        # number of objects deleted for each model label
+        deleted_counter = Counter()
+
+        # Optimize for the case with a single obj and no dependencies
+        if len(self.data) == 1 and len(instances) == 1:
+            instance = list(instances)[0]
+            if self.can_fast_delete(instance):
+                with transaction.mark_for_rollback_on_error(self.using):
+                    count = sql.DeleteQuery(model).delete_batch(
+                        [instance.pk], self.using
+                    )
+                setattr(instance, model._meta.pk.attname, None)
+                return count, {model._meta.label: count}
+
+        with transaction.atomic(using=self.using, savepoint=False):
+            # send pre_delete signals
+            for model, obj in self.instances_with_model():
+                if not model._meta.auto_created:
+                    signals.pre_delete.send(
+                        sender=model,
+                        instance=obj,
+                        using=self.using,
+                        origin=self.origin,
+                    )
+
+            # fast deletes
+            for qs in self.fast_deletes:
+                count = qs._raw_delete(using=self.using)
+                if count:
+                    deleted_counter[qs.model._meta.label] += count
+
+            # update fields
+            for (field, value), instances_list in self.field_updates.items():
+                updates = []
+                objs = []
+                for instances in instances_list:
+                    if (
+                        isinstance(instances, models.QuerySet)
+                        and instances._result_cache is None
+                    ):
+                        updates.append(instances)
+                    else:
+                        objs.extend(instances)
+                if updates:
+                    combined_updates = reduce(or_, updates)
+                    combined_updates.update(**{field.name: value})
+                if objs:
+                    model = objs[0].__class__
+                    query = sql.UpdateQuery(model)
+                    query.update_batch(
+                        list({obj.pk for obj in objs}), {field.name: value}, self.using
+                    )
+
+            # reverse instance collections
+            for instances in self.data.values():
+                instances.reverse()
+
+            # delete instances
+            for model, instances in self.data.items():
+                query = sql.DeleteQuery(model)
+                pk_list = [obj.pk for obj in instances]
+                count = query.delete_batch(pk_list, self.using)
+                if count:
+                    deleted_counter[model._meta.label] += count
+
+                if not model._meta.auto_created:
+                    for obj in instances:
+                        signals.post_delete.send(
+                            sender=model,
+                            instance=obj,
+                            using=self.using,
+                            origin=self.origin,
+                        )
+
+        for model, instances in self.data.items():
+            for instance in instances:
+                setattr(instance, model._meta.pk.attname, None)
+        return sum(deleted_counter.values()), dict(deleted_counter)
diff --git a/testbed/django__django/django/db/models/enums.py b/testbed/django__django/django/db/models/enums.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a7a2bb70fe4b91ac4dc7d6b160a7160c949e7a6
--- /dev/null
+++ b/testbed/django__django/django/db/models/enums.py
@@ -0,0 +1,92 @@
+import enum
+from types import DynamicClassAttribute
+
+from django.utils.functional import Promise
+
+__all__ = ["Choices", "IntegerChoices", "TextChoices"]
+
+
+class ChoicesMeta(enum.EnumMeta):
+    """A metaclass for creating enum choices."""
+
+    def __new__(metacls, classname, bases, classdict, **kwds):
+        labels = []
+        for key in classdict._member_names:
+            value = classdict[key]
+            if (
+                isinstance(value, (list, tuple))
+                and len(value) > 1
+                and isinstance(value[-1], (Promise, str))
+            ):
+                *value, label = value
+                value = tuple(value)
+            else:
+                label = key.replace("_", " ").title()
+            labels.append(label)
+            # Use dict.__setitem__() to suppress defenses against double
+            # assignment in enum's classdict.
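+            # (classdict is an enum._EnumDict, whose __setitem__ raises
+            # TypeError when a member name is reused; the plain dict call
+            # bypasses that check.)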
+ dict.__setitem__(classdict, key, value) + cls = super().__new__(metacls, classname, bases, classdict, **kwds) + for member, label in zip(cls.__members__.values(), labels): + member._label_ = label + return enum.unique(cls) + + def __contains__(cls, member): + if not isinstance(member, enum.Enum): + # Allow non-enums to match against member values. + return any(x.value == member for x in cls) + return super().__contains__(member) + + @property + def names(cls): + empty = ["__empty__"] if hasattr(cls, "__empty__") else [] + return empty + [member.name for member in cls] + + @property + def choices(cls): + empty = [(None, cls.__empty__)] if hasattr(cls, "__empty__") else [] + return empty + [(member.value, member.label) for member in cls] + + @property + def labels(cls): + return [label for _, label in cls.choices] + + @property + def values(cls): + return [value for value, _ in cls.choices] + + +class Choices(enum.Enum, metaclass=ChoicesMeta): + """Class for creating enumerated choices.""" + + @DynamicClassAttribute + def label(self): + return self._label_ + + @property + def do_not_call_in_templates(self): + return True + + def __str__(self): + """ + Use value when cast to str, so that Choices set as model instance + attributes are rendered as expected in templates and similar contexts. + """ + return str(self.value) + + # A similar format was proposed for Python 3.10. + def __repr__(self): + return f"{self.__class__.__qualname__}.{self._name_}" + + +class IntegerChoices(int, Choices): + """Class for creating enumerated integer choices.""" + + pass + + +class TextChoices(str, Choices): + """Class for creating enumerated string choices.""" + + def _generate_next_value_(name, start, count, last_values): + return name diff --git a/testbed/django__django/django/db/models/expressions.py b/testbed/django__django/django/db/models/expressions.py new file mode 100644 index 0000000000000000000000000000000000000000..280cb967b4ea673b301ba2014b9f2a3692983901 --- /dev/null +++ b/testbed/django__django/django/db/models/expressions.py @@ -0,0 +1,1890 @@ +import copy +import datetime +import functools +import inspect +from collections import defaultdict +from decimal import Decimal +from types import NoneType +from uuid import UUID + +from django.core.exceptions import EmptyResultSet, FieldError, FullResultSet +from django.db import DatabaseError, NotSupportedError, connection +from django.db.models import fields +from django.db.models.constants import LOOKUP_SEP +from django.db.models.query_utils import Q +from django.utils.deconstruct import deconstructible +from django.utils.functional import cached_property +from django.utils.hashable import make_hashable + + +class SQLiteNumericMixin: + """ + Some expressions with output_field=DecimalField() must be cast to + numeric to be properly filtered. + """ + + def as_sqlite(self, compiler, connection, **extra_context): + sql, params = self.as_sql(compiler, connection, **extra_context) + try: + if self.output_field.get_internal_type() == "DecimalField": + sql = "CAST(%s AS NUMERIC)" % sql + except FieldError: + pass + return sql, params + + +class Combinable: + """ + Provide the ability to combine one or two objects with + some connector. For example F('foo') + F('bar'). + """ + + # Arithmetic connectors + ADD = "+" + SUB = "-" + MUL = "*" + DIV = "/" + POW = "^" + # The following is a quoted % operator - it is quoted because it can be + # used in strings that also have parameter substitution. 
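+    # e.g. F("hits") % 10 renders as ("hits" %% %s); the doubled percent
+    # collapses to a single literal % when the %-style parameter
+    # placeholders are substituted.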
+ MOD = "%%" + + # Bitwise operators - note that these are generated by .bitand() + # and .bitor(), the '&' and '|' are reserved for boolean operator + # usage. + BITAND = "&" + BITOR = "|" + BITLEFTSHIFT = "<<" + BITRIGHTSHIFT = ">>" + BITXOR = "#" + + def _combine(self, other, connector, reversed): + if not hasattr(other, "resolve_expression"): + # everything must be resolvable to an expression + other = Value(other) + + if reversed: + return CombinedExpression(other, connector, self) + return CombinedExpression(self, connector, other) + + ############# + # OPERATORS # + ############# + + def __neg__(self): + return self._combine(-1, self.MUL, False) + + def __add__(self, other): + return self._combine(other, self.ADD, False) + + def __sub__(self, other): + return self._combine(other, self.SUB, False) + + def __mul__(self, other): + return self._combine(other, self.MUL, False) + + def __truediv__(self, other): + return self._combine(other, self.DIV, False) + + def __mod__(self, other): + return self._combine(other, self.MOD, False) + + def __pow__(self, other): + return self._combine(other, self.POW, False) + + def __and__(self, other): + if getattr(self, "conditional", False) and getattr(other, "conditional", False): + return Q(self) & Q(other) + raise NotImplementedError( + "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." + ) + + def bitand(self, other): + return self._combine(other, self.BITAND, False) + + def bitleftshift(self, other): + return self._combine(other, self.BITLEFTSHIFT, False) + + def bitrightshift(self, other): + return self._combine(other, self.BITRIGHTSHIFT, False) + + def __xor__(self, other): + if getattr(self, "conditional", False) and getattr(other, "conditional", False): + return Q(self) ^ Q(other) + raise NotImplementedError( + "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." + ) + + def bitxor(self, other): + return self._combine(other, self.BITXOR, False) + + def __or__(self, other): + if getattr(self, "conditional", False) and getattr(other, "conditional", False): + return Q(self) | Q(other) + raise NotImplementedError( + "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." + ) + + def bitor(self, other): + return self._combine(other, self.BITOR, False) + + def __radd__(self, other): + return self._combine(other, self.ADD, True) + + def __rsub__(self, other): + return self._combine(other, self.SUB, True) + + def __rmul__(self, other): + return self._combine(other, self.MUL, True) + + def __rtruediv__(self, other): + return self._combine(other, self.DIV, True) + + def __rmod__(self, other): + return self._combine(other, self.MOD, True) + + def __rpow__(self, other): + return self._combine(other, self.POW, True) + + def __rand__(self, other): + raise NotImplementedError( + "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." + ) + + def __ror__(self, other): + raise NotImplementedError( + "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." + ) + + def __rxor__(self, other): + raise NotImplementedError( + "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." + ) + + def __invert__(self): + return NegatedExpression(self) + + +class BaseExpression: + """Base class for all query expressions.""" + + empty_result_set_value = NotImplemented + # aggregate specific fields + is_summary = False + _output_field_resolved_to_none = False + # Can the expression be used in a WHERE clause? 
+    filterable = True
+    # Can the expression be used as a source expression in Window?
+    window_compatible = False
+    # Can the expression be used as a database default value?
+    allowed_default = False
+
+    def __init__(self, output_field=None):
+        if output_field is not None:
+            self.output_field = output_field
+
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        state.pop("convert_value", None)
+        return state
+
+    def get_db_converters(self, connection):
+        return (
+            []
+            if self.convert_value is self._convert_value_noop
+            else [self.convert_value]
+        ) + self.output_field.get_db_converters(connection)
+
+    def get_source_expressions(self):
+        return []
+
+    def set_source_expressions(self, exprs):
+        assert not exprs
+
+    def _parse_expressions(self, *expressions):
+        return [
+            arg
+            if hasattr(arg, "resolve_expression")
+            else (F(arg) if isinstance(arg, str) else Value(arg))
+            for arg in expressions
+        ]
+
+    def as_sql(self, compiler, connection):
+        """
+        Responsible for returning a (sql, [params]) tuple to be included
+        in the current query.
+
+        Different backends can provide their own implementation, by
+        providing an `as_{vendor}` method and patching the Expression:
+
+        ```
+        def override_as_sql(self, compiler, connection):
+            # custom logic
+            return super().as_sql(compiler, connection)
+        setattr(Expression, 'as_' + connection.vendor, override_as_sql)
+        ```
+
+        Arguments:
+         * compiler: the query compiler responsible for generating the query.
+           Must have a compile method, returning a (sql, [params]) tuple.
+           Calling compiler(value) will return a quoted `value`.
+
+         * connection: the database connection used for the current query.
+
+        Return: (sql, params)
+          Where `sql` is a string containing ordered sql parameters to be
+          replaced with the elements of the list `params`.
+        """
+        raise NotImplementedError("Subclasses must implement as_sql()")
+
+    @cached_property
+    def contains_aggregate(self):
+        return any(
+            expr and expr.contains_aggregate for expr in self.get_source_expressions()
+        )
+
+    @cached_property
+    def contains_over_clause(self):
+        return any(
+            expr and expr.contains_over_clause
+            for expr in self.get_source_expressions()
+        )
+
+    @cached_property
+    def contains_column_references(self):
+        return any(
+            expr and expr.contains_column_references
+            for expr in self.get_source_expressions()
+        )
+
+    def resolve_expression(
+        self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
+    ):
+        """
+        Provide the chance to do any preprocessing or validation before being
+        added to the query.
+
+        Arguments:
+         * query: the backend query implementation
+         * allow_joins: boolean allowing or denying use of joins
+           in this query
+         * reuse: a set of reusable joins for multijoins
+         * summarize: a terminal aggregate clause
+         * for_save: whether this expression is about to be used in a save
+           or update
+
+        Return: an Expression to be added to the query.
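+
+        For example, F('price') resolves to a Col bound to the model's
+        "price" column via query.resolve_ref() (see F.resolve_expression
+        below).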
+        """
+        c = self.copy()
+        c.is_summary = summarize
+        c.set_source_expressions(
+            [
+                expr.resolve_expression(query, allow_joins, reuse, summarize)
+                if expr
+                else None
+                for expr in c.get_source_expressions()
+            ]
+        )
+        return c
+
+    @property
+    def conditional(self):
+        return isinstance(self.output_field, fields.BooleanField)
+
+    @property
+    def field(self):
+        return self.output_field
+
+    @cached_property
+    def output_field(self):
+        """Return the output type of this expression."""
+        output_field = self._resolve_output_field()
+        if output_field is None:
+            self._output_field_resolved_to_none = True
+            raise FieldError("Cannot resolve expression type, unknown output_field")
+        return output_field
+
+    @cached_property
+    def _output_field_or_none(self):
+        """
+        Return the output field of this expression, or None if
+        _resolve_output_field() didn't return an output type.
+        """
+        try:
+            return self.output_field
+        except FieldError:
+            if not self._output_field_resolved_to_none:
+                raise
+
+    def _resolve_output_field(self):
+        """
+        Attempt to infer the output type of the expression.
+
+        As a guess, if the output fields of all source fields match then simply
+        infer the same type here.
+
+        If a source's output field resolves to None, exclude it from this check.
+        If all sources are None, then an error is raised higher up the stack in
+        the output_field property.
+        """
+        # This guess is mostly a bad idea, but there is quite a lot of code
+        # (especially 3rd party Func subclasses) that depends on it, so we'd
+        # need a deprecation path to fix it.
+        sources_iter = (
+            source for source in self.get_source_fields() if source is not None
+        )
+        for output_field in sources_iter:
+            for source in sources_iter:
+                if not isinstance(output_field, source.__class__):
+                    raise FieldError(
+                        "Expression contains mixed types: %s, %s. You must "
+                        "set output_field."
+                        % (
+                            output_field.__class__.__name__,
+                            source.__class__.__name__,
+                        )
+                    )
+            return output_field
+
+    @staticmethod
+    def _convert_value_noop(value, expression, connection):
+        return value
+
+    @cached_property
+    def convert_value(self):
+        """
+        Expressions provide their own converters because users have the option
+        of manually specifying the output_field which may be a different type
+        from the one the database returns.
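+
+        For example, an annotation declared with output_field=DecimalField()
+        may come back from the driver as a float or string; the converter
+        below rebuilds a Decimal from it.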
+ """ + field = self.output_field + internal_type = field.get_internal_type() + if internal_type == "FloatField": + return ( + lambda value, expression, connection: None + if value is None + else float(value) + ) + elif internal_type.endswith("IntegerField"): + return ( + lambda value, expression, connection: None + if value is None + else int(value) + ) + elif internal_type == "DecimalField": + return ( + lambda value, expression, connection: None + if value is None + else Decimal(value) + ) + return self._convert_value_noop + + def get_lookup(self, lookup): + return self.output_field.get_lookup(lookup) + + def get_transform(self, name): + return self.output_field.get_transform(name) + + def relabeled_clone(self, change_map): + clone = self.copy() + clone.set_source_expressions( + [ + e.relabeled_clone(change_map) if e is not None else None + for e in self.get_source_expressions() + ] + ) + return clone + + def replace_expressions(self, replacements): + if replacement := replacements.get(self): + return replacement + clone = self.copy() + source_expressions = clone.get_source_expressions() + clone.set_source_expressions( + [ + expr.replace_expressions(replacements) if expr else None + for expr in source_expressions + ] + ) + return clone + + def get_refs(self): + refs = set() + for expr in self.get_source_expressions(): + refs |= expr.get_refs() + return refs + + def copy(self): + return copy.copy(self) + + def prefix_references(self, prefix): + clone = self.copy() + clone.set_source_expressions( + [ + F(f"{prefix}{expr.name}") + if isinstance(expr, F) + else expr.prefix_references(prefix) + for expr in self.get_source_expressions() + ] + ) + return clone + + def get_group_by_cols(self): + if not self.contains_aggregate: + return [self] + cols = [] + for source in self.get_source_expressions(): + cols.extend(source.get_group_by_cols()) + return cols + + def get_source_fields(self): + """Return the underlying field types used by this aggregate.""" + return [e._output_field_or_none for e in self.get_source_expressions()] + + def asc(self, **kwargs): + return OrderBy(self, **kwargs) + + def desc(self, **kwargs): + return OrderBy(self, descending=True, **kwargs) + + def reverse_ordering(self): + return self + + def flatten(self): + """ + Recursively yield this expression and all subexpressions, in + depth-first order. + """ + yield self + for expr in self.get_source_expressions(): + if expr: + if hasattr(expr, "flatten"): + yield from expr.flatten() + else: + yield expr + + def select_format(self, compiler, sql, params): + """ + Custom format for select clauses. For example, EXISTS expressions need + to be wrapped in CASE WHEN on Oracle. 
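+        (Exists.select_format below does exactly that, emitting
+        CASE WHEN ... THEN 1 ELSE 0 END on such backends.)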
+ """ + if hasattr(self.output_field, "select_format"): + return self.output_field.select_format(compiler, sql, params) + return sql, params + + +@deconstructible +class Expression(BaseExpression, Combinable): + """An expression that can be combined with other expressions.""" + + @cached_property + def identity(self): + constructor_signature = inspect.signature(self.__init__) + args, kwargs = self._constructor_args + signature = constructor_signature.bind_partial(*args, **kwargs) + signature.apply_defaults() + arguments = signature.arguments.items() + identity = [self.__class__] + for arg, value in arguments: + if isinstance(value, fields.Field): + if value.name and value.model: + value = (value.model._meta.label, value.name) + else: + value = type(value) + else: + value = make_hashable(value) + identity.append((arg, value)) + return tuple(identity) + + def __eq__(self, other): + if not isinstance(other, Expression): + return NotImplemented + return other.identity == self.identity + + def __hash__(self): + return hash(self.identity) + + +# Type inference for CombinedExpression.output_field. +# Missing items will result in FieldError, by design. +# +# The current approach for NULL is based on lowest common denominator behavior +# i.e. if one of the supported databases is raising an error (rather than +# return NULL) for `val NULL`, then Django raises FieldError. + +_connector_combinations = [ + # Numeric operations - operands of same type. + { + connector: [ + (fields.IntegerField, fields.IntegerField, fields.IntegerField), + (fields.FloatField, fields.FloatField, fields.FloatField), + (fields.DecimalField, fields.DecimalField, fields.DecimalField), + ] + for connector in ( + Combinable.ADD, + Combinable.SUB, + Combinable.MUL, + # Behavior for DIV with integer arguments follows Postgres/SQLite, + # not MySQL/Oracle. + Combinable.DIV, + Combinable.MOD, + Combinable.POW, + ) + }, + # Numeric operations - operands of different type. + { + connector: [ + (fields.IntegerField, fields.DecimalField, fields.DecimalField), + (fields.DecimalField, fields.IntegerField, fields.DecimalField), + (fields.IntegerField, fields.FloatField, fields.FloatField), + (fields.FloatField, fields.IntegerField, fields.FloatField), + ] + for connector in ( + Combinable.ADD, + Combinable.SUB, + Combinable.MUL, + Combinable.DIV, + Combinable.MOD, + ) + }, + # Bitwise operators. + { + connector: [ + (fields.IntegerField, fields.IntegerField, fields.IntegerField), + ] + for connector in ( + Combinable.BITAND, + Combinable.BITOR, + Combinable.BITLEFTSHIFT, + Combinable.BITRIGHTSHIFT, + Combinable.BITXOR, + ) + }, + # Numeric with NULL. + { + connector: [ + (field_type, NoneType, field_type), + (NoneType, field_type, field_type), + ] + for connector in ( + Combinable.ADD, + Combinable.SUB, + Combinable.MUL, + Combinable.DIV, + Combinable.MOD, + Combinable.POW, + ) + for field_type in (fields.IntegerField, fields.DecimalField, fields.FloatField) + }, + # Date/DateTimeField/DurationField/TimeField. + { + Combinable.ADD: [ + # Date/DateTimeField. + (fields.DateField, fields.DurationField, fields.DateTimeField), + (fields.DateTimeField, fields.DurationField, fields.DateTimeField), + (fields.DurationField, fields.DateField, fields.DateTimeField), + (fields.DurationField, fields.DateTimeField, fields.DateTimeField), + # DurationField. + (fields.DurationField, fields.DurationField, fields.DurationField), + # TimeField. 
+ (fields.TimeField, fields.DurationField, fields.TimeField), + (fields.DurationField, fields.TimeField, fields.TimeField), + ], + }, + { + Combinable.SUB: [ + # Date/DateTimeField. + (fields.DateField, fields.DurationField, fields.DateTimeField), + (fields.DateTimeField, fields.DurationField, fields.DateTimeField), + (fields.DateField, fields.DateField, fields.DurationField), + (fields.DateField, fields.DateTimeField, fields.DurationField), + (fields.DateTimeField, fields.DateField, fields.DurationField), + (fields.DateTimeField, fields.DateTimeField, fields.DurationField), + # DurationField. + (fields.DurationField, fields.DurationField, fields.DurationField), + # TimeField. + (fields.TimeField, fields.DurationField, fields.TimeField), + (fields.TimeField, fields.TimeField, fields.DurationField), + ], + }, +] + +_connector_combinators = defaultdict(list) + + +def register_combinable_fields(lhs, connector, rhs, result): + """ + Register combinable types: + lhs rhs -> result + e.g. + register_combinable_fields( + IntegerField, Combinable.ADD, FloatField, FloatField + ) + """ + _connector_combinators[connector].append((lhs, rhs, result)) + + +for d in _connector_combinations: + for connector, field_types in d.items(): + for lhs, rhs, result in field_types: + register_combinable_fields(lhs, connector, rhs, result) + + +@functools.lru_cache(maxsize=128) +def _resolve_combined_type(connector, lhs_type, rhs_type): + combinators = _connector_combinators.get(connector, ()) + for combinator_lhs_type, combinator_rhs_type, combined_type in combinators: + if issubclass(lhs_type, combinator_lhs_type) and issubclass( + rhs_type, combinator_rhs_type + ): + return combined_type + + +class CombinedExpression(SQLiteNumericMixin, Expression): + def __init__(self, lhs, connector, rhs, output_field=None): + super().__init__(output_field=output_field) + self.connector = connector + self.lhs = lhs + self.rhs = rhs + + def __repr__(self): + return "<{}: {}>".format(self.__class__.__name__, self) + + def __str__(self): + return "{} {} {}".format(self.lhs, self.connector, self.rhs) + + def get_source_expressions(self): + return [self.lhs, self.rhs] + + def set_source_expressions(self, exprs): + self.lhs, self.rhs = exprs + + def _resolve_output_field(self): + # We avoid using super() here for reasons given in + # Expression._resolve_output_field() + combined_type = _resolve_combined_type( + self.connector, + type(self.lhs._output_field_or_none), + type(self.rhs._output_field_or_none), + ) + if combined_type is None: + raise FieldError( + f"Cannot infer type of {self.connector!r} expression involving these " + f"types: {self.lhs.output_field.__class__.__name__}, " + f"{self.rhs.output_field.__class__.__name__}. You must set " + f"output_field." 
+ ) + return combined_type() + + def as_sql(self, compiler, connection): + expressions = [] + expression_params = [] + sql, params = compiler.compile(self.lhs) + expressions.append(sql) + expression_params.extend(params) + sql, params = compiler.compile(self.rhs) + expressions.append(sql) + expression_params.extend(params) + # order of precedence + expression_wrapper = "(%s)" + sql = connection.ops.combine_expression(self.connector, expressions) + return expression_wrapper % sql, expression_params + + def resolve_expression( + self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False + ): + lhs = self.lhs.resolve_expression( + query, allow_joins, reuse, summarize, for_save + ) + rhs = self.rhs.resolve_expression( + query, allow_joins, reuse, summarize, for_save + ) + if not isinstance(self, (DurationExpression, TemporalSubtraction)): + try: + lhs_type = lhs.output_field.get_internal_type() + except (AttributeError, FieldError): + lhs_type = None + try: + rhs_type = rhs.output_field.get_internal_type() + except (AttributeError, FieldError): + rhs_type = None + if "DurationField" in {lhs_type, rhs_type} and lhs_type != rhs_type: + return DurationExpression( + self.lhs, self.connector, self.rhs + ).resolve_expression( + query, + allow_joins, + reuse, + summarize, + for_save, + ) + datetime_fields = {"DateField", "DateTimeField", "TimeField"} + if ( + self.connector == self.SUB + and lhs_type in datetime_fields + and lhs_type == rhs_type + ): + return TemporalSubtraction(self.lhs, self.rhs).resolve_expression( + query, + allow_joins, + reuse, + summarize, + for_save, + ) + c = self.copy() + c.is_summary = summarize + c.lhs = lhs + c.rhs = rhs + return c + + @cached_property + def allowed_default(self): + return self.lhs.allowed_default and self.rhs.allowed_default + + +class DurationExpression(CombinedExpression): + def compile(self, side, compiler, connection): + try: + output = side.output_field + except FieldError: + pass + else: + if output.get_internal_type() == "DurationField": + sql, params = compiler.compile(side) + return connection.ops.format_for_duration_arithmetic(sql), params + return compiler.compile(side) + + def as_sql(self, compiler, connection): + if connection.features.has_native_duration_field: + return super().as_sql(compiler, connection) + connection.ops.check_expression_support(self) + expressions = [] + expression_params = [] + sql, params = self.compile(self.lhs, compiler, connection) + expressions.append(sql) + expression_params.extend(params) + sql, params = self.compile(self.rhs, compiler, connection) + expressions.append(sql) + expression_params.extend(params) + # order of precedence + expression_wrapper = "(%s)" + sql = connection.ops.combine_duration_expression(self.connector, expressions) + return expression_wrapper % sql, expression_params + + def as_sqlite(self, compiler, connection, **extra_context): + sql, params = self.as_sql(compiler, connection, **extra_context) + if self.connector in {Combinable.MUL, Combinable.DIV}: + try: + lhs_type = self.lhs.output_field.get_internal_type() + rhs_type = self.rhs.output_field.get_internal_type() + except (AttributeError, FieldError): + pass + else: + allowed_fields = { + "DecimalField", + "DurationField", + "FloatField", + "IntegerField", + } + if lhs_type not in allowed_fields or rhs_type not in allowed_fields: + raise DatabaseError( + f"Invalid arguments for operator {self.connector}." 
+ ) + return sql, params + + +class TemporalSubtraction(CombinedExpression): + output_field = fields.DurationField() + + def __init__(self, lhs, rhs): + super().__init__(lhs, self.SUB, rhs) + + def as_sql(self, compiler, connection): + connection.ops.check_expression_support(self) + lhs = compiler.compile(self.lhs) + rhs = compiler.compile(self.rhs) + return connection.ops.subtract_temporals( + self.lhs.output_field.get_internal_type(), lhs, rhs + ) + + +@deconstructible(path="django.db.models.F") +class F(Combinable): + """An object capable of resolving references to existing query objects.""" + + allowed_default = False + + def __init__(self, name): + """ + Arguments: + * name: the name of the field this expression references + """ + self.name = name + + def __repr__(self): + return "{}({})".format(self.__class__.__name__, self.name) + + def resolve_expression( + self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False + ): + return query.resolve_ref(self.name, allow_joins, reuse, summarize) + + def replace_expressions(self, replacements): + return replacements.get(self, self) + + def asc(self, **kwargs): + return OrderBy(self, **kwargs) + + def desc(self, **kwargs): + return OrderBy(self, descending=True, **kwargs) + + def __eq__(self, other): + return self.__class__ == other.__class__ and self.name == other.name + + def __hash__(self): + return hash(self.name) + + def copy(self): + return copy.copy(self) + + +class ResolvedOuterRef(F): + """ + An object that contains a reference to an outer query. + + In this case, the reference to the outer query has been resolved because + the inner query has been used as a subquery. + """ + + contains_aggregate = False + contains_over_clause = False + + def as_sql(self, *args, **kwargs): + raise ValueError( + "This queryset contains a reference to an outer query and may " + "only be used in a subquery." + ) + + def resolve_expression(self, *args, **kwargs): + col = super().resolve_expression(*args, **kwargs) + if col.contains_over_clause: + raise NotSupportedError( + f"Referencing outer query window expression is not supported: " + f"{self.name}." + ) + # FIXME: Rename possibly_multivalued to multivalued and fix detection + # for non-multivalued JOINs (e.g. foreign key fields). This should take + # into account only many-to-many and one-to-many relationships. + col.possibly_multivalued = LOOKUP_SEP in self.name + return col + + def relabeled_clone(self, relabels): + return self + + def get_group_by_cols(self): + return [] + + +class OuterRef(F): + contains_aggregate = False + + def resolve_expression(self, *args, **kwargs): + if isinstance(self.name, self.__class__): + return self.name + return ResolvedOuterRef(self.name) + + def relabeled_clone(self, relabels): + return self + + +@deconstructible(path="django.db.models.Func") +class Func(SQLiteNumericMixin, Expression): + """An SQL function call.""" + + function = None + template = "%(function)s(%(expressions)s)" + arg_joiner = ", " + arity = None # The number of arguments the function accepts. 
+ + def __init__(self, *expressions, output_field=None, **extra): + if self.arity is not None and len(expressions) != self.arity: + raise TypeError( + "'%s' takes exactly %s %s (%s given)" + % ( + self.__class__.__name__, + self.arity, + "argument" if self.arity == 1 else "arguments", + len(expressions), + ) + ) + super().__init__(output_field=output_field) + self.source_expressions = self._parse_expressions(*expressions) + self.extra = extra + + def __repr__(self): + args = self.arg_joiner.join(str(arg) for arg in self.source_expressions) + extra = {**self.extra, **self._get_repr_options()} + if extra: + extra = ", ".join( + str(key) + "=" + str(val) for key, val in sorted(extra.items()) + ) + return "{}({}, {})".format(self.__class__.__name__, args, extra) + return "{}({})".format(self.__class__.__name__, args) + + def _get_repr_options(self): + """Return a dict of extra __init__() options to include in the repr.""" + return {} + + def get_source_expressions(self): + return self.source_expressions + + def set_source_expressions(self, exprs): + self.source_expressions = exprs + + def resolve_expression( + self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False + ): + c = self.copy() + c.is_summary = summarize + for pos, arg in enumerate(c.source_expressions): + c.source_expressions[pos] = arg.resolve_expression( + query, allow_joins, reuse, summarize, for_save + ) + return c + + def as_sql( + self, + compiler, + connection, + function=None, + template=None, + arg_joiner=None, + **extra_context, + ): + connection.ops.check_expression_support(self) + sql_parts = [] + params = [] + for arg in self.source_expressions: + try: + arg_sql, arg_params = compiler.compile(arg) + except EmptyResultSet: + empty_result_set_value = getattr( + arg, "empty_result_set_value", NotImplemented + ) + if empty_result_set_value is NotImplemented: + raise + arg_sql, arg_params = compiler.compile(Value(empty_result_set_value)) + except FullResultSet: + arg_sql, arg_params = compiler.compile(Value(True)) + sql_parts.append(arg_sql) + params.extend(arg_params) + data = {**self.extra, **extra_context} + # Use the first supplied value in this order: the parameter to this + # method, a value supplied in __init__()'s **extra (the value in + # `data`), or the value defined on the class. + if function is not None: + data["function"] = function + else: + data.setdefault("function", self.function) + template = template or data.get("template", self.template) + arg_joiner = arg_joiner or data.get("arg_joiner", self.arg_joiner) + data["expressions"] = data["field"] = arg_joiner.join(sql_parts) + return template % data, params + + def copy(self): + copy = super().copy() + copy.source_expressions = self.source_expressions[:] + copy.extra = self.extra.copy() + return copy + + @cached_property + def allowed_default(self): + return all(expression.allowed_default for expression in self.source_expressions) + + +@deconstructible(path="django.db.models.Value") +class Value(SQLiteNumericMixin, Expression): + """Represent a wrapped value as a node within an expression.""" + + # Provide a default value for `for_save` in order to allow unresolved + # instances to be compiled until a decision is taken in #25425. + for_save = False + allowed_default = True + + def __init__(self, value, output_field=None): + """ + Arguments: + * value: the value this expression represents. The value will be + added into the sql parameter list and properly quoted. 
+ + * output_field: an instance of the model field type that this + expression will return, such as IntegerField() or CharField(). + """ + super().__init__(output_field=output_field) + self.value = value + + def __repr__(self): + return f"{self.__class__.__name__}({self.value!r})" + + def as_sql(self, compiler, connection): + connection.ops.check_expression_support(self) + val = self.value + output_field = self._output_field_or_none + if output_field is not None: + if self.for_save: + val = output_field.get_db_prep_save(val, connection=connection) + else: + val = output_field.get_db_prep_value(val, connection=connection) + if hasattr(output_field, "get_placeholder"): + return output_field.get_placeholder(val, compiler, connection), [val] + if val is None: + # cx_Oracle does not always convert None to the appropriate + # NULL type (like in case expressions using numbers), so we + # use a literal SQL NULL + return "NULL", [] + return "%s", [val] + + def resolve_expression( + self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False + ): + c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) + c.for_save = for_save + return c + + def get_group_by_cols(self): + return [] + + def _resolve_output_field(self): + if isinstance(self.value, str): + return fields.CharField() + if isinstance(self.value, bool): + return fields.BooleanField() + if isinstance(self.value, int): + return fields.IntegerField() + if isinstance(self.value, float): + return fields.FloatField() + if isinstance(self.value, datetime.datetime): + return fields.DateTimeField() + if isinstance(self.value, datetime.date): + return fields.DateField() + if isinstance(self.value, datetime.time): + return fields.TimeField() + if isinstance(self.value, datetime.timedelta): + return fields.DurationField() + if isinstance(self.value, Decimal): + return fields.DecimalField() + if isinstance(self.value, bytes): + return fields.BinaryField() + if isinstance(self.value, UUID): + return fields.UUIDField() + + @property + def empty_result_set_value(self): + return self.value + + +class RawSQL(Expression): + allowed_default = True + + def __init__(self, sql, params, output_field=None): + if output_field is None: + output_field = fields.Field() + self.sql, self.params = sql, params + super().__init__(output_field=output_field) + + def __repr__(self): + return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params) + + def as_sql(self, compiler, connection): + return "(%s)" % self.sql, self.params + + def get_group_by_cols(self): + return [self] + + def resolve_expression( + self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False + ): + # Resolve parents fields used in raw SQL. 
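+        # (e.g. RawSQL("myapp_parent.name = %s", [v]) on a child model: the
+        # ORM can't parse the raw SQL, so any parent column name appearing
+        # in the string triggers resolve_ref() to set up the parent join;
+        # the table/column names here are illustrative.)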
+ if query.model: + for parent in query.model._meta.get_parent_list(): + for parent_field in parent._meta.local_fields: + _, column_name = parent_field.get_attname_column() + if column_name.lower() in self.sql.lower(): + query.resolve_ref( + parent_field.name, allow_joins, reuse, summarize + ) + break + return super().resolve_expression( + query, allow_joins, reuse, summarize, for_save + ) + + +class Star(Expression): + def __repr__(self): + return "'*'" + + def as_sql(self, compiler, connection): + return "*", [] + + +class DatabaseDefault(Expression): + """Placeholder expression for the database default in an insert query.""" + + def as_sql(self, compiler, connection): + return "DEFAULT", [] + + +class Col(Expression): + contains_column_references = True + possibly_multivalued = False + + def __init__(self, alias, target, output_field=None): + if output_field is None: + output_field = target + super().__init__(output_field=output_field) + self.alias, self.target = alias, target + + def __repr__(self): + alias, target = self.alias, self.target + identifiers = (alias, str(target)) if alias else (str(target),) + return "{}({})".format(self.__class__.__name__, ", ".join(identifiers)) + + def as_sql(self, compiler, connection): + alias, column = self.alias, self.target.column + identifiers = (alias, column) if alias else (column,) + sql = ".".join(map(compiler.quote_name_unless_alias, identifiers)) + return sql, [] + + def relabeled_clone(self, relabels): + if self.alias is None: + return self + return self.__class__( + relabels.get(self.alias, self.alias), self.target, self.output_field + ) + + def get_group_by_cols(self): + return [self] + + def get_db_converters(self, connection): + if self.target == self.output_field: + return self.output_field.get_db_converters(connection) + return self.output_field.get_db_converters( + connection + ) + self.target.get_db_converters(connection) + + +class Ref(Expression): + """ + Reference to column alias of the query. For example, Ref('sum_cost') in + qs.annotate(sum_cost=Sum('cost')) query. + """ + + def __init__(self, refs, source): + super().__init__() + self.refs, self.source = refs, source + + def __repr__(self): + return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source) + + def get_source_expressions(self): + return [self.source] + + def set_source_expressions(self, exprs): + (self.source,) = exprs + + def resolve_expression( + self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False + ): + # The sub-expression `source` has already been resolved, as this is + # just a reference to the name of `source`. + return self + + def get_refs(self): + return {self.refs} + + def relabeled_clone(self, relabels): + return self + + def as_sql(self, compiler, connection): + return connection.ops.quote_name(self.refs), [] + + def get_group_by_cols(self): + return [self] + + +class ExpressionList(Func): + """ + An expression containing multiple expressions. Can be used to provide a + list of expressions as an argument to another expression, like a partition + clause. + """ + + template = "%(expressions)s" + + def __init__(self, *expressions, **extra): + if not expressions: + raise ValueError( + "%s requires at least one expression." % self.__class__.__name__ + ) + super().__init__(*expressions, **extra) + + def __str__(self): + return self.arg_joiner.join(str(arg) for arg in self.source_expressions) + + def as_sqlite(self, compiler, connection, **extra_context): + # Casting to numeric is unnecessary. 
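+        # (This overrides SQLiteNumericMixin.as_sqlite(), which would wrap
+        # the expression list in CAST(... AS NUMERIC) for DecimalField
+        # output.)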
+ return self.as_sql(compiler, connection, **extra_context) + + +class OrderByList(Func): + allowed_default = False + template = "ORDER BY %(expressions)s" + + def __init__(self, *expressions, **extra): + expressions = ( + ( + OrderBy(F(expr[1:]), descending=True) + if isinstance(expr, str) and expr[0] == "-" + else expr + ) + for expr in expressions + ) + super().__init__(*expressions, **extra) + + def as_sql(self, *args, **kwargs): + if not self.source_expressions: + return "", () + return super().as_sql(*args, **kwargs) + + def get_group_by_cols(self): + group_by_cols = [] + for order_by in self.get_source_expressions(): + group_by_cols.extend(order_by.get_group_by_cols()) + return group_by_cols + + +@deconstructible(path="django.db.models.ExpressionWrapper") +class ExpressionWrapper(SQLiteNumericMixin, Expression): + """ + An expression that can wrap another expression so that it can provide + extra context to the inner expression, such as the output_field. + """ + + def __init__(self, expression, output_field): + super().__init__(output_field=output_field) + self.expression = expression + + def set_source_expressions(self, exprs): + self.expression = exprs[0] + + def get_source_expressions(self): + return [self.expression] + + def get_group_by_cols(self): + if isinstance(self.expression, Expression): + expression = self.expression.copy() + expression.output_field = self.output_field + return expression.get_group_by_cols() + # For non-expressions e.g. an SQL WHERE clause, the entire + # `expression` must be included in the GROUP BY clause. + return super().get_group_by_cols() + + def as_sql(self, compiler, connection): + return compiler.compile(self.expression) + + def __repr__(self): + return "{}({})".format(self.__class__.__name__, self.expression) + + @property + def allowed_default(self): + return self.expression.allowed_default + + +class NegatedExpression(ExpressionWrapper): + """The logical negation of a conditional expression.""" + + def __init__(self, expression): + super().__init__(expression, output_field=fields.BooleanField()) + + def __invert__(self): + return self.expression.copy() + + def as_sql(self, compiler, connection): + try: + sql, params = super().as_sql(compiler, connection) + except EmptyResultSet: + features = compiler.connection.features + if not features.supports_boolean_expr_in_select_clause: + return "1=1", () + return compiler.compile(Value(True)) + ops = compiler.connection.ops + # Some database backends (e.g. Oracle) don't allow EXISTS() and filters + # to be compared to another expression unless they're wrapped in a CASE + # WHEN. + if not ops.conditional_expression_supported_in_where_clause(self.expression): + return f"CASE WHEN {sql} = 0 THEN 1 ELSE 0 END", params + return f"NOT {sql}", params + + def resolve_expression( + self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False + ): + resolved = super().resolve_expression( + query, allow_joins, reuse, summarize, for_save + ) + if not getattr(resolved.expression, "conditional", False): + raise TypeError("Cannot negate non-conditional expressions.") + return resolved + + def select_format(self, compiler, sql, params): + # Wrap boolean expressions with a CASE WHEN expression if a database + # backend (e.g. Oracle) doesn't support boolean expression in SELECT or + # GROUP BY list. 
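+        # e.g. on Oracle, NOT "active" in a SELECT list is emitted as
+        # CASE WHEN NOT "active" THEN 1 ELSE 0 END.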
+ expression_supported_in_where_clause = ( + compiler.connection.ops.conditional_expression_supported_in_where_clause + ) + if ( + not compiler.connection.features.supports_boolean_expr_in_select_clause + # Avoid double wrapping. + and expression_supported_in_where_clause(self.expression) + ): + sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql) + return sql, params + + +@deconstructible(path="django.db.models.When") +class When(Expression): + template = "WHEN %(condition)s THEN %(result)s" + # This isn't a complete conditional expression, must be used in Case(). + conditional = False + + def __init__(self, condition=None, then=None, **lookups): + if lookups: + if condition is None: + condition, lookups = Q(**lookups), None + elif getattr(condition, "conditional", False): + condition, lookups = Q(condition, **lookups), None + if condition is None or not getattr(condition, "conditional", False) or lookups: + raise TypeError( + "When() supports a Q object, a boolean expression, or lookups " + "as a condition." + ) + if isinstance(condition, Q) and not condition: + raise ValueError("An empty Q() can't be used as a When() condition.") + super().__init__(output_field=None) + self.condition = condition + self.result = self._parse_expressions(then)[0] + + def __str__(self): + return "WHEN %r THEN %r" % (self.condition, self.result) + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self) + + def get_source_expressions(self): + return [self.condition, self.result] + + def set_source_expressions(self, exprs): + self.condition, self.result = exprs + + def get_source_fields(self): + # We're only interested in the fields of the result expressions. + return [self.result._output_field_or_none] + + def resolve_expression( + self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False + ): + c = self.copy() + c.is_summary = summarize + if hasattr(c.condition, "resolve_expression"): + c.condition = c.condition.resolve_expression( + query, allow_joins, reuse, summarize, False + ) + c.result = c.result.resolve_expression( + query, allow_joins, reuse, summarize, for_save + ) + return c + + def as_sql(self, compiler, connection, template=None, **extra_context): + connection.ops.check_expression_support(self) + template_params = extra_context + sql_params = [] + condition_sql, condition_params = compiler.compile(self.condition) + template_params["condition"] = condition_sql + result_sql, result_params = compiler.compile(self.result) + template_params["result"] = result_sql + template = template or self.template + return template % template_params, ( + *sql_params, + *condition_params, + *result_params, + ) + + def get_group_by_cols(self): + # This is not a complete expression and cannot be used in GROUP BY. 
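+        # Contribute the condition's and the result's columns instead, so
+        # the enclosing Case() can be grouped by them.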
+ cols = [] + for source in self.get_source_expressions(): + cols.extend(source.get_group_by_cols()) + return cols + + @cached_property + def allowed_default(self): + return self.condition.allowed_default and self.result.allowed_default + + +@deconstructible(path="django.db.models.Case") +class Case(SQLiteNumericMixin, Expression): + """ + An SQL searched CASE expression: + + CASE + WHEN n > 0 + THEN 'positive' + WHEN n < 0 + THEN 'negative' + ELSE 'zero' + END + """ + + template = "CASE %(cases)s ELSE %(default)s END" + case_joiner = " " + + def __init__(self, *cases, default=None, output_field=None, **extra): + if not all(isinstance(case, When) for case in cases): + raise TypeError("Positional arguments must all be When objects.") + super().__init__(output_field) + self.cases = list(cases) + self.default = self._parse_expressions(default)[0] + self.extra = extra + + def __str__(self): + return "CASE %s, ELSE %r" % ( + ", ".join(str(c) for c in self.cases), + self.default, + ) + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self) + + def get_source_expressions(self): + return self.cases + [self.default] + + def set_source_expressions(self, exprs): + *self.cases, self.default = exprs + + def resolve_expression( + self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False + ): + c = self.copy() + c.is_summary = summarize + for pos, case in enumerate(c.cases): + c.cases[pos] = case.resolve_expression( + query, allow_joins, reuse, summarize, for_save + ) + c.default = c.default.resolve_expression( + query, allow_joins, reuse, summarize, for_save + ) + return c + + def copy(self): + c = super().copy() + c.cases = c.cases[:] + return c + + def as_sql( + self, compiler, connection, template=None, case_joiner=None, **extra_context + ): + connection.ops.check_expression_support(self) + if not self.cases: + return compiler.compile(self.default) + template_params = {**self.extra, **extra_context} + case_parts = [] + sql_params = [] + default_sql, default_params = compiler.compile(self.default) + for case in self.cases: + try: + case_sql, case_params = compiler.compile(case) + except EmptyResultSet: + continue + except FullResultSet: + default_sql, default_params = compiler.compile(case.result) + break + case_parts.append(case_sql) + sql_params.extend(case_params) + if not case_parts: + return default_sql, default_params + case_joiner = case_joiner or self.case_joiner + template_params["cases"] = case_joiner.join(case_parts) + template_params["default"] = default_sql + sql_params.extend(default_params) + template = template or template_params.get("template", self.template) + sql = template % template_params + if self._output_field_or_none is not None: + sql = connection.ops.unification_cast_sql(self.output_field) % sql + return sql, sql_params + + def get_group_by_cols(self): + if not self.cases: + return self.default.get_group_by_cols() + return super().get_group_by_cols() + + @cached_property + def allowed_default(self): + return self.default.allowed_default and all( + case_.allowed_default for case_ in self.cases + ) + + +class Subquery(BaseExpression, Combinable): + """ + An explicit subquery. It may contain OuterRef() references to the outer + query which will be resolved when it is applied to that query. + """ + + template = "(%(subquery)s)" + contains_aggregate = False + empty_result_set_value = None + subquery = True + + def __init__(self, queryset, output_field=None, **extra): + # Allow the usage of both QuerySet and sql.Query objects. 
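+        # Illustrative usage (model names hypothetical, mirroring the
+        # documented correlated-subquery pattern): refer to the outer query
+        # with OuterRef() and select a single scalar column:
+        #
+        #     newest = Comment.objects.filter(post=OuterRef("pk")).order_by("-created_at")
+        #     Post.objects.annotate(newest_email=Subquery(newest.values("email")[:1]))
+        #
+        # getattr() accepts either a QuerySet (via its .query attribute) or a
+        # sql.Query directly; clone() keeps the caller's queryset untouched.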
+ self.query = getattr(queryset, "query", queryset).clone() + self.query.subquery = True + self.extra = extra + super().__init__(output_field) + + def get_source_expressions(self): + return [self.query] + + def set_source_expressions(self, exprs): + self.query = exprs[0] + + def _resolve_output_field(self): + return self.query.output_field + + def copy(self): + clone = super().copy() + clone.query = clone.query.clone() + return clone + + @property + def external_aliases(self): + return self.query.external_aliases + + def get_external_cols(self): + return self.query.get_external_cols() + + def as_sql(self, compiler, connection, template=None, **extra_context): + connection.ops.check_expression_support(self) + template_params = {**self.extra, **extra_context} + subquery_sql, sql_params = self.query.as_sql(compiler, connection) + template_params["subquery"] = subquery_sql[1:-1] + + template = template or template_params.get("template", self.template) + sql = template % template_params + return sql, sql_params + + def get_group_by_cols(self): + return self.query.get_group_by_cols(wrapper=self) + + +class Exists(Subquery): + template = "EXISTS(%(subquery)s)" + output_field = fields.BooleanField() + empty_result_set_value = False + + def __init__(self, queryset, **kwargs): + super().__init__(queryset, **kwargs) + self.query = self.query.exists() + + def select_format(self, compiler, sql, params): + # Wrap EXISTS() with a CASE WHEN expression if a database backend + # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP + # BY list. + if not compiler.connection.features.supports_boolean_expr_in_select_clause: + sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql) + return sql, params + + +@deconstructible(path="django.db.models.OrderBy") +class OrderBy(Expression): + template = "%(expression)s %(ordering)s" + conditional = False + + def __init__(self, expression, descending=False, nulls_first=None, nulls_last=None): + if nulls_first and nulls_last: + raise ValueError("nulls_first and nulls_last are mutually exclusive") + if nulls_first is False or nulls_last is False: + raise ValueError("nulls_first and nulls_last values must be True or None.") + self.nulls_first = nulls_first + self.nulls_last = nulls_last + self.descending = descending + if not hasattr(expression, "resolve_expression"): + raise ValueError("expression must be an expression type") + self.expression = expression + + def __repr__(self): + return "{}({}, descending={})".format( + self.__class__.__name__, self.expression, self.descending + ) + + def set_source_expressions(self, exprs): + self.expression = exprs[0] + + def get_source_expressions(self): + return [self.expression] + + def as_sql(self, compiler, connection, template=None, **extra_context): + template = template or self.template + if connection.features.supports_order_by_nulls_modifier: + if self.nulls_last: + template = "%s NULLS LAST" % template + elif self.nulls_first: + template = "%s NULLS FIRST" % template + else: + if self.nulls_last and not ( + self.descending and connection.features.order_by_nulls_first + ): + template = "%%(expression)s IS NULL, %s" % template + elif self.nulls_first and not ( + not self.descending and connection.features.order_by_nulls_first + ): + template = "%%(expression)s IS NOT NULL, %s" % template + connection.ops.check_expression_support(self) + expression_sql, params = compiler.compile(self.expression) + placeholders = { + "expression": expression_sql, + "ordering": "DESC" if self.descending else "ASC", + **extra_context, + 
} + params *= template.count("%(expression)s") + return (template % placeholders).rstrip(), params + + def as_oracle(self, compiler, connection): + # Oracle doesn't allow ORDER BY EXISTS() or filters unless it's wrapped + # in a CASE WHEN. + if connection.ops.conditional_expression_supported_in_where_clause( + self.expression + ): + copy = self.copy() + copy.expression = Case( + When(self.expression, then=True), + default=False, + ) + return copy.as_sql(compiler, connection) + return self.as_sql(compiler, connection) + + def get_group_by_cols(self): + cols = [] + for source in self.get_source_expressions(): + cols.extend(source.get_group_by_cols()) + return cols + + def reverse_ordering(self): + self.descending = not self.descending + if self.nulls_first: + self.nulls_last = True + self.nulls_first = None + elif self.nulls_last: + self.nulls_first = True + self.nulls_last = None + return self + + def asc(self): + self.descending = False + + def desc(self): + self.descending = True + + +class Window(SQLiteNumericMixin, Expression): + template = "%(expression)s OVER (%(window)s)" + # Although the main expression may either be an aggregate or an + # expression with an aggregate function, the GROUP BY that will + # be introduced in the query as a result is not desired. + contains_aggregate = False + contains_over_clause = True + + def __init__( + self, + expression, + partition_by=None, + order_by=None, + frame=None, + output_field=None, + ): + self.partition_by = partition_by + self.order_by = order_by + self.frame = frame + + if not getattr(expression, "window_compatible", False): + raise ValueError( + "Expression '%s' isn't compatible with OVER clauses." + % expression.__class__.__name__ + ) + + if self.partition_by is not None: + if not isinstance(self.partition_by, (tuple, list)): + self.partition_by = (self.partition_by,) + self.partition_by = ExpressionList(*self.partition_by) + + if self.order_by is not None: + if isinstance(self.order_by, (list, tuple)): + self.order_by = OrderByList(*self.order_by) + elif isinstance(self.order_by, (BaseExpression, str)): + self.order_by = OrderByList(self.order_by) + else: + raise ValueError( + "Window.order_by must be either a string reference to a " + "field, an expression, or a list or tuple of them." 
+ ) + super().__init__(output_field=output_field) + self.source_expression = self._parse_expressions(expression)[0] + + def _resolve_output_field(self): + return self.source_expression.output_field + + def get_source_expressions(self): + return [self.source_expression, self.partition_by, self.order_by, self.frame] + + def set_source_expressions(self, exprs): + self.source_expression, self.partition_by, self.order_by, self.frame = exprs + + def as_sql(self, compiler, connection, template=None): + connection.ops.check_expression_support(self) + if not connection.features.supports_over_clause: + raise NotSupportedError("This backend does not support window expressions.") + expr_sql, params = compiler.compile(self.source_expression) + window_sql, window_params = [], () + + if self.partition_by is not None: + sql_expr, sql_params = self.partition_by.as_sql( + compiler=compiler, + connection=connection, + template="PARTITION BY %(expressions)s", + ) + window_sql.append(sql_expr) + window_params += tuple(sql_params) + + if self.order_by is not None: + order_sql, order_params = compiler.compile(self.order_by) + window_sql.append(order_sql) + window_params += tuple(order_params) + + if self.frame: + frame_sql, frame_params = compiler.compile(self.frame) + window_sql.append(frame_sql) + window_params += tuple(frame_params) + + template = template or self.template + + return ( + template % {"expression": expr_sql, "window": " ".join(window_sql).strip()}, + (*params, *window_params), + ) + + def as_sqlite(self, compiler, connection): + if isinstance(self.output_field, fields.DecimalField): + # Casting to numeric must be outside of the window expression. + copy = self.copy() + source_expressions = copy.get_source_expressions() + source_expressions[0].output_field = fields.FloatField() + copy.set_source_expressions(source_expressions) + return super(Window, copy).as_sqlite(compiler, connection) + return self.as_sql(compiler, connection) + + def __str__(self): + return "{} OVER ({}{}{})".format( + str(self.source_expression), + "PARTITION BY " + str(self.partition_by) if self.partition_by else "", + str(self.order_by or ""), + str(self.frame or ""), + ) + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self) + + def get_group_by_cols(self): + group_by_cols = [] + if self.partition_by: + group_by_cols.extend(self.partition_by.get_group_by_cols()) + if self.order_by is not None: + group_by_cols.extend(self.order_by.get_group_by_cols()) + return group_by_cols + + +class WindowFrame(Expression): + """ + Model the frame clause in window expressions. There are two types of frame + clauses which are subclasses, however, all processing and validation (by no + means intended to be complete) is done here. Thus, providing an end for a + frame is optional (the default is UNBOUNDED FOLLOWING, which is the last + row in the frame). 
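+
+    A usage sketch (illustrative, not part of the original docstring):
+
+        Window(
+            expression=Sum("salary"),
+            order_by="hire_date",
+            frame=RowRange(start=-1, end=1),  # previous, current, and next row
+        )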
+ """ + + template = "%(frame_type)s BETWEEN %(start)s AND %(end)s" + + def __init__(self, start=None, end=None): + self.start = Value(start) + self.end = Value(end) + + def set_source_expressions(self, exprs): + self.start, self.end = exprs + + def get_source_expressions(self): + return [self.start, self.end] + + def as_sql(self, compiler, connection): + connection.ops.check_expression_support(self) + start, end = self.window_frame_start_end( + connection, self.start.value, self.end.value + ) + return ( + self.template + % { + "frame_type": self.frame_type, + "start": start, + "end": end, + }, + [], + ) + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self) + + def get_group_by_cols(self): + return [] + + def __str__(self): + if self.start.value is not None and self.start.value < 0: + start = "%d %s" % (abs(self.start.value), connection.ops.PRECEDING) + elif self.start.value is not None and self.start.value == 0: + start = connection.ops.CURRENT_ROW + else: + start = connection.ops.UNBOUNDED_PRECEDING + + if self.end.value is not None and self.end.value > 0: + end = "%d %s" % (self.end.value, connection.ops.FOLLOWING) + elif self.end.value is not None and self.end.value == 0: + end = connection.ops.CURRENT_ROW + else: + end = connection.ops.UNBOUNDED_FOLLOWING + return self.template % { + "frame_type": self.frame_type, + "start": start, + "end": end, + } + + def window_frame_start_end(self, connection, start, end): + raise NotImplementedError("Subclasses must implement window_frame_start_end().") + + +class RowRange(WindowFrame): + frame_type = "ROWS" + + def window_frame_start_end(self, connection, start, end): + return connection.ops.window_frame_rows_start_end(start, end) + + +class ValueRange(WindowFrame): + frame_type = "RANGE" + + def window_frame_start_end(self, connection, start, end): + return connection.ops.window_frame_range_start_end(start, end) diff --git a/testbed/django__django/django/db/models/fields/__init__.py b/testbed/django__django/django/db/models/fields/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f958589bea1ae0d1e314e91242e587cb68538a61 --- /dev/null +++ b/testbed/django__django/django/db/models/fields/__init__.py @@ -0,0 +1,2869 @@ +import collections.abc +import copy +import datetime +import decimal +import operator +import uuid +import warnings +from base64 import b64decode, b64encode +from functools import partialmethod, total_ordering + +from django import forms +from django.apps import apps +from django.conf import settings +from django.core import checks, exceptions, validators +from django.db import connection, connections, router +from django.db.models.constants import LOOKUP_SEP +from django.db.models.enums import ChoicesMeta +from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin +from django.utils import timezone +from django.utils.datastructures import DictWrapper +from django.utils.dateparse import ( + parse_date, + parse_datetime, + parse_duration, + parse_time, +) +from django.utils.duration import duration_microseconds, duration_string +from django.utils.functional import Promise, cached_property +from django.utils.ipv6 import clean_ipv6_address +from django.utils.itercompat import is_iterable +from django.utils.text import capfirst +from django.utils.translation import gettext_lazy as _ + +__all__ = [ + "AutoField", + "BLANK_CHOICE_DASH", + "BigAutoField", + "BigIntegerField", + "BinaryField", + "BooleanField", + "CharField", + "CommaSeparatedIntegerField", + 
"DateField", + "DateTimeField", + "DecimalField", + "DurationField", + "EmailField", + "Empty", + "Field", + "FilePathField", + "FloatField", + "GenericIPAddressField", + "IPAddressField", + "IntegerField", + "NOT_PROVIDED", + "NullBooleanField", + "PositiveBigIntegerField", + "PositiveIntegerField", + "PositiveSmallIntegerField", + "SlugField", + "SmallAutoField", + "SmallIntegerField", + "TextField", + "TimeField", + "URLField", + "UUIDField", +] + + +class Empty: + pass + + +class NOT_PROVIDED: + pass + + +# The values to use for "blank" in SelectFields. Will be appended to the start +# of most "choices" lists. +BLANK_CHOICE_DASH = [("", "---------")] + + +def _load_field(app_label, model_name, field_name): + return apps.get_model(app_label, model_name)._meta.get_field(field_name) + + +# A guide to Field parameters: +# +# * name: The name of the field specified in the model. +# * attname: The attribute to use on the model object. This is the same as +# "name", except in the case of ForeignKeys, where "_id" is +# appended. +# * db_column: The db_column specified in the model (or None). +# * column: The database column for this field. This is the same as +# "attname", except if db_column is specified. +# +# Code that introspects values, or does other dynamic things, should use +# attname. For example, this gets the primary key value of object "obj": +# +# getattr(obj, opts.pk.attname) + + +def _empty(of_cls): + new = Empty() + new.__class__ = of_cls + return new + + +def return_None(): + return None + + +@total_ordering +class Field(RegisterLookupMixin): + """Base class for all field types""" + + # Designates whether empty strings fundamentally are allowed at the + # database level. + empty_strings_allowed = True + empty_values = list(validators.EMPTY_VALUES) + + # These track each time a Field instance is created. Used to retain order. + # The auto_creation_counter is used for fields that Django implicitly + # creates, creation_counter is used for all user-specified fields. + creation_counter = 0 + auto_creation_counter = -1 + default_validators = [] # Default set of validators + default_error_messages = { + "invalid_choice": _("Value %(value)r is not a valid choice."), + "null": _("This field cannot be null."), + "blank": _("This field cannot be blank."), + "unique": _("%(model_name)s with this %(field_label)s already exists."), + "unique_for_date": _( + # Translators: The 'lookup_type' is one of 'date', 'year' or + # 'month'. Eg: "Title must be unique for pub_date year" + "%(field_label)s must be unique for " + "%(date_field_label)s %(lookup_type)s." + ), + } + system_check_deprecated_details = None + system_check_removed_details = None + + # Attributes that don't affect a column definition. + # These attributes are ignored when altering the field. + non_db_attrs = ( + "blank", + "choices", + "db_column", + "editable", + "error_messages", + "help_text", + "limit_choices_to", + # Database-level options are not supported, see #21961. 
+ "on_delete", + "related_name", + "related_query_name", + "validators", + "verbose_name", + ) + + # Field flags + hidden = False + + many_to_many = None + many_to_one = None + one_to_many = None + one_to_one = None + related_model = None + + descriptor_class = DeferredAttribute + + # Generic field type description, usually overridden by subclasses + def _description(self): + return _("Field of type: %(field_type)s") % { + "field_type": self.__class__.__name__ + } + + description = property(_description) + + def __init__( + self, + verbose_name=None, + name=None, + primary_key=False, + max_length=None, + unique=False, + blank=False, + null=False, + db_index=False, + rel=None, + default=NOT_PROVIDED, + editable=True, + serialize=True, + unique_for_date=None, + unique_for_month=None, + unique_for_year=None, + choices=None, + help_text="", + db_column=None, + db_tablespace=None, + auto_created=False, + validators=(), + error_messages=None, + db_comment=None, + db_default=NOT_PROVIDED, + ): + self.name = name + self.verbose_name = verbose_name # May be set by set_attributes_from_name + self._verbose_name = verbose_name # Store original for deconstruction + self.primary_key = primary_key + self.max_length, self._unique = max_length, unique + self.blank, self.null = blank, null + self.remote_field = rel + self.is_relation = self.remote_field is not None + self.default = default + if db_default is not NOT_PROVIDED and not hasattr( + db_default, "resolve_expression" + ): + from django.db.models.expressions import Value + + db_default = Value(db_default) + self.db_default = db_default + self.editable = editable + self.serialize = serialize + self.unique_for_date = unique_for_date + self.unique_for_month = unique_for_month + self.unique_for_year = unique_for_year + if isinstance(choices, ChoicesMeta): + choices = choices.choices + if isinstance(choices, collections.abc.Iterator): + choices = list(choices) + self.choices = choices + self.help_text = help_text + self.db_index = db_index + self.db_column = db_column + self.db_comment = db_comment + self._db_tablespace = db_tablespace + self.auto_created = auto_created + + # Adjust the appropriate creation counter, and save our local copy. + if auto_created: + self.creation_counter = Field.auto_creation_counter + Field.auto_creation_counter -= 1 + else: + self.creation_counter = Field.creation_counter + Field.creation_counter += 1 + + self._validators = list(validators) # Store for deconstruction later + + self._error_messages = error_messages # Store for deconstruction later + + def __str__(self): + """ + Return "app_label.model_label.field_name" for fields attached to + models. + """ + if not hasattr(self, "model"): + return super().__str__() + model = self.model + return "%s.%s" % (model._meta.label, self.name) + + def __repr__(self): + """Display the module, class, and name of the field.""" + path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__) + name = getattr(self, "name", None) + if name is not None: + return "<%s: %s>" % (path, name) + return "<%s>" % path + + def check(self, **kwargs): + return [ + *self._check_field_name(), + *self._check_choices(), + *self._check_db_default(**kwargs), + *self._check_db_index(), + *self._check_db_comment(**kwargs), + *self._check_null_allowed_for_primary_keys(), + *self._check_backend_specific_checks(**kwargs), + *self._check_validators(), + *self._check_deprecation_details(), + ] + + def _check_field_name(self): + """ + Check if field name is valid, i.e. 
1) does not end with an + underscore, 2) does not contain "__" and 3) is not "pk". + """ + if self.name.endswith("_"): + return [ + checks.Error( + "Field names must not end with an underscore.", + obj=self, + id="fields.E001", + ) + ] + elif LOOKUP_SEP in self.name: + return [ + checks.Error( + 'Field names must not contain "%s".' % LOOKUP_SEP, + obj=self, + id="fields.E002", + ) + ] + elif self.name == "pk": + return [ + checks.Error( + "'pk' is a reserved word that cannot be used as a field name.", + obj=self, + id="fields.E003", + ) + ] + else: + return [] + + @classmethod + def _choices_is_value(cls, value): + return isinstance(value, (str, Promise)) or not is_iterable(value) + + def _check_choices(self): + if not self.choices: + return [] + + if not is_iterable(self.choices) or isinstance(self.choices, str): + return [ + checks.Error( + "'choices' must be an iterable (e.g., a list or tuple).", + obj=self, + id="fields.E004", + ) + ] + + choice_max_length = 0 + # Expect [group_name, [value, display]] + for choices_group in self.choices: + try: + group_name, group_choices = choices_group + except (TypeError, ValueError): + # Containing non-pairs + break + try: + if not all( + self._choices_is_value(value) and self._choices_is_value(human_name) + for value, human_name in group_choices + ): + break + if self.max_length is not None and group_choices: + choice_max_length = max( + [ + choice_max_length, + *( + len(value) + for value, _ in group_choices + if isinstance(value, str) + ), + ] + ) + except (TypeError, ValueError): + # No groups, choices in the form [value, display] + value, human_name = group_name, group_choices + if not self._choices_is_value(value) or not self._choices_is_value( + human_name + ): + break + if self.max_length is not None and isinstance(value, str): + choice_max_length = max(choice_max_length, len(value)) + + # Special case: choices=['ab'] + if isinstance(choices_group, str): + break + else: + if self.max_length is not None and choice_max_length > self.max_length: + return [ + checks.Error( + "'max_length' is too small to fit the longest value " + "in 'choices' (%d characters)." % choice_max_length, + obj=self, + id="fields.E009", + ), + ] + return [] + + return [ + checks.Error( + "'choices' must be an iterable containing " + "(actual value, human readable name) tuples.", + obj=self, + id="fields.E005", + ) + ] + + def _check_db_default(self, databases=None, **kwargs): + from django.db.models.expressions import Value + + if ( + self.db_default is NOT_PROVIDED + or isinstance(self.db_default, Value) + or databases is None + ): + return [] + errors = [] + for db in databases: + if not router.allow_migrate_model(db, self.model): + continue + connection = connections[db] + + if not getattr(self.db_default, "allowed_default", False) and ( + connection.features.supports_expression_defaults + ): + msg = f"{self.db_default} cannot be used in db_default." + errors.append(checks.Error(msg, obj=self, id="fields.E012")) + + if not ( + connection.features.supports_expression_defaults + or "supports_expression_defaults" + in self.model._meta.required_db_features + ): + msg = ( + f"{connection.display_name} does not support default database " + "values with expressions (db_default)." 
+ ) + errors.append(checks.Error(msg, obj=self, id="fields.E011")) + return errors + + def _check_db_index(self): + if self.db_index not in (None, True, False): + return [ + checks.Error( + "'db_index' must be None, True or False.", + obj=self, + id="fields.E006", + ) + ] + else: + return [] + + def _check_db_comment(self, databases=None, **kwargs): + if not self.db_comment or not databases: + return [] + errors = [] + for db in databases: + if not router.allow_migrate_model(db, self.model): + continue + connection = connections[db] + if not ( + connection.features.supports_comments + or "supports_comments" in self.model._meta.required_db_features + ): + errors.append( + checks.Warning( + f"{connection.display_name} does not support comments on " + f"columns (db_comment).", + obj=self, + id="fields.W163", + ) + ) + return errors + + def _check_null_allowed_for_primary_keys(self): + if ( + self.primary_key + and self.null + and not connection.features.interprets_empty_strings_as_nulls + ): + # We cannot reliably check this for backends like Oracle which + # consider NULL and '' to be equal (and thus set up + # character-based fields a little differently). + return [ + checks.Error( + "Primary keys must not have null=True.", + hint=( + "Set null=False on the field, or " + "remove primary_key=True argument." + ), + obj=self, + id="fields.E007", + ) + ] + else: + return [] + + def _check_backend_specific_checks(self, databases=None, **kwargs): + if databases is None: + return [] + errors = [] + for alias in databases: + if router.allow_migrate_model(alias, self.model): + errors.extend(connections[alias].validation.check_field(self, **kwargs)) + return errors + + def _check_validators(self): + errors = [] + for i, validator in enumerate(self.validators): + if not callable(validator): + errors.append( + checks.Error( + "All 'validators' must be callable.", + hint=( + "validators[{i}] ({repr}) isn't a function or " + "instance of a validator class.".format( + i=i, + repr=repr(validator), + ) + ), + obj=self, + id="fields.E008", + ) + ) + return errors + + def _check_deprecation_details(self): + if self.system_check_removed_details is not None: + return [ + checks.Error( + self.system_check_removed_details.get( + "msg", + "%s has been removed except for support in historical " + "migrations." % self.__class__.__name__, + ), + hint=self.system_check_removed_details.get("hint"), + obj=self, + id=self.system_check_removed_details.get("id", "fields.EXXX"), + ) + ] + elif self.system_check_deprecated_details is not None: + return [ + checks.Warning( + self.system_check_deprecated_details.get( + "msg", "%s has been deprecated." % self.__class__.__name__ + ), + hint=self.system_check_deprecated_details.get("hint"), + obj=self, + id=self.system_check_deprecated_details.get("id", "fields.WXXX"), + ) + ] + return [] + + def get_col(self, alias, output_field=None): + if alias == self.model._meta.db_table and ( + output_field is None or output_field == self + ): + return self.cached_col + from django.db.models.expressions import Col + + return Col(alias, self, output_field) + + @cached_property + def cached_col(self): + from django.db.models.expressions import Col + + return Col(self.model._meta.db_table, self) + + def select_format(self, compiler, sql, params): + """ + Custom format for select clauses. For example, GIS columns need to be + selected as AsText(table.col) on MySQL as the table.col data can't be + used by Django. 
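+
+        A sketch of the idea (hypothetical, not the actual GIS code): an
+        override could return ("AsText(%s)" % sql, params) so the raw column
+        is converted before it reaches the row converters.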
+ """ + return sql, params + + def deconstruct(self): + """ + Return enough information to recreate the field as a 4-tuple: + + * The name of the field on the model, if contribute_to_class() has + been run. + * The import path of the field, including the class, e.g. + django.db.models.IntegerField. This should be the most portable + version, so less specific may be better. + * A list of positional arguments. + * A dict of keyword arguments. + + Note that the positional or keyword arguments must contain values of + the following types (including inner values of collection types): + + * None, bool, str, int, float, complex, set, frozenset, list, tuple, + dict + * UUID + * datetime.datetime (naive), datetime.date + * top-level classes, top-level functions - will be referenced by their + full import path + * Storage instances - these have their own deconstruct() method + + This is because the values here must be serialized into a text format + (possibly new Python code, possibly JSON) and these are the only types + with encoding handlers defined. + + There's no need to return the exact way the field was instantiated this + time, just ensure that the resulting field is the same - prefer keyword + arguments over positional ones, and omit parameters with their default + values. + """ + # Short-form way of fetching all the default parameters + keywords = {} + possibles = { + "verbose_name": None, + "primary_key": False, + "max_length": None, + "unique": False, + "blank": False, + "null": False, + "db_index": False, + "default": NOT_PROVIDED, + "db_default": NOT_PROVIDED, + "editable": True, + "serialize": True, + "unique_for_date": None, + "unique_for_month": None, + "unique_for_year": None, + "choices": None, + "help_text": "", + "db_column": None, + "db_comment": None, + "db_tablespace": None, + "auto_created": False, + "validators": [], + "error_messages": None, + } + attr_overrides = { + "unique": "_unique", + "error_messages": "_error_messages", + "validators": "_validators", + "verbose_name": "_verbose_name", + "db_tablespace": "_db_tablespace", + } + equals_comparison = {"choices", "validators"} + for name, default in possibles.items(): + value = getattr(self, attr_overrides.get(name, name)) + # Unroll anything iterable for choices into a concrete list + if name == "choices" and isinstance(value, collections.abc.Iterable): + value = list(value) + # Do correct kind of comparison + if name in equals_comparison: + if value != default: + keywords[name] = value + else: + if value is not default: + keywords[name] = value + # Work out path - we shorten it for known Django core fields + path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__) + if path.startswith("django.db.models.fields.related"): + path = path.replace("django.db.models.fields.related", "django.db.models") + elif path.startswith("django.db.models.fields.files"): + path = path.replace("django.db.models.fields.files", "django.db.models") + elif path.startswith("django.db.models.fields.json"): + path = path.replace("django.db.models.fields.json", "django.db.models") + elif path.startswith("django.db.models.fields.proxy"): + path = path.replace("django.db.models.fields.proxy", "django.db.models") + elif path.startswith("django.db.models.fields"): + path = path.replace("django.db.models.fields", "django.db.models") + # Return basic info - other fields should override this. + return (self.name, path, [], keywords) + + def clone(self): + """ + Uses deconstruct() to clone a new copy of this Field. 
+ Will not preserve any class attachments/attribute names. + """ + name, path, args, kwargs = self.deconstruct() + return self.__class__(*args, **kwargs) + + def __eq__(self, other): + # Needed for @total_ordering + if isinstance(other, Field): + return self.creation_counter == other.creation_counter and getattr( + self, "model", None + ) == getattr(other, "model", None) + return NotImplemented + + def __lt__(self, other): + # This is needed because bisect does not take a comparison function. + # Order by creation_counter first for backward compatibility. + if isinstance(other, Field): + if ( + self.creation_counter != other.creation_counter + or not hasattr(self, "model") + and not hasattr(other, "model") + ): + return self.creation_counter < other.creation_counter + elif hasattr(self, "model") != hasattr(other, "model"): + return not hasattr(self, "model") # Order no-model fields first + else: + # creation_counter's are equal, compare only models. + return (self.model._meta.app_label, self.model._meta.model_name) < ( + other.model._meta.app_label, + other.model._meta.model_name, + ) + return NotImplemented + + def __hash__(self): + return hash(self.creation_counter) + + def __deepcopy__(self, memodict): + # We don't have to deepcopy very much here, since most things are not + # intended to be altered after initial creation. + obj = copy.copy(self) + if self.remote_field: + obj.remote_field = copy.copy(self.remote_field) + if hasattr(self.remote_field, "field") and self.remote_field.field is self: + obj.remote_field.field = obj + memodict[id(self)] = obj + return obj + + def __copy__(self): + # We need to avoid hitting __reduce__, so define this + # slightly weird copy construct. + obj = Empty() + obj.__class__ = self.__class__ + obj.__dict__ = self.__dict__.copy() + return obj + + def __reduce__(self): + """ + Pickling should return the model._meta.fields instance of the field, + not a new copy of that field. So, use the app registry to load the + model and then the field back. + """ + if not hasattr(self, "model"): + # Fields are sometimes used without attaching them to models (for + # example in aggregation). In this case give back a plain field + # instance. The code below will create a new empty instance of + # class self.__class__, then update its dict with self.__dict__ + # values - so, this is very close to normal pickle. + state = self.__dict__.copy() + # The _get_default cached_property can't be pickled due to lambda + # usage. + state.pop("_get_default", None) + return _empty, (self.__class__,), state + return _load_field, ( + self.model._meta.app_label, + self.model._meta.object_name, + self.name, + ) + + def get_pk_value_on_save(self, instance): + """ + Hook to generate new PK values on save. This method is called when + saving instances with no primary key value set. If this method returns + something else than None, then the returned value is used when saving + the new instance. + """ + if self.default: + return self.get_default() + return None + + def to_python(self, value): + """ + Convert the input value into the expected Python data type, raising + django.core.exceptions.ValidationError if the data can't be converted. + Return the converted value. Subclasses should override this. 
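+
+        A minimal sketch of an override (illustrative; Hand and parse_hand
+        are hypothetical):
+
+            def to_python(self, value):
+                if isinstance(value, Hand) or value is None:
+                    return value
+                return parse_hand(value)  # may raise ValidationError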
+ """ + return value + + @cached_property + def error_messages(self): + messages = {} + for c in reversed(self.__class__.__mro__): + messages.update(getattr(c, "default_error_messages", {})) + messages.update(self._error_messages or {}) + return messages + + @cached_property + def validators(self): + """ + Some validators can't be created at field initialization time. + This method provides a way to delay their creation until required. + """ + return [*self.default_validators, *self._validators] + + def run_validators(self, value): + if value in self.empty_values: + return + + errors = [] + for v in self.validators: + try: + v(value) + except exceptions.ValidationError as e: + if hasattr(e, "code") and e.code in self.error_messages: + e.message = self.error_messages[e.code] + errors.extend(e.error_list) + + if errors: + raise exceptions.ValidationError(errors) + + def validate(self, value, model_instance): + """ + Validate value and raise ValidationError if necessary. Subclasses + should override this to provide validation logic. + """ + if not self.editable: + # Skip validation for non-editable fields. + return + + if self.choices is not None and value not in self.empty_values: + for option_key, option_value in self.choices: + if isinstance(option_value, (list, tuple)): + # This is an optgroup, so look inside the group for + # options. + for optgroup_key, optgroup_value in option_value: + if value == optgroup_key: + return + elif value == option_key: + return + raise exceptions.ValidationError( + self.error_messages["invalid_choice"], + code="invalid_choice", + params={"value": value}, + ) + + if value is None and not self.null: + raise exceptions.ValidationError(self.error_messages["null"], code="null") + + if not self.blank and value in self.empty_values: + raise exceptions.ValidationError(self.error_messages["blank"], code="blank") + + def clean(self, value, model_instance): + """ + Convert the value's type and run validation. Validation errors + from to_python() and validate() are propagated. Return the correct + value if no error is raised. + """ + value = self.to_python(value) + self.validate(value, model_instance) + self.run_validators(value) + return value + + def db_type_parameters(self, connection): + return DictWrapper(self.__dict__, connection.ops.quote_name, "qn_") + + def db_check(self, connection): + """ + Return the database column check constraint for this field, for the + provided connection. Works the same way as db_type() for the case that + get_internal_type() does not map to a preexisting model field. + """ + data = self.db_type_parameters(connection) + try: + return ( + connection.data_type_check_constraints[self.get_internal_type()] % data + ) + except KeyError: + return None + + def db_type(self, connection): + """ + Return the database column data type for this field, for the provided + connection. + """ + # The default implementation of this method looks at the + # backend-specific data_types dictionary, looking up the field by its + # "internal type". + # + # A Field class can implement the get_internal_type() method to specify + # which *preexisting* Django Field class it's most similar to -- i.e., + # a custom field might be represented by a TEXT column type, which is + # the same as the TextField Django field type, which means the custom + # field's get_internal_type() returns 'TextField'. 
+ # + # But the limitation of the get_internal_type() / data_types approach + # is that it cannot handle database column types that aren't already + # mapped to one of the built-in Django field types. In this case, you + # can implement db_type() instead of get_internal_type() to specify + # exactly which wacky database column type you want to use. + data = self.db_type_parameters(connection) + try: + column_type = connection.data_types[self.get_internal_type()] + except KeyError: + return None + else: + # column_type is either a single-parameter function or a string. + if callable(column_type): + return column_type(data) + return column_type % data + + def rel_db_type(self, connection): + """ + Return the data type that a related field pointing to this field should + use. For example, this method is called by ForeignKey and OneToOneField + to determine its data type. + """ + return self.db_type(connection) + + def cast_db_type(self, connection): + """Return the data type to use in the Cast() function.""" + db_type = connection.ops.cast_data_types.get(self.get_internal_type()) + if db_type: + return db_type % self.db_type_parameters(connection) + return self.db_type(connection) + + def db_parameters(self, connection): + """ + Extension of db_type(), providing a range of different return values + (type, checks). This will look at db_type(), allowing custom model + fields to override it. + """ + type_string = self.db_type(connection) + check_string = self.db_check(connection) + return { + "type": type_string, + "check": check_string, + } + + def db_type_suffix(self, connection): + return connection.data_types_suffix.get(self.get_internal_type()) + + def get_db_converters(self, connection): + if hasattr(self, "from_db_value"): + return [self.from_db_value] + return [] + + @property + def unique(self): + return self._unique or self.primary_key + + @property + def db_tablespace(self): + return self._db_tablespace or settings.DEFAULT_INDEX_TABLESPACE + + @property + def db_returning(self): + """Private API intended only to be used by Django itself.""" + return ( + self.db_default is not NOT_PROVIDED + and connection.features.can_return_columns_from_insert + ) + + def set_attributes_from_name(self, name): + self.name = self.name or name + self.attname, self.column = self.get_attname_column() + self.concrete = self.column is not None + if self.verbose_name is None and self.name: + self.verbose_name = self.name.replace("_", " ") + + def contribute_to_class(self, cls, name, private_only=False): + """ + Register the field with the model class it belongs to. + + If private_only is True, create a separate instance of this field + for every subclass of cls, even if cls is not an abstract model. + """ + self.set_attributes_from_name(name) + self.model = cls + cls._meta.add_field(self, private=private_only) + if self.column: + setattr(cls, self.attname, self.descriptor_class(self)) + if self.choices is not None: + # Don't override a get_FOO_display() method defined explicitly on + # this class, but don't check methods derived from inheritance, to + # allow overriding inherited choices. For more complex inheritance + # structures users should override contribute_to_class(). 
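+        # Illustrative example: a field declared as
+        #     suit = models.CharField(max_length=1, choices=SUIT_CHOICES)
+        # (SUIT_CHOICES hypothetical) gives model instances a
+        # get_suit_display() method, unless the model class already defines
+        # one itself.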
+ if "get_%s_display" % self.name not in cls.__dict__: + setattr( + cls, + "get_%s_display" % self.name, + partialmethod(cls._get_FIELD_display, field=self), + ) + + def get_filter_kwargs_for_object(self, obj): + """ + Return a dict that when passed as kwargs to self.model.filter(), would + yield all instances having the same value for this field as obj has. + """ + return {self.name: getattr(obj, self.attname)} + + def get_attname(self): + return self.name + + def get_attname_column(self): + attname = self.get_attname() + column = self.db_column or attname + return attname, column + + def get_internal_type(self): + return self.__class__.__name__ + + def pre_save(self, model_instance, add): + """Return field's value just before saving.""" + value = getattr(model_instance, self.attname) + if not connection.features.supports_default_keyword_in_insert: + from django.db.models.expressions import DatabaseDefault + + if isinstance(value, DatabaseDefault): + return self.db_default + return value + + def get_prep_value(self, value): + """Perform preliminary non-db specific value checks and conversions.""" + if isinstance(value, Promise): + value = value._proxy____cast() + return value + + def get_db_prep_value(self, value, connection, prepared=False): + """ + Return field's value prepared for interacting with the database backend. + + Used by the default implementations of get_db_prep_save(). + """ + if not prepared: + value = self.get_prep_value(value) + return value + + def get_db_prep_save(self, value, connection): + """Return field's value prepared for saving into a database.""" + if hasattr(value, "as_sql"): + return value + return self.get_db_prep_value(value, connection=connection, prepared=False) + + def has_default(self): + """Return a boolean of whether this field has a default value.""" + return self.default is not NOT_PROVIDED + + def get_default(self): + """Return the default value for this field.""" + return self._get_default() + + @cached_property + def _get_default(self): + if self.has_default(): + if callable(self.default): + return self.default + return lambda: self.default + + if self.db_default is not NOT_PROVIDED: + from django.db.models.expressions import DatabaseDefault + + return DatabaseDefault + + if ( + not self.empty_strings_allowed + or self.null + and not connection.features.interprets_empty_strings_as_nulls + ): + return return_None + return str # return empty string + + def get_choices( + self, + include_blank=True, + blank_choice=BLANK_CHOICE_DASH, + limit_choices_to=None, + ordering=(), + ): + """ + Return choices with a default blank choices included, for use + as choices for this field. + + Analog of django.db.models.fields.Field.get_choices(), provided + initially for utilization by RelatedFieldListFilter. + """ + limit_choices_to = limit_choices_to or self.limit_choices_to + qs = self.related_model._default_manager.complex_filter(limit_choices_to) + if ordering: + qs = qs.order_by(*ordering) + return (blank_choice if include_blank else []) + [(x.pk, str(x)) for x in qs] + + def is_hidden(self): + """Should the related object be hidden?""" + return bool(self.related_name) and self.related_name[-1] == "+" + + def get_joining_columns(self): + warnings.warn( + "ForeignObjectRel.get_joining_columns() is deprecated. 
Use " + "get_joining_fields() instead.", + RemovedInDjango60Warning, + ) + return self.field.get_reverse_joining_columns() + + def get_joining_fields(self): + return self.field.get_reverse_joining_fields() + + def get_extra_restriction(self, alias, related_alias): + return self.field.get_extra_restriction(related_alias, alias) + + def set_field_name(self): + """ + Set the related field's name, this is not available until later stages + of app loading, so set_field_name is called from + set_attributes_from_rel() + """ + # By default foreign object doesn't relate to any remote field (for + # example custom multicolumn joins currently have no remote field). + self.field_name = None + + def get_accessor_name(self, model=None): + # This method encapsulates the logic that decides what name to give an + # accessor descriptor that retrieves related many-to-one or + # many-to-many objects. It uses the lowercased object_name + "_set", + # but this can be overridden with the "related_name" option. Due to + # backwards compatibility ModelForms need to be able to provide an + # alternate model. See BaseInlineFormSet.get_default_prefix(). + opts = model._meta if model else self.related_model._meta + model = model or self.related_model + if self.multiple: + # If this is a symmetrical m2m relation on self, there is no + # reverse accessor. + if self.symmetrical and model == self.model: + return None + if self.related_name: + return self.related_name + return opts.model_name + ("_set" if self.multiple else "") + + def get_path_info(self, filtered_relation=None): + if filtered_relation: + return self.field.get_reverse_path_info(filtered_relation) + else: + return self.field.reverse_path_infos + + @cached_property + def path_infos(self): + return self.get_path_info() + + def get_cache_name(self): + """ + Return the name of the cache key to use for storing an instance of the + forward model on the reverse model. + """ + return self.get_accessor_name() + + +class ManyToOneRel(ForeignObjectRel): + """ + Used by the ForeignKey field to store information about the relation. + + ``_meta.get_fields()`` returns this class to provide access to the field + flags for the reverse relation. + + Note: Because we somewhat abuse the Rel objects by using them as reverse + fields we get the funny situation where + ``ManyToOneRel.many_to_one == False`` and + ``ManyToOneRel.one_to_many == True``. This is unfortunate but the actual + ManyToOneRel class is a private API and there is work underway to turn + reverse relations into actual fields. + """ + + def __init__( + self, + field, + to, + field_name, + related_name=None, + related_query_name=None, + limit_choices_to=None, + parent_link=False, + on_delete=None, + ): + super().__init__( + field, + to, + related_name=related_name, + related_query_name=related_query_name, + limit_choices_to=limit_choices_to, + parent_link=parent_link, + on_delete=on_delete, + ) + + self.field_name = field_name + + def __getstate__(self): + state = super().__getstate__() + state.pop("related_model", None) + return state + + @property + def identity(self): + return super().identity + (self.field_name,) + + def get_related_field(self): + """ + Return the Field in the 'to' object to which this relationship is tied. 
+ """ + field = self.model._meta.get_field(self.field_name) + if not field.concrete: + raise exceptions.FieldDoesNotExist( + "No related field named '%s'" % self.field_name + ) + return field + + def set_field_name(self): + self.field_name = self.field_name or self.model._meta.pk.name + + +class OneToOneRel(ManyToOneRel): + """ + Used by OneToOneField to store information about the relation. + + ``_meta.get_fields()`` returns this class to provide access to the field + flags for the reverse relation. + """ + + def __init__( + self, + field, + to, + field_name, + related_name=None, + related_query_name=None, + limit_choices_to=None, + parent_link=False, + on_delete=None, + ): + super().__init__( + field, + to, + field_name, + related_name=related_name, + related_query_name=related_query_name, + limit_choices_to=limit_choices_to, + parent_link=parent_link, + on_delete=on_delete, + ) + + self.multiple = False + + +class ManyToManyRel(ForeignObjectRel): + """ + Used by ManyToManyField to store information about the relation. + + ``_meta.get_fields()`` returns this class to provide access to the field + flags for the reverse relation. + """ + + def __init__( + self, + field, + to, + related_name=None, + related_query_name=None, + limit_choices_to=None, + symmetrical=True, + through=None, + through_fields=None, + db_constraint=True, + ): + super().__init__( + field, + to, + related_name=related_name, + related_query_name=related_query_name, + limit_choices_to=limit_choices_to, + ) + + if through and not db_constraint: + raise ValueError("Can't supply a through model and db_constraint=False") + self.through = through + + if through_fields and not through: + raise ValueError("Cannot specify through_fields without a through model") + self.through_fields = through_fields + + self.symmetrical = symmetrical + self.db_constraint = db_constraint + + @property + def identity(self): + return super().identity + ( + self.through, + make_hashable(self.through_fields), + self.db_constraint, + ) + + def get_related_field(self): + """ + Return the field in the 'to' object to which this relationship is tied. + Provided for symmetry with ManyToOneRel. 
+ """ + opts = self.through._meta + if self.through_fields: + field = opts.get_field(self.through_fields[0]) + else: + for field in opts.fields: + rel = getattr(field, "remote_field", None) + if rel and rel.model == self.model: + break + return field.foreign_related_fields[0] diff --git a/testbed/django__django/django/db/models/functions/__init__.py b/testbed/django__django/django/db/models/functions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cd7c8018942ff2b5a033e9861554e9aa0ddfbd19 --- /dev/null +++ b/testbed/django__django/django/db/models/functions/__init__.py @@ -0,0 +1,190 @@ +from .comparison import Cast, Coalesce, Collate, Greatest, JSONObject, Least, NullIf +from .datetime import ( + Extract, + ExtractDay, + ExtractHour, + ExtractIsoWeekDay, + ExtractIsoYear, + ExtractMinute, + ExtractMonth, + ExtractQuarter, + ExtractSecond, + ExtractWeek, + ExtractWeekDay, + ExtractYear, + Now, + Trunc, + TruncDate, + TruncDay, + TruncHour, + TruncMinute, + TruncMonth, + TruncQuarter, + TruncSecond, + TruncTime, + TruncWeek, + TruncYear, +) +from .math import ( + Abs, + ACos, + ASin, + ATan, + ATan2, + Ceil, + Cos, + Cot, + Degrees, + Exp, + Floor, + Ln, + Log, + Mod, + Pi, + Power, + Radians, + Random, + Round, + Sign, + Sin, + Sqrt, + Tan, +) +from .text import ( + MD5, + SHA1, + SHA224, + SHA256, + SHA384, + SHA512, + Chr, + Concat, + ConcatPair, + Left, + Length, + Lower, + LPad, + LTrim, + Ord, + Repeat, + Replace, + Reverse, + Right, + RPad, + RTrim, + StrIndex, + Substr, + Trim, + Upper, +) +from .window import ( + CumeDist, + DenseRank, + FirstValue, + Lag, + LastValue, + Lead, + NthValue, + Ntile, + PercentRank, + Rank, + RowNumber, +) + +__all__ = [ + # comparison and conversion + "Cast", + "Coalesce", + "Collate", + "Greatest", + "JSONObject", + "Least", + "NullIf", + # datetime + "Extract", + "ExtractDay", + "ExtractHour", + "ExtractMinute", + "ExtractMonth", + "ExtractQuarter", + "ExtractSecond", + "ExtractWeek", + "ExtractIsoWeekDay", + "ExtractWeekDay", + "ExtractIsoYear", + "ExtractYear", + "Now", + "Trunc", + "TruncDate", + "TruncDay", + "TruncHour", + "TruncMinute", + "TruncMonth", + "TruncQuarter", + "TruncSecond", + "TruncTime", + "TruncWeek", + "TruncYear", + # math + "Abs", + "ACos", + "ASin", + "ATan", + "ATan2", + "Ceil", + "Cos", + "Cot", + "Degrees", + "Exp", + "Floor", + "Ln", + "Log", + "Mod", + "Pi", + "Power", + "Radians", + "Random", + "Round", + "Sign", + "Sin", + "Sqrt", + "Tan", + # text + "MD5", + "SHA1", + "SHA224", + "SHA256", + "SHA384", + "SHA512", + "Chr", + "Concat", + "ConcatPair", + "Left", + "Length", + "Lower", + "LPad", + "LTrim", + "Ord", + "Repeat", + "Replace", + "Reverse", + "Right", + "RPad", + "RTrim", + "StrIndex", + "Substr", + "Trim", + "Upper", + # window + "CumeDist", + "DenseRank", + "FirstValue", + "Lag", + "LastValue", + "Lead", + "NthValue", + "Ntile", + "PercentRank", + "Rank", + "RowNumber", +] diff --git a/testbed/django__django/django/db/models/functions/comparison.py b/testbed/django__django/django/db/models/functions/comparison.py new file mode 100644 index 0000000000000000000000000000000000000000..108d9047124821836e204cdc87eef7a8abdf68fa --- /dev/null +++ b/testbed/django__django/django/db/models/functions/comparison.py @@ -0,0 +1,221 @@ +"""Database functions that do comparisons or type conversions.""" +from django.db import NotSupportedError +from django.db.models.expressions import Func, Value +from django.db.models.fields import TextField +from django.db.models.fields.json import 
JSONField +from django.utils.regex_helper import _lazy_re_compile + + +class Cast(Func): + """Coerce an expression to a new field type.""" + + function = "CAST" + template = "%(function)s(%(expressions)s AS %(db_type)s)" + + def __init__(self, expression, output_field): + super().__init__(expression, output_field=output_field) + + def as_sql(self, compiler, connection, **extra_context): + extra_context["db_type"] = self.output_field.cast_db_type(connection) + return super().as_sql(compiler, connection, **extra_context) + + def as_sqlite(self, compiler, connection, **extra_context): + db_type = self.output_field.db_type(connection) + if db_type in {"datetime", "time"}: + # Use strftime as datetime/time don't keep fractional seconds. + template = "strftime(%%s, %(expressions)s)" + sql, params = super().as_sql( + compiler, connection, template=template, **extra_context + ) + format_string = "%H:%M:%f" if db_type == "time" else "%Y-%m-%d %H:%M:%f" + params.insert(0, format_string) + return sql, params + elif db_type == "date": + template = "date(%(expressions)s)" + return super().as_sql( + compiler, connection, template=template, **extra_context + ) + return self.as_sql(compiler, connection, **extra_context) + + def as_mysql(self, compiler, connection, **extra_context): + template = None + output_type = self.output_field.get_internal_type() + # MySQL doesn't support explicit cast to float. + if output_type == "FloatField": + template = "(%(expressions)s + 0.0)" + # MariaDB doesn't support explicit cast to JSON. + elif output_type == "JSONField" and connection.mysql_is_mariadb: + template = "JSON_EXTRACT(%(expressions)s, '$')" + return self.as_sql(compiler, connection, template=template, **extra_context) + + def as_postgresql(self, compiler, connection, **extra_context): + # CAST would be valid too, but the :: shortcut syntax is more readable. + # 'expressions' is wrapped in parentheses in case it's a complex + # expression. + return self.as_sql( + compiler, + connection, + template="(%(expressions)s)::%(db_type)s", + **extra_context, + ) + + def as_oracle(self, compiler, connection, **extra_context): + if self.output_field.get_internal_type() == "JSONField": + # Oracle doesn't support explicit cast to JSON. + template = "JSON_QUERY(%(expressions)s, '$')" + return super().as_sql( + compiler, connection, template=template, **extra_context + ) + return self.as_sql(compiler, connection, **extra_context) + + +class Coalesce(Func): + """Return, from left to right, the first non-null expression.""" + + function = "COALESCE" + + def __init__(self, *expressions, **extra): + if len(expressions) < 2: + raise ValueError("Coalesce must take at least two expressions") + super().__init__(*expressions, **extra) + + @property + def empty_result_set_value(self): + for expression in self.get_source_expressions(): + result = expression.empty_result_set_value + if result is NotImplemented or result is not None: + return result + return None + + def as_oracle(self, compiler, connection, **extra_context): + # Oracle prohibits mixing TextField (NCLOB) and CharField (NVARCHAR2), + # so convert all fields to NCLOB when that type is expected. 
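+        # Illustrative trigger for this path (model and field names
+        # hypothetical):
+        #
+        #     Author.objects.annotate(
+        #         headline=Coalesce("summary", "body", output_field=TextField())
+        #     )
+        #
+        # Each source expression is wrapped in TO_NCLOB() below so the
+        # COALESCE arguments share one comparable type on Oracle.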
+ if self.output_field.get_internal_type() == "TextField": + clone = self.copy() + clone.set_source_expressions( + [ + Func(expression, function="TO_NCLOB") + for expression in self.get_source_expressions() + ] + ) + return super(Coalesce, clone).as_sql(compiler, connection, **extra_context) + return self.as_sql(compiler, connection, **extra_context) + + +class Collate(Func): + function = "COLLATE" + template = "%(expressions)s %(function)s %(collation)s" + allowed_default = False + # Inspired from + # https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS + collation_re = _lazy_re_compile(r"^[\w\-]+$") + + def __init__(self, expression, collation): + if not (collation and self.collation_re.match(collation)): + raise ValueError("Invalid collation name: %r." % collation) + self.collation = collation + super().__init__(expression) + + def as_sql(self, compiler, connection, **extra_context): + extra_context.setdefault("collation", connection.ops.quote_name(self.collation)) + return super().as_sql(compiler, connection, **extra_context) + + +class Greatest(Func): + """ + Return the maximum expression. + + If any expression is null the return value is database-specific: + On PostgreSQL, the maximum not-null expression is returned. + On MySQL, Oracle, and SQLite, if any expression is null, null is returned. + """ + + function = "GREATEST" + + def __init__(self, *expressions, **extra): + if len(expressions) < 2: + raise ValueError("Greatest must take at least two expressions") + super().__init__(*expressions, **extra) + + def as_sqlite(self, compiler, connection, **extra_context): + """Use the MAX function on SQLite.""" + return super().as_sqlite(compiler, connection, function="MAX", **extra_context) + + +class JSONObject(Func): + function = "JSON_OBJECT" + output_field = JSONField() + + def __init__(self, **fields): + expressions = [] + for key, value in fields.items(): + expressions.extend((Value(key), value)) + super().__init__(*expressions) + + def as_sql(self, compiler, connection, **extra_context): + if not connection.features.has_json_object_function: + raise NotSupportedError( + "JSONObject() is not supported on this database backend." + ) + return super().as_sql(compiler, connection, **extra_context) + + def as_postgresql(self, compiler, connection, **extra_context): + copy = self.copy() + copy.set_source_expressions( + [ + Cast(expression, TextField()) if index % 2 == 0 else expression + for index, expression in enumerate(copy.get_source_expressions()) + ] + ) + return super(JSONObject, copy).as_sql( + compiler, + connection, + function="JSONB_BUILD_OBJECT", + **extra_context, + ) + + def as_oracle(self, compiler, connection, **extra_context): + class ArgJoiner: + def join(self, args): + args = [" VALUE ".join(arg) for arg in zip(args[::2], args[1::2])] + return ", ".join(args) + + return self.as_sql( + compiler, + connection, + arg_joiner=ArgJoiner(), + template="%(function)s(%(expressions)s RETURNING CLOB)", + **extra_context, + ) + + +class Least(Func): + """ + Return the minimum expression. + + If any expression is null the return value is database-specific: + On PostgreSQL, return the minimum not-null expression. + On MySQL, Oracle, and SQLite, if any expression is null, return null. 
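+
+    Illustrative usage (field names are hypothetical):
+        Author.objects.annotate(first_updated=Least("created", "modified"))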
+ """ + + function = "LEAST" + + def __init__(self, *expressions, **extra): + if len(expressions) < 2: + raise ValueError("Least must take at least two expressions") + super().__init__(*expressions, **extra) + + def as_sqlite(self, compiler, connection, **extra_context): + """Use the MIN function on SQLite.""" + return super().as_sqlite(compiler, connection, function="MIN", **extra_context) + + +class NullIf(Func): + function = "NULLIF" + arity = 2 + + def as_oracle(self, compiler, connection, **extra_context): + expression1 = self.get_source_expressions()[0] + if isinstance(expression1, Value) and expression1.value is None: + raise ValueError("Oracle does not allow Value(None) for expression1.") + return super().as_sql(compiler, connection, **extra_context) diff --git a/testbed/django__django/django/db/models/functions/datetime.py b/testbed/django__django/django/db/models/functions/datetime.py new file mode 100644 index 0000000000000000000000000000000000000000..eb5332ecc0a9972d28fbf3154c95c644c3cabd7b --- /dev/null +++ b/testbed/django__django/django/db/models/functions/datetime.py @@ -0,0 +1,435 @@ +from datetime import datetime + +from django.conf import settings +from django.db.models.expressions import Func +from django.db.models.fields import ( + DateField, + DateTimeField, + DurationField, + Field, + IntegerField, + TimeField, +) +from django.db.models.lookups import ( + Transform, + YearExact, + YearGt, + YearGte, + YearLt, + YearLte, +) +from django.utils import timezone + + +class TimezoneMixin: + tzinfo = None + + def get_tzname(self): + # Timezone conversions must happen to the input datetime *before* + # applying a function. 2015-12-31 23:00:00 -02:00 is stored in the + # database as 2016-01-01 01:00:00 +00:00. Any results should be + # based on the input datetime not the stored datetime. + tzname = None + if settings.USE_TZ: + if self.tzinfo is None: + tzname = timezone.get_current_timezone_name() + else: + tzname = timezone._get_timezone_name(self.tzinfo) + return tzname + + +class Extract(TimezoneMixin, Transform): + lookup_name = None + output_field = IntegerField() + + def __init__(self, expression, lookup_name=None, tzinfo=None, **extra): + if self.lookup_name is None: + self.lookup_name = lookup_name + if self.lookup_name is None: + raise ValueError("lookup_name must be provided") + self.tzinfo = tzinfo + super().__init__(expression, **extra) + + def as_sql(self, compiler, connection): + sql, params = compiler.compile(self.lhs) + lhs_output_field = self.lhs.output_field + if isinstance(lhs_output_field, DateTimeField): + tzname = self.get_tzname() + sql, params = connection.ops.datetime_extract_sql( + self.lookup_name, sql, tuple(params), tzname + ) + elif self.tzinfo is not None: + raise ValueError("tzinfo can only be used with DateTimeField.") + elif isinstance(lhs_output_field, DateField): + sql, params = connection.ops.date_extract_sql( + self.lookup_name, sql, tuple(params) + ) + elif isinstance(lhs_output_field, TimeField): + sql, params = connection.ops.time_extract_sql( + self.lookup_name, sql, tuple(params) + ) + elif isinstance(lhs_output_field, DurationField): + if not connection.features.has_native_duration_field: + raise ValueError( + "Extract requires native DurationField database support." + ) + sql, params = connection.ops.time_extract_sql( + self.lookup_name, sql, tuple(params) + ) + else: + # resolve_expression has already validated the output_field so this + # assert should never be hit. + assert False, "Tried to Extract from an invalid type." 
+ return sql, params + + def resolve_expression( + self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False + ): + copy = super().resolve_expression( + query, allow_joins, reuse, summarize, for_save + ) + field = getattr(copy.lhs, "output_field", None) + if field is None: + return copy + if not isinstance(field, (DateField, DateTimeField, TimeField, DurationField)): + raise ValueError( + "Extract input expression must be DateField, DateTimeField, " + "TimeField, or DurationField." + ) + # Passing dates to functions expecting datetimes is most likely a mistake. + if type(field) == DateField and copy.lookup_name in ( + "hour", + "minute", + "second", + ): + raise ValueError( + "Cannot extract time component '%s' from DateField '%s'." + % (copy.lookup_name, field.name) + ) + if isinstance(field, DurationField) and copy.lookup_name in ( + "year", + "iso_year", + "month", + "week", + "week_day", + "iso_week_day", + "quarter", + ): + raise ValueError( + "Cannot extract component '%s' from DurationField '%s'." + % (copy.lookup_name, field.name) + ) + return copy + + +class ExtractYear(Extract): + lookup_name = "year" + + +class ExtractIsoYear(Extract): + """Return the ISO-8601 week-numbering year.""" + + lookup_name = "iso_year" + + +class ExtractMonth(Extract): + lookup_name = "month" + + +class ExtractDay(Extract): + lookup_name = "day" + + +class ExtractWeek(Extract): + """ + Return 1-52 or 53, based on ISO-8601, i.e., Monday is the first of the + week. + """ + + lookup_name = "week" + + +class ExtractWeekDay(Extract): + """ + Return Sunday=1 through Saturday=7. + + To replicate this in Python: (mydatetime.isoweekday() % 7) + 1 + """ + + lookup_name = "week_day" + + +class ExtractIsoWeekDay(Extract): + """Return Monday=1 through Sunday=7, based on ISO-8601.""" + + lookup_name = "iso_week_day" + + +class ExtractQuarter(Extract): + lookup_name = "quarter" + + +class ExtractHour(Extract): + lookup_name = "hour" + + +class ExtractMinute(Extract): + lookup_name = "minute" + + +class ExtractSecond(Extract): + lookup_name = "second" + + +DateField.register_lookup(ExtractYear) +DateField.register_lookup(ExtractMonth) +DateField.register_lookup(ExtractDay) +DateField.register_lookup(ExtractWeekDay) +DateField.register_lookup(ExtractIsoWeekDay) +DateField.register_lookup(ExtractWeek) +DateField.register_lookup(ExtractIsoYear) +DateField.register_lookup(ExtractQuarter) + +TimeField.register_lookup(ExtractHour) +TimeField.register_lookup(ExtractMinute) +TimeField.register_lookup(ExtractSecond) + +DateTimeField.register_lookup(ExtractHour) +DateTimeField.register_lookup(ExtractMinute) +DateTimeField.register_lookup(ExtractSecond) + +ExtractYear.register_lookup(YearExact) +ExtractYear.register_lookup(YearGt) +ExtractYear.register_lookup(YearGte) +ExtractYear.register_lookup(YearLt) +ExtractYear.register_lookup(YearLte) + +ExtractIsoYear.register_lookup(YearExact) +ExtractIsoYear.register_lookup(YearGt) +ExtractIsoYear.register_lookup(YearGte) +ExtractIsoYear.register_lookup(YearLt) +ExtractIsoYear.register_lookup(YearLte) + + +class Now(Func): + template = "CURRENT_TIMESTAMP" + output_field = DateTimeField() + + def as_postgresql(self, compiler, connection, **extra_context): + # PostgreSQL's CURRENT_TIMESTAMP means "the time at the start of the + # transaction". Use STATEMENT_TIMESTAMP to be cross-compatible with + # other databases. 
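+        # A sketch (hypothetical model name): Order.objects.filter(
+        # expires__lt=Now()) compares against STATEMENT_TIMESTAMP() rather
+        # than the transaction start time on PostgreSQL.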
+ return self.as_sql( + compiler, connection, template="STATEMENT_TIMESTAMP()", **extra_context + ) + + def as_mysql(self, compiler, connection, **extra_context): + return self.as_sql( + compiler, connection, template="CURRENT_TIMESTAMP(6)", **extra_context + ) + + def as_sqlite(self, compiler, connection, **extra_context): + return self.as_sql( + compiler, + connection, + template="STRFTIME('%%%%Y-%%%%m-%%%%d %%%%H:%%%%M:%%%%f', 'NOW')", + **extra_context, + ) + + def as_oracle(self, compiler, connection, **extra_context): + return self.as_sql( + compiler, connection, template="LOCALTIMESTAMP", **extra_context + ) + + +class TruncBase(TimezoneMixin, Transform): + kind = None + tzinfo = None + + def __init__( + self, + expression, + output_field=None, + tzinfo=None, + **extra, + ): + self.tzinfo = tzinfo + super().__init__(expression, output_field=output_field, **extra) + + def as_sql(self, compiler, connection): + sql, params = compiler.compile(self.lhs) + tzname = None + if isinstance(self.lhs.output_field, DateTimeField): + tzname = self.get_tzname() + elif self.tzinfo is not None: + raise ValueError("tzinfo can only be used with DateTimeField.") + if isinstance(self.output_field, DateTimeField): + sql, params = connection.ops.datetime_trunc_sql( + self.kind, sql, tuple(params), tzname + ) + elif isinstance(self.output_field, DateField): + sql, params = connection.ops.date_trunc_sql( + self.kind, sql, tuple(params), tzname + ) + elif isinstance(self.output_field, TimeField): + sql, params = connection.ops.time_trunc_sql( + self.kind, sql, tuple(params), tzname + ) + else: + raise ValueError( + "Trunc only valid on DateField, TimeField, or DateTimeField." + ) + return sql, params + + def resolve_expression( + self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False + ): + copy = super().resolve_expression( + query, allow_joins, reuse, summarize, for_save + ) + field = copy.lhs.output_field + # DateTimeField is a subclass of DateField so this works for both. + if not isinstance(field, (DateField, TimeField)): + raise TypeError( + "%r isn't a DateField, TimeField, or DateTimeField." % field.name + ) + # If self.output_field was None, then accessing the field will trigger + # the resolver to assign it to self.lhs.output_field. + if not isinstance(copy.output_field, (DateField, DateTimeField, TimeField)): + raise ValueError( + "output_field must be either DateField, TimeField, or DateTimeField" + ) + # Passing dates or times to functions expecting datetimes is most + # likely a mistake. + class_output_field = ( + self.__class__.output_field + if isinstance(self.__class__.output_field, Field) + else None + ) + output_field = class_output_field or copy.output_field + has_explicit_output_field = ( + class_output_field or field.__class__ is not copy.output_field.__class__ + ) + if type(field) == DateField and ( + isinstance(output_field, DateTimeField) + or copy.kind in ("hour", "minute", "second", "time") + ): + raise ValueError( + "Cannot truncate DateField '%s' to %s." + % ( + field.name, + output_field.__class__.__name__ + if has_explicit_output_field + else "DateTimeField", + ) + ) + elif isinstance(field, TimeField) and ( + isinstance(output_field, DateTimeField) + or copy.kind in ("year", "quarter", "month", "week", "day", "date") + ): + raise ValueError( + "Cannot truncate TimeField '%s' to %s." 
+ % ( + field.name, + output_field.__class__.__name__ + if has_explicit_output_field + else "DateTimeField", + ) + ) + return copy + + def convert_value(self, value, expression, connection): + if isinstance(self.output_field, DateTimeField): + if not settings.USE_TZ: + pass + elif value is not None: + value = value.replace(tzinfo=None) + value = timezone.make_aware(value, self.tzinfo) + elif not connection.features.has_zoneinfo_database: + raise ValueError( + "Database returned an invalid datetime value. Are time " + "zone definitions for your database installed?" + ) + elif isinstance(value, datetime): + if value is None: + pass + elif isinstance(self.output_field, DateField): + value = value.date() + elif isinstance(self.output_field, TimeField): + value = value.time() + return value + + +class Trunc(TruncBase): + def __init__( + self, + expression, + kind, + output_field=None, + tzinfo=None, + **extra, + ): + self.kind = kind + super().__init__(expression, output_field=output_field, tzinfo=tzinfo, **extra) + + +class TruncYear(TruncBase): + kind = "year" + + +class TruncQuarter(TruncBase): + kind = "quarter" + + +class TruncMonth(TruncBase): + kind = "month" + + +class TruncWeek(TruncBase): + """Truncate to midnight on the Monday of the week.""" + + kind = "week" + + +class TruncDay(TruncBase): + kind = "day" + + +class TruncDate(TruncBase): + kind = "date" + lookup_name = "date" + output_field = DateField() + + def as_sql(self, compiler, connection): + # Cast to date rather than truncate to date. + sql, params = compiler.compile(self.lhs) + tzname = self.get_tzname() + return connection.ops.datetime_cast_date_sql(sql, tuple(params), tzname) + + +class TruncTime(TruncBase): + kind = "time" + lookup_name = "time" + output_field = TimeField() + + def as_sql(self, compiler, connection): + # Cast to time rather than truncate to time. 
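+        # Illustrative (hypothetical field name): TruncTime("timestamp") keeps
+        # only the time portion via connection.ops.datetime_cast_time_sql()
+        # below.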
+ sql, params = compiler.compile(self.lhs) + tzname = self.get_tzname() + return connection.ops.datetime_cast_time_sql(sql, tuple(params), tzname) + + +class TruncHour(TruncBase): + kind = "hour" + + +class TruncMinute(TruncBase): + kind = "minute" + + +class TruncSecond(TruncBase): + kind = "second" + + +DateTimeField.register_lookup(TruncDate) +DateTimeField.register_lookup(TruncTime) diff --git a/testbed/django__django/django/db/models/functions/math.py b/testbed/django__django/django/db/models/functions/math.py new file mode 100644 index 0000000000000000000000000000000000000000..460143ba5af030a73decf7246dc3b014b048add2 --- /dev/null +++ b/testbed/django__django/django/db/models/functions/math.py @@ -0,0 +1,212 @@ +import math + +from django.db.models.expressions import Func, Value +from django.db.models.fields import FloatField, IntegerField +from django.db.models.functions import Cast +from django.db.models.functions.mixins import ( + FixDecimalInputMixin, + NumericOutputFieldMixin, +) +from django.db.models.lookups import Transform + + +class Abs(Transform): + function = "ABS" + lookup_name = "abs" + + +class ACos(NumericOutputFieldMixin, Transform): + function = "ACOS" + lookup_name = "acos" + + +class ASin(NumericOutputFieldMixin, Transform): + function = "ASIN" + lookup_name = "asin" + + +class ATan(NumericOutputFieldMixin, Transform): + function = "ATAN" + lookup_name = "atan" + + +class ATan2(NumericOutputFieldMixin, Func): + function = "ATAN2" + arity = 2 + + def as_sqlite(self, compiler, connection, **extra_context): + if not getattr( + connection.ops, "spatialite", False + ) or connection.ops.spatial_version >= (5, 0, 0): + return self.as_sql(compiler, connection) + # This function is usually ATan2(y, x), returning the inverse tangent + # of y / x, but it's ATan2(x, y) on SpatiaLite < 5.0.0. + # Cast integers to float to avoid inconsistent/buggy behavior if the + # arguments are mixed between integer and float or decimal. 
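+        # A sketch: ATan2(F("y"), F("x")) with integer columns would be
+        # emitted roughly as ATAN2(CAST("x" AS REAL), CAST("y" AS REAL)) on
+        # SpatiaLite < 5.0.0 (arguments reversed, integers cast to float).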
+ # https://www.gaia-gis.it/fossil/libspatialite/tktview?name=0f72cca3a2 + clone = self.copy() + clone.set_source_expressions( + [ + Cast(expression, FloatField()) + if isinstance(expression.output_field, IntegerField) + else expression + for expression in self.get_source_expressions()[::-1] + ] + ) + return clone.as_sql(compiler, connection, **extra_context) + + +class Ceil(Transform): + function = "CEILING" + lookup_name = "ceil" + + def as_oracle(self, compiler, connection, **extra_context): + return super().as_sql(compiler, connection, function="CEIL", **extra_context) + + +class Cos(NumericOutputFieldMixin, Transform): + function = "COS" + lookup_name = "cos" + + +class Cot(NumericOutputFieldMixin, Transform): + function = "COT" + lookup_name = "cot" + + def as_oracle(self, compiler, connection, **extra_context): + return super().as_sql( + compiler, connection, template="(1 / TAN(%(expressions)s))", **extra_context + ) + + +class Degrees(NumericOutputFieldMixin, Transform): + function = "DEGREES" + lookup_name = "degrees" + + def as_oracle(self, compiler, connection, **extra_context): + return super().as_sql( + compiler, + connection, + template="((%%(expressions)s) * 180 / %s)" % math.pi, + **extra_context, + ) + + +class Exp(NumericOutputFieldMixin, Transform): + function = "EXP" + lookup_name = "exp" + + +class Floor(Transform): + function = "FLOOR" + lookup_name = "floor" + + +class Ln(NumericOutputFieldMixin, Transform): + function = "LN" + lookup_name = "ln" + + +class Log(FixDecimalInputMixin, NumericOutputFieldMixin, Func): + function = "LOG" + arity = 2 + + def as_sqlite(self, compiler, connection, **extra_context): + if not getattr(connection.ops, "spatialite", False): + return self.as_sql(compiler, connection) + # This function is usually Log(b, x) returning the logarithm of x to + # the base b, but on SpatiaLite it's Log(x, b). + clone = self.copy() + clone.set_source_expressions(self.get_source_expressions()[::-1]) + return clone.as_sql(compiler, connection, **extra_context) + + +class Mod(FixDecimalInputMixin, NumericOutputFieldMixin, Func): + function = "MOD" + arity = 2 + + +class Pi(NumericOutputFieldMixin, Func): + function = "PI" + arity = 0 + + def as_oracle(self, compiler, connection, **extra_context): + return super().as_sql( + compiler, connection, template=str(math.pi), **extra_context + ) + + +class Power(NumericOutputFieldMixin, Func): + function = "POWER" + arity = 2 + + +class Radians(NumericOutputFieldMixin, Transform): + function = "RADIANS" + lookup_name = "radians" + + def as_oracle(self, compiler, connection, **extra_context): + return super().as_sql( + compiler, + connection, + template="((%%(expressions)s) * %s / 180)" % math.pi, + **extra_context, + ) + + +class Random(NumericOutputFieldMixin, Func): + function = "RANDOM" + arity = 0 + + def as_mysql(self, compiler, connection, **extra_context): + return super().as_sql(compiler, connection, function="RAND", **extra_context) + + def as_oracle(self, compiler, connection, **extra_context): + return super().as_sql( + compiler, connection, function="DBMS_RANDOM.VALUE", **extra_context + ) + + def as_sqlite(self, compiler, connection, **extra_context): + return super().as_sql(compiler, connection, function="RAND", **extra_context) + + def get_group_by_cols(self): + return [] + + +class Round(FixDecimalInputMixin, Transform): + function = "ROUND" + lookup_name = "round" + arity = None # Override Transform's arity=1 to enable passing precision. 
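+    # Illustrative (hypothetical field name): Round("price", precision=2)
+    # compiles to ROUND("price", 2); the output_field is taken from the
+    # source expression (see _resolve_output_field() below).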
+ + def __init__(self, expression, precision=0, **extra): + super().__init__(expression, precision, **extra) + + def as_sqlite(self, compiler, connection, **extra_context): + precision = self.get_source_expressions()[1] + if isinstance(precision, Value) and precision.value < 0: + raise ValueError("SQLite does not support negative precision.") + return super().as_sqlite(compiler, connection, **extra_context) + + def _resolve_output_field(self): + source = self.get_source_expressions()[0] + return source.output_field + + +class Sign(Transform): + function = "SIGN" + lookup_name = "sign" + + +class Sin(NumericOutputFieldMixin, Transform): + function = "SIN" + lookup_name = "sin" + + +class Sqrt(NumericOutputFieldMixin, Transform): + function = "SQRT" + lookup_name = "sqrt" + + +class Tan(NumericOutputFieldMixin, Transform): + function = "TAN" + lookup_name = "tan" diff --git a/testbed/django__django/django/db/models/functions/mixins.py b/testbed/django__django/django/db/models/functions/mixins.py new file mode 100644 index 0000000000000000000000000000000000000000..caf20e131d87faf4c7bbc14022d19630b1fea935 --- /dev/null +++ b/testbed/django__django/django/db/models/functions/mixins.py @@ -0,0 +1,57 @@ +import sys + +from django.db.models.fields import DecimalField, FloatField, IntegerField +from django.db.models.functions import Cast + + +class FixDecimalInputMixin: + def as_postgresql(self, compiler, connection, **extra_context): + # Cast FloatField to DecimalField as PostgreSQL doesn't support the + # following function signatures: + # - LOG(double, double) + # - MOD(double, double) + output_field = DecimalField(decimal_places=sys.float_info.dig, max_digits=1000) + clone = self.copy() + clone.set_source_expressions( + [ + Cast(expression, output_field) + if isinstance(expression.output_field, FloatField) + else expression + for expression in self.get_source_expressions() + ] + ) + return clone.as_sql(compiler, connection, **extra_context) + + +class FixDurationInputMixin: + def as_mysql(self, compiler, connection, **extra_context): + sql, params = super().as_sql(compiler, connection, **extra_context) + if self.output_field.get_internal_type() == "DurationField": + sql = "CAST(%s AS SIGNED)" % sql + return sql, params + + def as_oracle(self, compiler, connection, **extra_context): + if self.output_field.get_internal_type() == "DurationField": + expression = self.get_source_expressions()[0] + options = self._get_repr_options() + from django.db.backends.oracle.functions import ( + IntervalToSeconds, + SecondsToInterval, + ) + + return compiler.compile( + SecondsToInterval( + self.__class__(IntervalToSeconds(expression), **options) + ) + ) + return super().as_sql(compiler, connection, **extra_context) + + +class NumericOutputFieldMixin: + def _resolve_output_field(self): + source_fields = self.get_source_fields() + if any(isinstance(s, DecimalField) for s in source_fields): + return DecimalField() + if any(isinstance(s, IntegerField) for s in source_fields): + return FloatField() + return super()._resolve_output_field() if source_fields else FloatField() diff --git a/testbed/django__django/django/db/models/functions/text.py b/testbed/django__django/django/db/models/functions/text.py new file mode 100644 index 0000000000000000000000000000000000000000..2b49f54328780dda3f652f08b96f9fa97a9d1bc8 --- /dev/null +++ b/testbed/django__django/django/db/models/functions/text.py @@ -0,0 +1,369 @@ +from django.db import NotSupportedError +from django.db.models.expressions import Func, Value +from 
django.db.models.fields import CharField, IntegerField, TextField +from django.db.models.functions import Cast, Coalesce +from django.db.models.lookups import Transform + + +class MySQLSHA2Mixin: + def as_mysql(self, compiler, connection, **extra_context): + return super().as_sql( + compiler, + connection, + template="SHA2(%%(expressions)s, %s)" % self.function[3:], + **extra_context, + ) + + +class OracleHashMixin: + def as_oracle(self, compiler, connection, **extra_context): + return super().as_sql( + compiler, + connection, + template=( + "LOWER(RAWTOHEX(STANDARD_HASH(UTL_I18N.STRING_TO_RAW(" + "%(expressions)s, 'AL32UTF8'), '%(function)s')))" + ), + **extra_context, + ) + + +class PostgreSQLSHAMixin: + def as_postgresql(self, compiler, connection, **extra_context): + return super().as_sql( + compiler, + connection, + template="ENCODE(DIGEST(%(expressions)s, '%(function)s'), 'hex')", + function=self.function.lower(), + **extra_context, + ) + + +class Chr(Transform): + function = "CHR" + lookup_name = "chr" + output_field = CharField() + + def as_mysql(self, compiler, connection, **extra_context): + return super().as_sql( + compiler, + connection, + function="CHAR", + template="%(function)s(%(expressions)s USING utf16)", + **extra_context, + ) + + def as_oracle(self, compiler, connection, **extra_context): + return super().as_sql( + compiler, + connection, + template="%(function)s(%(expressions)s USING NCHAR_CS)", + **extra_context, + ) + + def as_sqlite(self, compiler, connection, **extra_context): + return super().as_sql(compiler, connection, function="CHAR", **extra_context) + + +class ConcatPair(Func): + """ + Concatenate two arguments together. This is used by `Concat` because not + all backend databases support more than two arguments. + """ + + function = "CONCAT" + + def as_sqlite(self, compiler, connection, **extra_context): + coalesced = self.coalesce() + return super(ConcatPair, coalesced).as_sql( + compiler, + connection, + template="%(expressions)s", + arg_joiner=" || ", + **extra_context, + ) + + def as_postgresql(self, compiler, connection, **extra_context): + copy = self.copy() + copy.set_source_expressions( + [ + Cast(expression, TextField()) + for expression in copy.get_source_expressions() + ] + ) + return super(ConcatPair, copy).as_sql( + compiler, + connection, + **extra_context, + ) + + def as_mysql(self, compiler, connection, **extra_context): + # Use CONCAT_WS with an empty separator so that NULLs are ignored. + return super().as_sql( + compiler, + connection, + function="CONCAT_WS", + template="%(function)s('', %(expressions)s)", + **extra_context, + ) + + def coalesce(self): + # null on either side results in null for expression, wrap with coalesce + c = self.copy() + c.set_source_expressions( + [ + Coalesce(expression, Value("")) + for expression in c.get_source_expressions() + ] + ) + return c + + +class Concat(Func): + """ + Concatenate text fields together. Backends that result in an entire + null expression when any arguments are null will wrap each argument in + coalesce functions to ensure a non-null result. 
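+
+    Illustrative usage (model and field names are hypothetical):
+        Author.objects.annotate(
+            screen_name=Concat("name", Value(" "), "goes_by")
+        )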
+ """ + + function = None + template = "%(expressions)s" + + def __init__(self, *expressions, **extra): + if len(expressions) < 2: + raise ValueError("Concat must take at least two expressions") + paired = self._paired(expressions) + super().__init__(paired, **extra) + + def _paired(self, expressions): + # wrap pairs of expressions in successive concat functions + # exp = [a, b, c, d] + # -> ConcatPair(a, ConcatPair(b, ConcatPair(c, d)))) + if len(expressions) == 2: + return ConcatPair(*expressions) + return ConcatPair(expressions[0], self._paired(expressions[1:])) + + +class Left(Func): + function = "LEFT" + arity = 2 + output_field = CharField() + + def __init__(self, expression, length, **extra): + """ + expression: the name of a field, or an expression returning a string + length: the number of characters to return from the start of the string + """ + if not hasattr(length, "resolve_expression"): + if length < 1: + raise ValueError("'length' must be greater than 0.") + super().__init__(expression, length, **extra) + + def get_substr(self): + return Substr(self.source_expressions[0], Value(1), self.source_expressions[1]) + + def as_oracle(self, compiler, connection, **extra_context): + return self.get_substr().as_oracle(compiler, connection, **extra_context) + + def as_sqlite(self, compiler, connection, **extra_context): + return self.get_substr().as_sqlite(compiler, connection, **extra_context) + + +class Length(Transform): + """Return the number of characters in the expression.""" + + function = "LENGTH" + lookup_name = "length" + output_field = IntegerField() + + def as_mysql(self, compiler, connection, **extra_context): + return super().as_sql( + compiler, connection, function="CHAR_LENGTH", **extra_context + ) + + +class Lower(Transform): + function = "LOWER" + lookup_name = "lower" + + +class LPad(Func): + function = "LPAD" + output_field = CharField() + + def __init__(self, expression, length, fill_text=Value(" "), **extra): + if ( + not hasattr(length, "resolve_expression") + and length is not None + and length < 0 + ): + raise ValueError("'length' must be greater or equal to 0.") + super().__init__(expression, length, fill_text, **extra) + + +class LTrim(Transform): + function = "LTRIM" + lookup_name = "ltrim" + + +class MD5(OracleHashMixin, Transform): + function = "MD5" + lookup_name = "md5" + + +class Ord(Transform): + function = "ASCII" + lookup_name = "ord" + output_field = IntegerField() + + def as_mysql(self, compiler, connection, **extra_context): + return super().as_sql(compiler, connection, function="ORD", **extra_context) + + def as_sqlite(self, compiler, connection, **extra_context): + return super().as_sql(compiler, connection, function="UNICODE", **extra_context) + + +class Repeat(Func): + function = "REPEAT" + output_field = CharField() + + def __init__(self, expression, number, **extra): + if ( + not hasattr(number, "resolve_expression") + and number is not None + and number < 0 + ): + raise ValueError("'number' must be greater or equal to 0.") + super().__init__(expression, number, **extra) + + def as_oracle(self, compiler, connection, **extra_context): + expression, number = self.source_expressions + length = None if number is None else Length(expression) * number + rpad = RPad(expression, length, expression) + return rpad.as_sql(compiler, connection, **extra_context) + + +class Replace(Func): + function = "REPLACE" + + def __init__(self, expression, text, replacement=Value(""), **extra): + super().__init__(expression, text, replacement, **extra) + + +class 
Reverse(Transform): + function = "REVERSE" + lookup_name = "reverse" + + def as_oracle(self, compiler, connection, **extra_context): + # REVERSE in Oracle is undocumented and doesn't support multi-byte + # strings. Use a special subquery instead. + sql, params = super().as_sql( + compiler, + connection, + template=( + "(SELECT LISTAGG(s) WITHIN GROUP (ORDER BY n DESC) FROM " + "(SELECT LEVEL n, SUBSTR(%(expressions)s, LEVEL, 1) s " + "FROM DUAL CONNECT BY LEVEL <= LENGTH(%(expressions)s)) " + "GROUP BY %(expressions)s)" + ), + **extra_context, + ) + return sql, params * 3 + + +class Right(Left): + function = "RIGHT" + + def get_substr(self): + return Substr( + self.source_expressions[0], + self.source_expressions[1] * Value(-1), + self.source_expressions[1], + ) + + +class RPad(LPad): + function = "RPAD" + + +class RTrim(Transform): + function = "RTRIM" + lookup_name = "rtrim" + + +class SHA1(OracleHashMixin, PostgreSQLSHAMixin, Transform): + function = "SHA1" + lookup_name = "sha1" + + +class SHA224(MySQLSHA2Mixin, PostgreSQLSHAMixin, Transform): + function = "SHA224" + lookup_name = "sha224" + + def as_oracle(self, compiler, connection, **extra_context): + raise NotSupportedError("SHA224 is not supported on Oracle.") + + +class SHA256(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform): + function = "SHA256" + lookup_name = "sha256" + + +class SHA384(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform): + function = "SHA384" + lookup_name = "sha384" + + +class SHA512(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform): + function = "SHA512" + lookup_name = "sha512" + + +class StrIndex(Func): + """ + Return a positive integer corresponding to the 1-indexed position of the + first occurrence of a substring inside another string, or 0 if the + substring is not found. 
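+
+    Illustrative usage (field name is hypothetical):
+        Author.objects.annotate(middle_space=StrIndex("name", Value(" ")))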
+ """ + + function = "INSTR" + arity = 2 + output_field = IntegerField() + + def as_postgresql(self, compiler, connection, **extra_context): + return super().as_sql(compiler, connection, function="STRPOS", **extra_context) + + +class Substr(Func): + function = "SUBSTRING" + output_field = CharField() + + def __init__(self, expression, pos, length=None, **extra): + """ + expression: the name of a field, or an expression returning a string + pos: an integer > 0, or an expression returning an integer + length: an optional number of characters to return + """ + if not hasattr(pos, "resolve_expression"): + if pos < 1: + raise ValueError("'pos' must be greater than 0") + expressions = [expression, pos] + if length is not None: + expressions.append(length) + super().__init__(*expressions, **extra) + + def as_sqlite(self, compiler, connection, **extra_context): + return super().as_sql(compiler, connection, function="SUBSTR", **extra_context) + + def as_oracle(self, compiler, connection, **extra_context): + return super().as_sql(compiler, connection, function="SUBSTR", **extra_context) + + +class Trim(Transform): + function = "TRIM" + lookup_name = "trim" + + +class Upper(Transform): + function = "UPPER" + lookup_name = "upper" diff --git a/testbed/django__django/django/db/models/functions/window.py b/testbed/django__django/django/db/models/functions/window.py new file mode 100644 index 0000000000000000000000000000000000000000..671017aba7484adc17362e583561ded181bc38fd --- /dev/null +++ b/testbed/django__django/django/db/models/functions/window.py @@ -0,0 +1,120 @@ +from django.db.models.expressions import Func +from django.db.models.fields import FloatField, IntegerField + +__all__ = [ + "CumeDist", + "DenseRank", + "FirstValue", + "Lag", + "LastValue", + "Lead", + "NthValue", + "Ntile", + "PercentRank", + "Rank", + "RowNumber", +] + + +class CumeDist(Func): + function = "CUME_DIST" + output_field = FloatField() + window_compatible = True + + +class DenseRank(Func): + function = "DENSE_RANK" + output_field = IntegerField() + window_compatible = True + + +class FirstValue(Func): + arity = 1 + function = "FIRST_VALUE" + window_compatible = True + + +class LagLeadFunction(Func): + window_compatible = True + + def __init__(self, expression, offset=1, default=None, **extra): + if expression is None: + raise ValueError( + "%s requires a non-null source expression." % self.__class__.__name__ + ) + if offset is None or offset <= 0: + raise ValueError( + "%s requires a positive integer for the offset." + % self.__class__.__name__ + ) + args = (expression, offset) + if default is not None: + args += (default,) + super().__init__(*args, **extra) + + def _resolve_output_field(self): + sources = self.get_source_expressions() + return sources[0].output_field + + +class Lag(LagLeadFunction): + function = "LAG" + + +class LastValue(Func): + arity = 1 + function = "LAST_VALUE" + window_compatible = True + + +class Lead(LagLeadFunction): + function = "LEAD" + + +class NthValue(Func): + function = "NTH_VALUE" + window_compatible = True + + def __init__(self, expression, nth=1, **extra): + if expression is None: + raise ValueError( + "%s requires a non-null source expression." % self.__class__.__name__ + ) + if nth is None or nth <= 0: + raise ValueError( + "%s requires a positive integer as for nth." 
% self.__class__.__name__ + ) + super().__init__(expression, nth, **extra) + + def _resolve_output_field(self): + sources = self.get_source_expressions() + return sources[0].output_field + + +class Ntile(Func): + function = "NTILE" + output_field = IntegerField() + window_compatible = True + + def __init__(self, num_buckets=1, **extra): + if num_buckets <= 0: + raise ValueError("num_buckets must be greater than 0.") + super().__init__(num_buckets, **extra) + + +class PercentRank(Func): + function = "PERCENT_RANK" + output_field = FloatField() + window_compatible = True + + +class Rank(Func): + function = "RANK" + output_field = IntegerField() + window_compatible = True + + +class RowNumber(Func): + function = "ROW_NUMBER" + output_field = IntegerField() + window_compatible = True diff --git a/testbed/django__django/django/db/models/indexes.py b/testbed/django__django/django/db/models/indexes.py new file mode 100644 index 0000000000000000000000000000000000000000..b5451f9e2410a62caec9bc6a901b57cad10d81b0 --- /dev/null +++ b/testbed/django__django/django/db/models/indexes.py @@ -0,0 +1,297 @@ +from types import NoneType + +from django.db.backends.utils import names_digest, split_identifier +from django.db.models.expressions import Col, ExpressionList, F, Func, OrderBy +from django.db.models.functions import Collate +from django.db.models.query_utils import Q +from django.db.models.sql import Query +from django.utils.functional import partition + +__all__ = ["Index"] + + +class Index: + suffix = "idx" + # The max length of the name of the index (restricted to 30 for + # cross-database compatibility with Oracle) + max_name_length = 30 + + def __init__( + self, + *expressions, + fields=(), + name=None, + db_tablespace=None, + opclasses=(), + condition=None, + include=None, + ): + if opclasses and not name: + raise ValueError("An index must be named to use opclasses.") + if not isinstance(condition, (NoneType, Q)): + raise ValueError("Index.condition must be a Q instance.") + if condition and not name: + raise ValueError("An index must be named to use condition.") + if not isinstance(fields, (list, tuple)): + raise ValueError("Index.fields must be a list or tuple.") + if not isinstance(opclasses, (list, tuple)): + raise ValueError("Index.opclasses must be a list or tuple.") + if not expressions and not fields: + raise ValueError( + "At least one field or expression is required to define an index." + ) + if expressions and fields: + raise ValueError( + "Index.fields and expressions are mutually exclusive.", + ) + if expressions and not name: + raise ValueError("An index must be named to use expressions.") + if expressions and opclasses: + raise ValueError( + "Index.opclasses cannot be used with expressions. Use " + "django.contrib.postgres.indexes.OpClass() instead." + ) + if opclasses and len(fields) != len(opclasses): + raise ValueError( + "Index.fields and Index.opclasses must have the same number of " + "elements." + ) + if fields and not all(isinstance(field, str) for field in fields): + raise ValueError("Index.fields must contain only strings with field names.") + if include and not name: + raise ValueError("A covering index must be named.") + if not isinstance(include, (NoneType, list, tuple)): + raise ValueError("Index.include must be a list or tuple.") + self.fields = list(fields) + # A list of 2-tuple with the field name and ordering ('' or 'DESC'). 
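+        # e.g. fields=["-pub_date", "author"] gives
+        # [("pub_date", "DESC"), ("author", "")]. (Illustrative field names.)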
+ self.fields_orders = [ + (field_name.removeprefix("-"), "DESC" if field_name.startswith("-") else "") + for field_name in self.fields + ] + self.name = name or "" + self.db_tablespace = db_tablespace + self.opclasses = opclasses + self.condition = condition + self.include = tuple(include) if include else () + self.expressions = tuple( + F(expression) if isinstance(expression, str) else expression + for expression in expressions + ) + + @property + def contains_expressions(self): + return bool(self.expressions) + + def _get_condition_sql(self, model, schema_editor): + if self.condition is None: + return None + query = Query(model=model, alias_cols=False) + where = query.build_where(self.condition) + compiler = query.get_compiler(connection=schema_editor.connection) + sql, params = where.as_sql(compiler, schema_editor.connection) + return sql % tuple(schema_editor.quote_value(p) for p in params) + + def create_sql(self, model, schema_editor, using="", **kwargs): + include = [ + model._meta.get_field(field_name).column for field_name in self.include + ] + condition = self._get_condition_sql(model, schema_editor) + if self.expressions: + index_expressions = [] + for expression in self.expressions: + index_expression = IndexExpression(expression) + index_expression.set_wrapper_classes(schema_editor.connection) + index_expressions.append(index_expression) + expressions = ExpressionList(*index_expressions).resolve_expression( + Query(model, alias_cols=False), + ) + fields = None + col_suffixes = None + else: + fields = [ + model._meta.get_field(field_name) + for field_name, _ in self.fields_orders + ] + if schema_editor.connection.features.supports_index_column_ordering: + col_suffixes = [order[1] for order in self.fields_orders] + else: + col_suffixes = [""] * len(self.fields_orders) + expressions = None + return schema_editor._create_index_sql( + model, + fields=fields, + name=self.name, + using=using, + db_tablespace=self.db_tablespace, + col_suffixes=col_suffixes, + opclasses=self.opclasses, + condition=condition, + include=include, + expressions=expressions, + **kwargs, + ) + + def remove_sql(self, model, schema_editor, **kwargs): + return schema_editor._delete_index_sql(model, self.name, **kwargs) + + def deconstruct(self): + path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__) + path = path.replace("django.db.models.indexes", "django.db.models") + kwargs = {"name": self.name} + if self.fields: + kwargs["fields"] = self.fields + if self.db_tablespace is not None: + kwargs["db_tablespace"] = self.db_tablespace + if self.opclasses: + kwargs["opclasses"] = self.opclasses + if self.condition: + kwargs["condition"] = self.condition + if self.include: + kwargs["include"] = self.include + return (path, self.expressions, kwargs) + + def clone(self): + """Create a copy of this Index.""" + _, args, kwargs = self.deconstruct() + return self.__class__(*args, **kwargs) + + def set_name_with_model(self, model): + """ + Generate a unique name for the index. + + The name is divided into 3 parts - table name (12 chars), field name + (8 chars) and unique hash + suffix (10 chars). Each part is made to + fit its size by truncating the excess length. 
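+
+        For example (illustrative), a "title" field on a "blog_post" table
+        might get a name like "blog_post_title_a1b2c3_idx".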
+ """ + _, table_name = split_identifier(model._meta.db_table) + column_names = [ + model._meta.get_field(field_name).column + for field_name, order in self.fields_orders + ] + column_names_with_order = [ + (("-%s" if order else "%s") % column_name) + for column_name, (field_name, order) in zip( + column_names, self.fields_orders + ) + ] + # The length of the parts of the name is based on the default max + # length of 30 characters. + hash_data = [table_name] + column_names_with_order + [self.suffix] + self.name = "%s_%s_%s" % ( + table_name[:11], + column_names[0][:7], + "%s_%s" % (names_digest(*hash_data, length=6), self.suffix), + ) + if len(self.name) > self.max_name_length: + raise ValueError( + "Index too long for multiple database support. Is self.suffix " + "longer than 3 characters?" + ) + if self.name[0] == "_" or self.name[0].isdigit(): + self.name = "D%s" % self.name[1:] + + def __repr__(self): + return "<%s:%s%s%s%s%s%s%s>" % ( + self.__class__.__qualname__, + "" if not self.fields else " fields=%s" % repr(self.fields), + "" if not self.expressions else " expressions=%s" % repr(self.expressions), + "" if not self.name else " name=%s" % repr(self.name), + "" + if self.db_tablespace is None + else " db_tablespace=%s" % repr(self.db_tablespace), + "" if self.condition is None else " condition=%s" % self.condition, + "" if not self.include else " include=%s" % repr(self.include), + "" if not self.opclasses else " opclasses=%s" % repr(self.opclasses), + ) + + def __eq__(self, other): + if self.__class__ == other.__class__: + return self.deconstruct() == other.deconstruct() + return NotImplemented + + +class IndexExpression(Func): + """Order and wrap expressions for CREATE INDEX statements.""" + + template = "%(expressions)s" + wrapper_classes = (OrderBy, Collate) + + def set_wrapper_classes(self, connection=None): + # Some databases (e.g. MySQL) treats COLLATE as an indexed expression. + if connection and connection.features.collate_as_index_expression: + self.wrapper_classes = tuple( + [ + wrapper_cls + for wrapper_cls in self.wrapper_classes + if wrapper_cls is not Collate + ] + ) + + @classmethod + def register_wrappers(cls, *wrapper_classes): + cls.wrapper_classes = wrapper_classes + + def resolve_expression( + self, + query=None, + allow_joins=True, + reuse=None, + summarize=False, + for_save=False, + ): + expressions = list(self.flatten()) + # Split expressions and wrappers. + index_expressions, wrappers = partition( + lambda e: isinstance(e, self.wrapper_classes), + expressions, + ) + wrapper_types = [type(wrapper) for wrapper in wrappers] + if len(wrapper_types) != len(set(wrapper_types)): + raise ValueError( + "Multiple references to %s can't be used in an indexed " + "expression." + % ", ".join( + [wrapper_cls.__qualname__ for wrapper_cls in self.wrapper_classes] + ) + ) + if expressions[1 : len(wrappers) + 1] != wrappers: + raise ValueError( + "%s must be topmost expressions in an indexed expression." + % ", ".join( + [wrapper_cls.__qualname__ for wrapper_cls in self.wrapper_classes] + ) + ) + # Wrap expressions in parentheses if they are not column references. + root_expression = index_expressions[1] + resolve_root_expression = root_expression.resolve_expression( + query, + allow_joins, + reuse, + summarize, + for_save, + ) + if not isinstance(resolve_root_expression, Col): + root_expression = Func(root_expression, template="(%(expressions)s)") + + if wrappers: + # Order wrappers and set their expressions. 
+ wrappers = sorted( + wrappers, + key=lambda w: self.wrapper_classes.index(type(w)), + ) + wrappers = [wrapper.copy() for wrapper in wrappers] + for i, wrapper in enumerate(wrappers[:-1]): + wrapper.set_source_expressions([wrappers[i + 1]]) + # Set the root expression on the deepest wrapper. + wrappers[-1].set_source_expressions([root_expression]) + self.set_source_expressions([wrappers[0]]) + else: + # Use the root expression, if there are no wrappers. + self.set_source_expressions([root_expression]) + return super().resolve_expression( + query, allow_joins, reuse, summarize, for_save + ) + + def as_sqlite(self, compiler, connection, **extra_context): + # Casting to numeric is unnecessary. + return self.as_sql(compiler, connection, **extra_context) diff --git a/testbed/django__django/django/db/models/lookups.py b/testbed/django__django/django/db/models/lookups.py new file mode 100644 index 0000000000000000000000000000000000000000..91342a864af2a7977bc11d9bdb7f97d95f91cc4e --- /dev/null +++ b/testbed/django__django/django/db/models/lookups.py @@ -0,0 +1,757 @@ +import itertools +import math + +from django.core.exceptions import EmptyResultSet, FullResultSet +from django.db.models.expressions import Case, Expression, Func, Value, When +from django.db.models.fields import ( + BooleanField, + CharField, + DateTimeField, + Field, + IntegerField, + UUIDField, +) +from django.db.models.query_utils import RegisterLookupMixin +from django.utils.datastructures import OrderedSet +from django.utils.functional import cached_property +from django.utils.hashable import make_hashable + + +class Lookup(Expression): + lookup_name = None + prepare_rhs = True + can_use_none_as_rhs = False + + def __init__(self, lhs, rhs): + self.lhs, self.rhs = lhs, rhs + self.rhs = self.get_prep_lookup() + self.lhs = self.get_prep_lhs() + if hasattr(self.lhs, "get_bilateral_transforms"): + bilateral_transforms = self.lhs.get_bilateral_transforms() + else: + bilateral_transforms = [] + if bilateral_transforms: + # Warn the user as soon as possible if they are trying to apply + # a bilateral transformation on a nested QuerySet: that won't work. + from django.db.models.sql.query import Query # avoid circular import + + if isinstance(rhs, Query): + raise NotImplementedError( + "Bilateral transformations on nested querysets are not implemented." 
+ ) + self.bilateral_transforms = bilateral_transforms + + def apply_bilateral_transforms(self, value): + for transform in self.bilateral_transforms: + value = transform(value) + return value + + def __repr__(self): + return f"{self.__class__.__name__}({self.lhs!r}, {self.rhs!r})" + + def batch_process_rhs(self, compiler, connection, rhs=None): + if rhs is None: + rhs = self.rhs + if self.bilateral_transforms: + sqls, sqls_params = [], [] + for p in rhs: + value = Value(p, output_field=self.lhs.output_field) + value = self.apply_bilateral_transforms(value) + value = value.resolve_expression(compiler.query) + sql, sql_params = compiler.compile(value) + sqls.append(sql) + sqls_params.extend(sql_params) + else: + _, params = self.get_db_prep_lookup(rhs, connection) + sqls, sqls_params = ["%s"] * len(params), params + return sqls, sqls_params + + def get_source_expressions(self): + if self.rhs_is_direct_value(): + return [self.lhs] + return [self.lhs, self.rhs] + + def set_source_expressions(self, new_exprs): + if len(new_exprs) == 1: + self.lhs = new_exprs[0] + else: + self.lhs, self.rhs = new_exprs + + def get_prep_lookup(self): + if not self.prepare_rhs or hasattr(self.rhs, "resolve_expression"): + return self.rhs + if hasattr(self.lhs, "output_field"): + if hasattr(self.lhs.output_field, "get_prep_value"): + return self.lhs.output_field.get_prep_value(self.rhs) + elif self.rhs_is_direct_value(): + return Value(self.rhs) + return self.rhs + + def get_prep_lhs(self): + if hasattr(self.lhs, "resolve_expression"): + return self.lhs + return Value(self.lhs) + + def get_db_prep_lookup(self, value, connection): + return ("%s", [value]) + + def process_lhs(self, compiler, connection, lhs=None): + lhs = lhs or self.lhs + if hasattr(lhs, "resolve_expression"): + lhs = lhs.resolve_expression(compiler.query) + sql, params = compiler.compile(lhs) + if isinstance(lhs, Lookup): + # Wrapped in parentheses to respect operator precedence. + sql = f"({sql})" + return sql, params + + def process_rhs(self, compiler, connection): + value = self.rhs + if self.bilateral_transforms: + if self.rhs_is_direct_value(): + # Do not call get_db_prep_lookup here as the value will be + # transformed before being used for lookup + value = Value(value, output_field=self.lhs.output_field) + value = self.apply_bilateral_transforms(value) + value = value.resolve_expression(compiler.query) + if hasattr(value, "as_sql"): + sql, params = compiler.compile(value) + # Ensure expression is wrapped in parentheses to respect operator + # precedence but avoid double wrapping as it can be misinterpreted + # on some backends (e.g. subqueries on SQLite). + if sql and sql[0] != "(": + sql = "(%s)" % sql + return sql, params + else: + return self.get_db_prep_lookup(value, connection) + + def rhs_is_direct_value(self): + return not hasattr(self.rhs, "as_sql") + + def get_group_by_cols(self): + cols = [] + for source in self.get_source_expressions(): + cols.extend(source.get_group_by_cols()) + return cols + + def as_oracle(self, compiler, connection): + # Oracle doesn't allow EXISTS() and filters to be compared to another + # expression unless they're wrapped in a CASE WHEN. 
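+        # A sketch: comparing Exists(...) to another expression becomes
+        # roughly CASE WHEN EXISTS(...) THEN 1 ELSE 0 END on Oracle, via the
+        # Case/When wrapping below.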
+ wrapped = False + exprs = [] + for expr in (self.lhs, self.rhs): + if connection.ops.conditional_expression_supported_in_where_clause(expr): + expr = Case(When(expr, then=True), default=False) + wrapped = True + exprs.append(expr) + lookup = type(self)(*exprs) if wrapped else self + return lookup.as_sql(compiler, connection) + + @cached_property + def output_field(self): + return BooleanField() + + @property + def identity(self): + return self.__class__, self.lhs, self.rhs + + def __eq__(self, other): + if not isinstance(other, Lookup): + return NotImplemented + return self.identity == other.identity + + def __hash__(self): + return hash(make_hashable(self.identity)) + + def resolve_expression( + self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False + ): + c = self.copy() + c.is_summary = summarize + c.lhs = self.lhs.resolve_expression( + query, allow_joins, reuse, summarize, for_save + ) + if hasattr(self.rhs, "resolve_expression"): + c.rhs = self.rhs.resolve_expression( + query, allow_joins, reuse, summarize, for_save + ) + return c + + def select_format(self, compiler, sql, params): + # Wrap filters with a CASE WHEN expression if a database backend + # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP + # BY list. + if not compiler.connection.features.supports_boolean_expr_in_select_clause: + sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END" + return sql, params + + @cached_property + def allowed_default(self): + return self.lhs.allowed_default and self.rhs.allowed_default + + +class Transform(RegisterLookupMixin, Func): + """ + RegisterLookupMixin() is first so that get_lookup() and get_transform() + first examine self and then check output_field. + """ + + bilateral = False + arity = 1 + + @property + def lhs(self): + return self.get_source_expressions()[0] + + def get_bilateral_transforms(self): + if hasattr(self.lhs, "get_bilateral_transforms"): + bilateral_transforms = self.lhs.get_bilateral_transforms() + else: + bilateral_transforms = [] + if self.bilateral: + bilateral_transforms.append(self.__class__) + return bilateral_transforms + + +class BuiltinLookup(Lookup): + def process_lhs(self, compiler, connection, lhs=None): + lhs_sql, params = super().process_lhs(compiler, connection, lhs) + field_internal_type = self.lhs.output_field.get_internal_type() + db_type = self.lhs.output_field.db_type(connection=connection) + lhs_sql = connection.ops.field_cast_sql(db_type, field_internal_type) % lhs_sql + lhs_sql = ( + connection.ops.lookup_cast(self.lookup_name, field_internal_type) % lhs_sql + ) + return lhs_sql, list(params) + + def as_sql(self, compiler, connection): + lhs_sql, params = self.process_lhs(compiler, connection) + rhs_sql, rhs_params = self.process_rhs(compiler, connection) + params.extend(rhs_params) + rhs_sql = self.get_rhs_op(connection, rhs_sql) + return "%s %s" % (lhs_sql, rhs_sql), params + + def get_rhs_op(self, connection, rhs): + return connection.operators[self.lookup_name] % rhs + + +class FieldGetDbPrepValueMixin: + """ + Some lookups require Field.get_db_prep_value() to be called on their + inputs. + """ + + get_db_prep_lookup_value_is_iterable = False + + def get_db_prep_lookup(self, value, connection): + # For relational fields, use the 'target_field' attribute of the + # output_field. 
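+        # e.g. for a ForeignKey, values are prepared with the referenced
+        # (target) primary key field's get_db_prep_value(). (Illustrative.)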
+ field = getattr(self.lhs.output_field, "target_field", None) + get_db_prep_value = ( + getattr(field, "get_db_prep_value", None) + or self.lhs.output_field.get_db_prep_value + ) + return ( + "%s", + [get_db_prep_value(v, connection, prepared=True) for v in value] + if self.get_db_prep_lookup_value_is_iterable + else [get_db_prep_value(value, connection, prepared=True)], + ) + + +class FieldGetDbPrepValueIterableMixin(FieldGetDbPrepValueMixin): + """ + Some lookups require Field.get_db_prep_value() to be called on each value + in an iterable. + """ + + get_db_prep_lookup_value_is_iterable = True + + def get_prep_lookup(self): + if hasattr(self.rhs, "resolve_expression"): + return self.rhs + prepared_values = [] + for rhs_value in self.rhs: + if hasattr(rhs_value, "resolve_expression"): + # An expression will be handled by the database but can coexist + # alongside real values. + pass + elif self.prepare_rhs and hasattr(self.lhs.output_field, "get_prep_value"): + rhs_value = self.lhs.output_field.get_prep_value(rhs_value) + prepared_values.append(rhs_value) + return prepared_values + + def process_rhs(self, compiler, connection): + if self.rhs_is_direct_value(): + # rhs should be an iterable of values. Use batch_process_rhs() + # to prepare/transform those values. + return self.batch_process_rhs(compiler, connection) + else: + return super().process_rhs(compiler, connection) + + def resolve_expression_parameter(self, compiler, connection, sql, param): + params = [param] + if hasattr(param, "resolve_expression"): + param = param.resolve_expression(compiler.query) + if hasattr(param, "as_sql"): + sql, params = compiler.compile(param) + return sql, params + + def batch_process_rhs(self, compiler, connection, rhs=None): + pre_processed = super().batch_process_rhs(compiler, connection, rhs) + # The params list may contain expressions which compile to a + # sql/param pair. Zip them to get sql and param pairs that refer to the + # same argument and attempt to replace them with the result of + # compiling the param step. + sql, params = zip( + *( + self.resolve_expression_parameter(compiler, connection, sql, param) + for sql, param in zip(*pre_processed) + ) + ) + params = itertools.chain.from_iterable(params) + return sql, tuple(params) + + +class PostgresOperatorLookup(Lookup): + """Lookup defined by operators on PostgreSQL.""" + + postgres_operator = None + + def as_postgresql(self, compiler, connection): + lhs, lhs_params = self.process_lhs(compiler, connection) + rhs, rhs_params = self.process_rhs(compiler, connection) + params = tuple(lhs_params) + tuple(rhs_params) + return "%s %s %s" % (lhs, self.postgres_operator, rhs), params + + +@Field.register_lookup +class Exact(FieldGetDbPrepValueMixin, BuiltinLookup): + lookup_name = "exact" + + def get_prep_lookup(self): + from django.db.models.sql.query import Query # avoid circular import + + if isinstance(self.rhs, Query): + if self.rhs.has_limit_one(): + if not self.rhs.has_select_fields: + self.rhs.clear_select_clause() + self.rhs.add_fields(["pk"]) + else: + raise ValueError( + "The QuerySet value for an exact lookup must be limited to " + "one result using slicing." + ) + return super().get_prep_lookup() + + def as_sql(self, compiler, connection): + # Avoid comparison against direct rhs if lhs is a boolean value. That + # turns "boolfield__exact=True" into "WHERE boolean_field" instead of + # "WHERE boolean_field = True" when allowed. 
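+        # Likewise (illustrative), boolfield__exact=False compiles to
+        # "WHERE NOT boolean_field" via the template below.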
+ if ( + isinstance(self.rhs, bool) + and getattr(self.lhs, "conditional", False) + and connection.ops.conditional_expression_supported_in_where_clause( + self.lhs + ) + ): + lhs_sql, params = self.process_lhs(compiler, connection) + template = "%s" if self.rhs else "NOT %s" + return template % lhs_sql, params + return super().as_sql(compiler, connection) + + +@Field.register_lookup +class IExact(BuiltinLookup): + lookup_name = "iexact" + prepare_rhs = False + + def process_rhs(self, qn, connection): + rhs, params = super().process_rhs(qn, connection) + if params: + params[0] = connection.ops.prep_for_iexact_query(params[0]) + return rhs, params + + +@Field.register_lookup +class GreaterThan(FieldGetDbPrepValueMixin, BuiltinLookup): + lookup_name = "gt" + + +@Field.register_lookup +class GreaterThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup): + lookup_name = "gte" + + +@Field.register_lookup +class LessThan(FieldGetDbPrepValueMixin, BuiltinLookup): + lookup_name = "lt" + + +@Field.register_lookup +class LessThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup): + lookup_name = "lte" + + +class IntegerFieldOverflow: + underflow_exception = EmptyResultSet + overflow_exception = EmptyResultSet + + def process_rhs(self, compiler, connection): + rhs = self.rhs + if isinstance(rhs, int): + field_internal_type = self.lhs.output_field.get_internal_type() + min_value, max_value = connection.ops.integer_field_range( + field_internal_type + ) + if min_value is not None and rhs < min_value: + raise self.underflow_exception + if max_value is not None and rhs > max_value: + raise self.overflow_exception + return super().process_rhs(compiler, connection) + + +class IntegerFieldFloatRounding: + """ + Allow floats to work as query values for IntegerField. Without this, the + decimal portion of the float would always be discarded. + """ + + def get_prep_lookup(self): + if isinstance(self.rhs, float): + self.rhs = math.ceil(self.rhs) + return super().get_prep_lookup() + + +@IntegerField.register_lookup +class IntegerFieldExact(IntegerFieldOverflow, Exact): + pass + + +@IntegerField.register_lookup +class IntegerGreaterThan(IntegerFieldOverflow, GreaterThan): + underflow_exception = FullResultSet + + +@IntegerField.register_lookup +class IntegerGreaterThanOrEqual( + IntegerFieldOverflow, IntegerFieldFloatRounding, GreaterThanOrEqual +): + underflow_exception = FullResultSet + + +@IntegerField.register_lookup +class IntegerLessThan(IntegerFieldOverflow, IntegerFieldFloatRounding, LessThan): + overflow_exception = FullResultSet + + +@IntegerField.register_lookup +class IntegerLessThanOrEqual(IntegerFieldOverflow, LessThanOrEqual): + overflow_exception = FullResultSet + + +@Field.register_lookup +class In(FieldGetDbPrepValueIterableMixin, BuiltinLookup): + lookup_name = "in" + + def get_prep_lookup(self): + from django.db.models.sql.query import Query # avoid circular import + + if isinstance(self.rhs, Query): + self.rhs.clear_ordering(clear_default=True) + if not self.rhs.has_select_fields: + self.rhs.clear_select_clause() + self.rhs.add_fields(["pk"]) + return super().get_prep_lookup() + + def process_rhs(self, compiler, connection): + db_rhs = getattr(self.rhs, "_db", None) + if db_rhs is not None and db_rhs != connection.alias: + raise ValueError( + "Subqueries aren't allowed across different databases. Force " + "the inner query to be evaluated using `list(inner_query)`." + ) + + if self.rhs_is_direct_value(): + # Remove None from the list as NULL is never equal to anything. 
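+ # (Illustrative, not part of the original patch: pk__in=[1, None] + # behaves like pk__in=[1], and pk__in=[None] raises EmptyResultSet + # below, because SQL "IN (NULL)" never matches any row.)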
+ try: + rhs = OrderedSet(self.rhs) + rhs.discard(None) + except TypeError: # Unhashable items in self.rhs + rhs = [r for r in self.rhs if r is not None] + + if not rhs: + raise EmptyResultSet + + # rhs should be an iterable; use batch_process_rhs() to + # prepare/transform those values. + sqls, sqls_params = self.batch_process_rhs(compiler, connection, rhs) + placeholder = "(" + ", ".join(sqls) + ")" + return (placeholder, sqls_params) + return super().process_rhs(compiler, connection) + + def get_rhs_op(self, connection, rhs): + return "IN %s" % rhs + + def as_sql(self, compiler, connection): + max_in_list_size = connection.ops.max_in_list_size() + if ( + self.rhs_is_direct_value() + and max_in_list_size + and len(self.rhs) > max_in_list_size + ): + return self.split_parameter_list_as_sql(compiler, connection) + return super().as_sql(compiler, connection) + + def split_parameter_list_as_sql(self, compiler, connection): + # This is a special case for databases which limit the number of + # elements which can appear in an 'IN' clause. + max_in_list_size = connection.ops.max_in_list_size() + lhs, lhs_params = self.process_lhs(compiler, connection) + rhs, rhs_params = self.batch_process_rhs(compiler, connection) + in_clause_elements = ["("] + params = [] + for offset in range(0, len(rhs_params), max_in_list_size): + if offset > 0: + in_clause_elements.append(" OR ") + in_clause_elements.append("%s IN (" % lhs) + params.extend(lhs_params) + sqls = rhs[offset : offset + max_in_list_size] + sqls_params = rhs_params[offset : offset + max_in_list_size] + param_group = ", ".join(sqls) + in_clause_elements.append(param_group) + in_clause_elements.append(")") + params.extend(sqls_params) + in_clause_elements.append(")") + return "".join(in_clause_elements), params + + +class PatternLookup(BuiltinLookup): + param_pattern = "%%%s%%" + prepare_rhs = False + + def get_rhs_op(self, connection, rhs): + # Assume we are in startswith. We need to produce SQL like: + # col LIKE %s, ['thevalue%'] + # For Python values we can (and should) do that directly in Python, + # but if the value is, for example, a reference to another column, + # then we need to add the % pattern match to the lookup in SQL, e.g. + # col LIKE othercol || '%%' + # So, for Python values we don't need any special pattern, but for + # SQL references or SQL transformations we need the correct + # pattern added.
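+ # (Hypothetical illustration, not part of the original patch: with + # .filter(name__startswith=F("code")), self.rhs has as_sql(), so the + # backend's pattern op produces SQL along the lines of + # "LIKE code || '%'" instead of a pre-formatted parameter.)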
+ if hasattr(self.rhs, "as_sql") or self.bilateral_transforms: + pattern = connection.pattern_ops[self.lookup_name].format( + connection.pattern_esc + ) + return pattern.format(rhs) + else: + return super().get_rhs_op(connection, rhs) + + def process_rhs(self, qn, connection): + rhs, params = super().process_rhs(qn, connection) + if self.rhs_is_direct_value() and params and not self.bilateral_transforms: + params[0] = self.param_pattern % connection.ops.prep_for_like_query( + params[0] + ) + return rhs, params + + +@Field.register_lookup +class Contains(PatternLookup): + lookup_name = "contains" + + +@Field.register_lookup +class IContains(Contains): + lookup_name = "icontains" + + +@Field.register_lookup +class StartsWith(PatternLookup): + lookup_name = "startswith" + param_pattern = "%s%%" + + +@Field.register_lookup +class IStartsWith(StartsWith): + lookup_name = "istartswith" + + +@Field.register_lookup +class EndsWith(PatternLookup): + lookup_name = "endswith" + param_pattern = "%%%s" + + +@Field.register_lookup +class IEndsWith(EndsWith): + lookup_name = "iendswith" + + +@Field.register_lookup +class Range(FieldGetDbPrepValueIterableMixin, BuiltinLookup): + lookup_name = "range" + + def get_rhs_op(self, connection, rhs): + return "BETWEEN %s AND %s" % (rhs[0], rhs[1]) + + +@Field.register_lookup +class IsNull(BuiltinLookup): + lookup_name = "isnull" + prepare_rhs = False + + def as_sql(self, compiler, connection): + if not isinstance(self.rhs, bool): + raise ValueError( + "The QuerySet value for an isnull lookup must be True or False." + ) + sql, params = self.process_lhs(compiler, connection) + if self.rhs: + return "%s IS NULL" % sql, params + else: + return "%s IS NOT NULL" % sql, params + + +@Field.register_lookup +class Regex(BuiltinLookup): + lookup_name = "regex" + prepare_rhs = False + + def as_sql(self, compiler, connection): + if self.lookup_name in connection.operators: + return super().as_sql(compiler, connection) + else: + lhs, lhs_params = self.process_lhs(compiler, connection) + rhs, rhs_params = self.process_rhs(compiler, connection) + sql_template = connection.ops.regex_lookup(self.lookup_name) + return sql_template % (lhs, rhs), lhs_params + rhs_params + + +@Field.register_lookup +class IRegex(Regex): + lookup_name = "iregex" + + +class YearLookup(Lookup): + def year_lookup_bounds(self, connection, year): + from django.db.models.functions import ExtractIsoYear + + iso_year = isinstance(self.lhs, ExtractIsoYear) + output_field = self.lhs.lhs.output_field + if isinstance(output_field, DateTimeField): + bounds = connection.ops.year_lookup_bounds_for_datetime_field( + year, + iso_year=iso_year, + ) + else: + bounds = connection.ops.year_lookup_bounds_for_date_field( + year, + iso_year=iso_year, + ) + return bounds + + def as_sql(self, compiler, connection): + # Avoid the extract operation if the rhs is a direct value to allow + # indexes to be used. + if self.rhs_is_direct_value(): + # Skip the extract part by directly using the originating field, + # that is self.lhs.lhs. 
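+ # (Illustrative, not part of the original patch: pub_date__year=2015 + # compiles to "pub_date BETWEEN %s AND %s" with the year's bounds + # instead of extracting the year, so an index on pub_date stays usable.)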
+ lhs_sql, params = self.process_lhs(compiler, connection, self.lhs.lhs) + rhs_sql, _ = self.process_rhs(compiler, connection) + rhs_sql = self.get_direct_rhs_sql(connection, rhs_sql) + start, finish = self.year_lookup_bounds(connection, self.rhs) + params.extend(self.get_bound_params(start, finish)) + return "%s %s" % (lhs_sql, rhs_sql), params + return super().as_sql(compiler, connection) + + def get_direct_rhs_sql(self, connection, rhs): + return connection.operators[self.lookup_name] % rhs + + def get_bound_params(self, start, finish): + raise NotImplementedError( + "subclasses of YearLookup must provide a get_bound_params() method" + ) + + +class YearExact(YearLookup, Exact): + def get_direct_rhs_sql(self, connection, rhs): + return "BETWEEN %s AND %s" + + def get_bound_params(self, start, finish): + return (start, finish) + + +class YearGt(YearLookup, GreaterThan): + def get_bound_params(self, start, finish): + return (finish,) + + +class YearGte(YearLookup, GreaterThanOrEqual): + def get_bound_params(self, start, finish): + return (start,) + + +class YearLt(YearLookup, LessThan): + def get_bound_params(self, start, finish): + return (start,) + + +class YearLte(YearLookup, LessThanOrEqual): + def get_bound_params(self, start, finish): + return (finish,) + + +class UUIDTextMixin: + """ + Strip hyphens from a value when filtering a UUIDField on backends without + a native datatype for UUID. + """ + + def process_rhs(self, qn, connection): + if not connection.features.has_native_uuid_field: + from django.db.models.functions import Replace + + if self.rhs_is_direct_value(): + self.rhs = Value(self.rhs) + self.rhs = Replace( + self.rhs, Value("-"), Value(""), output_field=CharField() + ) + rhs, params = super().process_rhs(qn, connection) + return rhs, params + + +@UUIDField.register_lookup +class UUIDIExact(UUIDTextMixin, IExact): + pass + + +@UUIDField.register_lookup +class UUIDContains(UUIDTextMixin, Contains): + pass + + +@UUIDField.register_lookup +class UUIDIContains(UUIDTextMixin, IContains): + pass + + +@UUIDField.register_lookup +class UUIDStartsWith(UUIDTextMixin, StartsWith): + pass + + +@UUIDField.register_lookup +class UUIDIStartsWith(UUIDTextMixin, IStartsWith): + pass + + +@UUIDField.register_lookup +class UUIDEndsWith(UUIDTextMixin, EndsWith): + pass + + +@UUIDField.register_lookup +class UUIDIEndsWith(UUIDTextMixin, IEndsWith): + pass diff --git a/testbed/django__django/django/db/models/manager.py b/testbed/django__django/django/db/models/manager.py new file mode 100644 index 0000000000000000000000000000000000000000..467e79f9b9f7773598595ae364fe391ded78aeaf --- /dev/null +++ b/testbed/django__django/django/db/models/manager.py @@ -0,0 +1,213 @@ +import copy +import inspect +from functools import wraps +from importlib import import_module + +from django.db import router +from django.db.models.query import QuerySet + + +class BaseManager: + # To retain order, track each time a Manager instance is created. + creation_counter = 0 + + # Set to True for the 'objects' managers that are automatically created. + auto_created = False + + #: If set to True the manager will be serialized into migrations and will + #: thus be available in e.g. RunPython operations. + use_in_migrations = False + + def __new__(cls, *args, **kwargs): + # Capture the arguments to make returning them trivial. 
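+ # (Note, not part of the original patch: deconstruct() below returns + # these captured args so migrations can rebuild the manager.)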
+ obj = super().__new__(cls) + obj._constructor_args = (args, kwargs) + return obj + + def __init__(self): + super().__init__() + self._set_creation_counter() + self.model = None + self.name = None + self._db = None + self._hints = {} + + def __str__(self): + """Return "app_label.model_label.manager_name".""" + return "%s.%s" % (self.model._meta.label, self.name) + + def __class_getitem__(cls, *args, **kwargs): + return cls + + def deconstruct(self): + """ + Return a 5-tuple of the form (as_manager (True), manager_class, + queryset_class, args, kwargs). + + Raise a ValueError if the manager is dynamically generated. + """ + qs_class = self._queryset_class + if getattr(self, "_built_with_as_manager", False): + # using MyQuerySet.as_manager() + return ( + True, # as_manager + None, # manager_class + "%s.%s" % (qs_class.__module__, qs_class.__name__), # qs_class + None, # args + None, # kwargs + ) + else: + module_name = self.__module__ + name = self.__class__.__name__ + # Make sure it's actually there and not an inner class + module = import_module(module_name) + if not hasattr(module, name): + raise ValueError( + "Could not find manager %s in %s.\n" + "Please note that you need to inherit from managers you " + "dynamically generated with 'from_queryset()'." + % (name, module_name) + ) + return ( + False, # as_manager + "%s.%s" % (module_name, name), # manager_class + None, # qs_class + self._constructor_args[0], # args + self._constructor_args[1], # kwargs + ) + + def check(self, **kwargs): + return [] + + @classmethod + def _get_queryset_methods(cls, queryset_class): + def create_method(name, method): + @wraps(method) + def manager_method(self, *args, **kwargs): + return getattr(self.get_queryset(), name)(*args, **kwargs) + + return manager_method + + new_methods = {} + for name, method in inspect.getmembers( + queryset_class, predicate=inspect.isfunction + ): + # Only copy missing methods. + if hasattr(cls, name): + continue + # Only copy public methods or methods with the attribute + # queryset_only=False. + queryset_only = getattr(method, "queryset_only", None) + if queryset_only or (queryset_only is None and name.startswith("_")): + continue + # Copy the method onto the manager. + new_methods[name] = create_method(name, method) + return new_methods + + @classmethod + def from_queryset(cls, queryset_class, class_name=None): + if class_name is None: + class_name = "%sFrom%s" % (cls.__name__, queryset_class.__name__) + return type( + class_name, + (cls,), + { + "_queryset_class": queryset_class, + **cls._get_queryset_methods(queryset_class), + }, + ) + + def contribute_to_class(self, cls, name): + self.name = self.name or name + self.model = cls + + setattr(cls, name, ManagerDescriptor(self)) + + cls._meta.add_manager(self) + + def _set_creation_counter(self): + """ + Set the creation counter value for this instance and increment the + class-level copy. + """ + self.creation_counter = BaseManager.creation_counter + BaseManager.creation_counter += 1 + + def db_manager(self, using=None, hints=None): + obj = copy.copy(self) + obj._db = using or self._db + obj._hints = hints or self._hints + return obj + + @property + def db(self): + return self._db or router.db_for_read(self.model, **self._hints) + + ####################### + # PROXIES TO QUERYSET # + ####################### + + def get_queryset(self): + """ + Return a new QuerySet object. Subclasses can override this method to + customize the behavior of the Manager. 
+ """ + return self._queryset_class(model=self.model, using=self._db, hints=self._hints) + + def all(self): + # We can't proxy this method through the `QuerySet` like we do for the + # rest of the `QuerySet` methods. This is because `QuerySet.all()` + # works by creating a "copy" of the current queryset and in making said + # copy, all the cached `prefetch_related` lookups are lost. See the + # implementation of `RelatedManager.get_queryset()` for a better + # understanding of how this comes into play. + return self.get_queryset() + + def __eq__(self, other): + return ( + isinstance(other, self.__class__) + and self._constructor_args == other._constructor_args + ) + + def __hash__(self): + return id(self) + + +class Manager(BaseManager.from_queryset(QuerySet)): + pass + + +class ManagerDescriptor: + def __init__(self, manager): + self.manager = manager + + def __get__(self, instance, cls=None): + if instance is not None: + raise AttributeError( + "Manager isn't accessible via %s instances" % cls.__name__ + ) + + if cls._meta.abstract: + raise AttributeError( + "Manager isn't available; %s is abstract" % (cls._meta.object_name,) + ) + + if cls._meta.swapped: + raise AttributeError( + "Manager isn't available; '%s' has been swapped for '%s'" + % ( + cls._meta.label, + cls._meta.swapped, + ) + ) + + return cls._meta.managers_map[self.manager.name] + + +class EmptyManager(Manager): + def __init__(self, model): + super().__init__() + self.model = model + + def get_queryset(self): + return super().get_queryset().none() diff --git a/testbed/django__django/django/db/models/query.py b/testbed/django__django/django/db/models/query.py new file mode 100644 index 0000000000000000000000000000000000000000..395ba6e40494fca87c82e81ea686db98194afdb7 --- /dev/null +++ b/testbed/django__django/django/db/models/query.py @@ -0,0 +1,2647 @@ +""" +The main QuerySet implementation. This provides the public API for the ORM. +""" + +import copy +import operator +import warnings +from itertools import chain, islice + +from asgiref.sync import sync_to_async + +import django +from django.conf import settings +from django.core import exceptions +from django.db import ( + DJANGO_VERSION_PICKLE_KEY, + IntegrityError, + NotSupportedError, + connections, + router, + transaction, +) +from django.db.models import AutoField, DateField, DateTimeField, Field, sql +from django.db.models.constants import LOOKUP_SEP, OnConflict +from django.db.models.deletion import Collector +from django.db.models.expressions import Case, F, Value, When +from django.db.models.functions import Cast, Trunc +from django.db.models.query_utils import FilteredRelation, Q +from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE +from django.db.models.utils import ( + AltersData, + create_namedtuple_class, + resolve_callables, +) +from django.utils import timezone +from django.utils.functional import cached_property, partition + +# The maximum number of results to fetch in a get() query. 
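+# (Note, not part of the original patch: the limit is 21 rather than 20 so +# get() can report "more than 20" matching rows without fetching them all; +# see the MultipleObjectsReturned message built in get() below.)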
+MAX_GET_RESULTS = 21 + +# The maximum number of items to display in a QuerySet.__repr__ +REPR_OUTPUT_SIZE = 20 + + +class BaseIterable: + def __init__( + self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE + ): + self.queryset = queryset + self.chunked_fetch = chunked_fetch + self.chunk_size = chunk_size + + async def _async_generator(self): + # Generators don't actually start running until the first time you call + # next() on them, so make the generator object in the async thread and + # then repeatedly dispatch to it in a sync thread. + sync_generator = self.__iter__() + + def next_slice(gen): + return list(islice(gen, self.chunk_size)) + + while True: + chunk = await sync_to_async(next_slice)(sync_generator) + for item in chunk: + yield item + if len(chunk) < self.chunk_size: + break + + # __aiter__() is a *synchronous* method that has to then return an + # *asynchronous* iterator/generator. Thus, nest an async generator inside + # it. + # This is a generic iterable converter for now, and is going to suffer a + # performance penalty on large sets of items due to the cost of crossing + # over the sync barrier for each chunk. Custom __aiter__() methods should + # be added to each Iterable subclass, but that needs some work in the + # Compiler first. + def __aiter__(self): + return self._async_generator() + + +class ModelIterable(BaseIterable): + """Iterable that yields a model instance for each row.""" + + def __iter__(self): + queryset = self.queryset + db = queryset.db + compiler = queryset.query.get_compiler(using=db) + # Execute the query. This will also fill compiler.select, klass_info, + # and annotations. + results = compiler.execute_sql( + chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size + ) + select, klass_info, annotation_col_map = ( + compiler.select, + compiler.klass_info, + compiler.annotation_col_map, + ) + model_cls = klass_info["model"] + select_fields = klass_info["select_fields"] + model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1 + init_list = [ + f[0].target.attname for f in select[model_fields_start:model_fields_end] + ] + related_populators = get_related_populators(klass_info, select, db) + known_related_objects = [ + ( + field, + related_objs, + operator.attrgetter( + *[ + field.attname + if from_field == "self" + else queryset.model._meta.get_field(from_field).attname + for from_field in field.from_fields + ] + ), + ) + for field, related_objs in queryset._known_related_objects.items() + ] + for row in compiler.results_iter(results): + obj = model_cls.from_db( + db, init_list, row[model_fields_start:model_fields_end] + ) + for rel_populator in related_populators: + rel_populator.populate(row, obj) + if annotation_col_map: + for attr_name, col_pos in annotation_col_map.items(): + setattr(obj, attr_name, row[col_pos]) + + # Add the known related objects to the model. + for field, rel_objs, rel_getter in known_related_objects: + # Avoid overwriting objects loaded by, e.g., select_related(). + if field.is_cached(obj): + continue + rel_obj_id = rel_getter(obj) + try: + rel_obj = rel_objs[rel_obj_id] + except KeyError: + pass # May happen in qs1 | qs2 scenarios. + else: + setattr(obj, field.name, rel_obj) + + yield obj + + +class RawModelIterable(BaseIterable): + """ + Iterable that yields a model instance for each row from a raw queryset. + """ + + def __iter__(self): + # Cache some things for performance reasons outside the loop. 
+ db = self.queryset.db + query = self.queryset.query + connection = connections[db] + compiler = connection.ops.compiler("SQLCompiler")(query, connection, db) + query_iterator = iter(query) + + try: + ( + model_init_names, + model_init_pos, + annotation_fields, + ) = self.queryset.resolve_model_init_order() + model_cls = self.queryset.model + if model_cls._meta.pk.attname not in model_init_names: + raise exceptions.FieldDoesNotExist( + "Raw query must include the primary key" + ) + fields = [self.queryset.model_fields.get(c) for c in self.queryset.columns] + converters = compiler.get_converters( + [f.get_col(f.model._meta.db_table) if f else None for f in fields] + ) + if converters: + query_iterator = compiler.apply_converters(query_iterator, converters) + for values in query_iterator: + # Associate fields to values + model_init_values = [values[pos] for pos in model_init_pos] + instance = model_cls.from_db(db, model_init_names, model_init_values) + if annotation_fields: + for column, pos in annotation_fields: + setattr(instance, column, values[pos]) + yield instance + finally: + # Done iterating the Query. If it has its own cursor, close it. + if hasattr(query, "cursor") and query.cursor: + query.cursor.close() + + +class ValuesIterable(BaseIterable): + """ + Iterable returned by QuerySet.values() that yields a dict for each row. + """ + + def __iter__(self): + queryset = self.queryset + query = queryset.query + compiler = query.get_compiler(queryset.db) + + # extra(select=...) cols are always at the start of the row. + names = [ + *query.extra_select, + *query.values_select, + *query.annotation_select, + ] + indexes = range(len(names)) + for row in compiler.results_iter( + chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size + ): + yield {names[i]: row[i] for i in indexes} + + +class ValuesListIterable(BaseIterable): + """ + Iterable returned by QuerySet.values_list(flat=False) that yields a tuple + for each row. + """ + + def __iter__(self): + queryset = self.queryset + query = queryset.query + compiler = query.get_compiler(queryset.db) + + if queryset._fields: + # extra(select=...) cols are always at the start of the row. + names = [ + *query.extra_select, + *query.values_select, + *query.annotation_select, + ] + fields = [ + *queryset._fields, + *(f for f in query.annotation_select if f not in queryset._fields), + ] + if fields != names: + # Reorder according to fields. + index_map = {name: idx for idx, name in enumerate(names)} + rowfactory = operator.itemgetter(*[index_map[f] for f in fields]) + return map( + rowfactory, + compiler.results_iter( + chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size + ), + ) + return compiler.results_iter( + tuple_expected=True, + chunked_fetch=self.chunked_fetch, + chunk_size=self.chunk_size, + ) + + +class NamedValuesListIterable(ValuesListIterable): + """ + Iterable returned by QuerySet.values_list(named=True) that yields a + namedtuple for each row. + """ + + def __iter__(self): + queryset = self.queryset + if queryset._fields: + names = queryset._fields + else: + query = queryset.query + names = [ + *query.extra_select, + *query.values_select, + *query.annotation_select, + ] + tuple_class = create_namedtuple_class(*names) + new = tuple.__new__ + for row in super().__iter__(): + yield new(tuple_class, row) + + +class FlatValuesListIterable(BaseIterable): + """ + Iterable returned by QuerySet.values_list(flat=True) that yields single + values. 
+ """ + + def __iter__(self): + queryset = self.queryset + compiler = queryset.query.get_compiler(queryset.db) + for row in compiler.results_iter( + chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size + ): + yield row[0] + + +class QuerySet(AltersData): + """Represent a lazy database lookup for a set of objects.""" + + def __init__(self, model=None, query=None, using=None, hints=None): + self.model = model + self._db = using + self._hints = hints or {} + self._query = query or sql.Query(self.model) + self._result_cache = None + self._sticky_filter = False + self._for_write = False + self._prefetch_related_lookups = () + self._prefetch_done = False + self._known_related_objects = {} # {rel_field: {pk: rel_obj}} + self._iterable_class = ModelIterable + self._fields = None + self._defer_next_filter = False + self._deferred_filter = None + + @property + def query(self): + if self._deferred_filter: + negate, args, kwargs = self._deferred_filter + self._filter_or_exclude_inplace(negate, args, kwargs) + self._deferred_filter = None + return self._query + + @query.setter + def query(self, value): + if value.values_select: + self._iterable_class = ValuesIterable + self._query = value + + def as_manager(cls): + # Address the circular dependency between `QuerySet` and `Manager`. + from django.db.models.manager import Manager + + manager = Manager.from_queryset(cls)() + manager._built_with_as_manager = True + return manager + + as_manager.queryset_only = True + as_manager = classmethod(as_manager) + + ######################## + # PYTHON MAGIC METHODS # + ######################## + + def __deepcopy__(self, memo): + """Don't populate the QuerySet's cache.""" + obj = self.__class__() + for k, v in self.__dict__.items(): + if k == "_result_cache": + obj.__dict__[k] = None + else: + obj.__dict__[k] = copy.deepcopy(v, memo) + return obj + + def __getstate__(self): + # Force the cache to be fully populated. + self._fetch_all() + return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: django.__version__} + + def __setstate__(self, state): + pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) + if pickled_version: + if pickled_version != django.__version__: + warnings.warn( + "Pickled queryset instance's Django version %s does not " + "match the current version %s." + % (pickled_version, django.__version__), + RuntimeWarning, + stacklevel=2, + ) + else: + warnings.warn( + "Pickled queryset instance's Django version is not specified.", + RuntimeWarning, + stacklevel=2, + ) + self.__dict__.update(state) + + def __repr__(self): + data = list(self[: REPR_OUTPUT_SIZE + 1]) + if len(data) > REPR_OUTPUT_SIZE: + data[-1] = "...(remaining elements truncated)..." + return "<%s %r>" % (self.__class__.__name__, data) + + def __len__(self): + self._fetch_all() + return len(self._result_cache) + + def __iter__(self): + """ + The queryset iterator protocol uses three nested iterators in the + default case: + 1. sql.compiler.execute_sql() + - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE) + using cursor.fetchmany(). This part is responsible for + doing some column masking, and returning the rows in chunks. + 2. sql.compiler.results_iter() + - Returns one row at a time. At this point the rows are still just + tuples. In some cases the return values are converted to + Python values at this location. + 3. self.iterator() + - Responsible for turning the rows into model objects.
+ """ + self._fetch_all() + return iter(self._result_cache) + + def __aiter__(self): + # Remember, __aiter__ itself is synchronous, it's the thing it returns + # that is async! + async def generator(): + await sync_to_async(self._fetch_all)() + for item in self._result_cache: + yield item + + return generator() + + def __bool__(self): + self._fetch_all() + return bool(self._result_cache) + + def __getitem__(self, k): + """Retrieve an item or slice from the set of results.""" + if not isinstance(k, (int, slice)): + raise TypeError( + "QuerySet indices must be integers or slices, not %s." + % type(k).__name__ + ) + if (isinstance(k, int) and k < 0) or ( + isinstance(k, slice) + and ( + (k.start is not None and k.start < 0) + or (k.stop is not None and k.stop < 0) + ) + ): + raise ValueError("Negative indexing is not supported.") + + if self._result_cache is not None: + return self._result_cache[k] + + if isinstance(k, slice): + qs = self._chain() + if k.start is not None: + start = int(k.start) + else: + start = None + if k.stop is not None: + stop = int(k.stop) + else: + stop = None + qs.query.set_limits(start, stop) + return list(qs)[:: k.step] if k.step else qs + + qs = self._chain() + qs.query.set_limits(k, k + 1) + qs._fetch_all() + return qs._result_cache[0] + + def __class_getitem__(cls, *args, **kwargs): + return cls + + def __and__(self, other): + self._check_operator_queryset(other, "&") + self._merge_sanity_check(other) + if isinstance(other, EmptyQuerySet): + return other + if isinstance(self, EmptyQuerySet): + return self + combined = self._chain() + combined._merge_known_related_objects(other) + combined.query.combine(other.query, sql.AND) + return combined + + def __or__(self, other): + self._check_operator_queryset(other, "|") + self._merge_sanity_check(other) + if isinstance(self, EmptyQuerySet): + return other + if isinstance(other, EmptyQuerySet): + return self + query = ( + self + if self.query.can_filter() + else self.model._base_manager.filter(pk__in=self.values("pk")) + ) + combined = query._chain() + combined._merge_known_related_objects(other) + if not other.query.can_filter(): + other = other.model._base_manager.filter(pk__in=other.values("pk")) + combined.query.combine(other.query, sql.OR) + return combined + + def __xor__(self, other): + self._check_operator_queryset(other, "^") + self._merge_sanity_check(other) + if isinstance(self, EmptyQuerySet): + return other + if isinstance(other, EmptyQuerySet): + return self + query = ( + self + if self.query.can_filter() + else self.model._base_manager.filter(pk__in=self.values("pk")) + ) + combined = query._chain() + combined._merge_known_related_objects(other) + if not other.query.can_filter(): + other = other.model._base_manager.filter(pk__in=other.values("pk")) + combined.query.combine(other.query, sql.XOR) + return combined + + #################################### + # METHODS THAT DO DATABASE QUERIES # + #################################### + + def _iterator(self, use_chunked_fetch, chunk_size): + iterable = self._iterable_class( + self, + chunked_fetch=use_chunked_fetch, + chunk_size=chunk_size or 2000, + ) + if not self._prefetch_related_lookups or chunk_size is None: + yield from iterable + return + + iterator = iter(iterable) + while results := list(islice(iterator, chunk_size)): + prefetch_related_objects(results, *self._prefetch_related_lookups) + yield from results + + def iterator(self, chunk_size=None): + """ + An iterator over the results from applying this QuerySet to the + database. 
chunk_size must be provided for QuerySets that prefetch + related objects. Otherwise, a default chunk_size of 2000 is supplied. + """ + if chunk_size is None: + if self._prefetch_related_lookups: + raise ValueError( + "chunk_size must be provided when using QuerySet.iterator() after " + "prefetch_related()." + ) + elif chunk_size <= 0: + raise ValueError("Chunk size must be strictly positive.") + use_chunked_fetch = not connections[self.db].settings_dict.get( + "DISABLE_SERVER_SIDE_CURSORS" + ) + return self._iterator(use_chunked_fetch, chunk_size) + + async def aiterator(self, chunk_size=2000): + """ + An asynchronous iterator over the results from applying this QuerySet + to the database. + """ + if self._prefetch_related_lookups: + raise NotSupportedError( + "Using QuerySet.aiterator() after prefetch_related() is not supported." + ) + if chunk_size <= 0: + raise ValueError("Chunk size must be strictly positive.") + use_chunked_fetch = not connections[self.db].settings_dict.get( + "DISABLE_SERVER_SIDE_CURSORS" + ) + async for item in self._iterable_class( + self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size + ): + yield item + + def aggregate(self, *args, **kwargs): + """ + Return a dictionary containing the calculations (aggregation) + over the current queryset. + + If args is present the expression is passed as a kwarg using + the Aggregate object's default alias. + """ + if self.query.distinct_fields: + raise NotImplementedError("aggregate() + distinct(fields) not implemented.") + self._validate_values_are_expressions( + (*args, *kwargs.values()), method_name="aggregate" + ) + for arg in args: + # The default_alias property raises TypeError if default_alias + # can't be set automatically or AttributeError if it isn't an + # attribute. + try: + arg.default_alias + except (AttributeError, TypeError): + raise TypeError("Complex aggregates require an alias") + kwargs[arg.default_alias] = arg + + return self.query.chain().get_aggregation(self.db, kwargs) + + async def aaggregate(self, *args, **kwargs): + return await sync_to_async(self.aggregate)(*args, **kwargs) + + def count(self): + """ + Perform a SELECT COUNT() and return the number of records as an + integer. + + If the QuerySet is already fully cached, return the length of the + cached results set to avoid multiple SELECT COUNT(*) calls. + """ + if self._result_cache is not None: + return len(self._result_cache) + + return self.query.get_count(using=self.db) + + async def acount(self): + return await sync_to_async(self.count)() + + def get(self, *args, **kwargs): + """ + Perform the query and return a single object matching the given + keyword arguments. + """ + if self.query.combinator and (args or kwargs): + raise NotSupportedError( + "Calling QuerySet.get(...) with filters after %s() is not " + "supported." % self.query.combinator + ) + clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs) + if self.query.can_filter() and not self.query.distinct_fields: + clone = clone.order_by() + limit = None + if ( + not clone.query.select_for_update + or connections[clone.db].features.supports_select_for_update_with_limit + ): + limit = MAX_GET_RESULTS + clone.query.set_limits(high=limit) + num = len(clone) + if num == 1: + return clone._result_cache[0] + if not num: + raise self.model.DoesNotExist( + "%s matching query does not exist." % self.model._meta.object_name + ) + raise self.model.MultipleObjectsReturned( + "get() returned more than one %s -- it returned %s!" 
+ % ( + self.model._meta.object_name, + num if not limit or num < limit else "more than %s" % (limit - 1), + ) + ) + + async def aget(self, *args, **kwargs): + return await sync_to_async(self.get)(*args, **kwargs) + + def create(self, **kwargs): + """ + Create a new object with the given kwargs, saving it to the database + and returning the created object. + """ + obj = self.model(**kwargs) + self._for_write = True + obj.save(force_insert=True, using=self.db) + return obj + + async def acreate(self, **kwargs): + return await sync_to_async(self.create)(**kwargs) + + def _prepare_for_bulk_create(self, objs): + from django.db.models.expressions import DatabaseDefault + + connection = connections[self.db] + for obj in objs: + if obj.pk is None: + # Populate new PK values. + obj.pk = obj._meta.pk.get_pk_value_on_save(obj) + if not connection.features.supports_default_keyword_in_bulk_insert: + for field in obj._meta.fields: + value = getattr(obj, field.attname) + if isinstance(value, DatabaseDefault): + setattr(obj, field.attname, field.db_default) + + obj._prepare_related_fields_for_save(operation_name="bulk_create") + + def _check_bulk_create_options( + self, ignore_conflicts, update_conflicts, update_fields, unique_fields + ): + if ignore_conflicts and update_conflicts: + raise ValueError( + "ignore_conflicts and update_conflicts are mutually exclusive." + ) + db_features = connections[self.db].features + if ignore_conflicts: + if not db_features.supports_ignore_conflicts: + raise NotSupportedError( + "This database backend does not support ignoring conflicts." + ) + return OnConflict.IGNORE + elif update_conflicts: + if not db_features.supports_update_conflicts: + raise NotSupportedError( + "This database backend does not support updating conflicts." + ) + if not update_fields: + raise ValueError( + "Fields that will be updated when a row insertion fails " + "on conflicts must be provided." + ) + if unique_fields and not db_features.supports_update_conflicts_with_target: + raise NotSupportedError( + "This database backend does not support updating " + "conflicts with specifying unique fields that can trigger " + "the upsert." + ) + if not unique_fields and db_features.supports_update_conflicts_with_target: + raise ValueError( + "Unique fields that can trigger the upsert must be provided." + ) + # Updating primary keys and non-concrete fields is forbidden. + if any(not f.concrete or f.many_to_many for f in update_fields): + raise ValueError( + "bulk_create() can only be used with concrete fields in " + "update_fields." + ) + if any(f.primary_key for f in update_fields): + raise ValueError( + "bulk_create() cannot be used with primary keys in " + "update_fields." + ) + if unique_fields: + if any(not f.concrete or f.many_to_many for f in unique_fields): + raise ValueError( + "bulk_create() can only be used with concrete fields " + "in unique_fields." + ) + return OnConflict.UPDATE + return None + + def bulk_create( + self, + objs, + batch_size=None, + ignore_conflicts=False, + update_conflicts=False, + update_fields=None, + unique_fields=None, + ): + """ + Insert each of the instances into the database. Do *not* call + save() on each of the instances, do not send any pre/post_save + signals, and do not set the primary key attribute if it is an + autoincrement field (except if features.can_return_rows_from_bulk_insert=True). + Multi-table models are not supported. 
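+ + Illustrative usage (not part of the original patch; Entry is a + hypothetical model): + Entry.objects.bulk_create([Entry(title="a"), Entry(title="b")])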
+ """ + # When you bulk insert you don't get the primary keys back (if it's an + # autoincrement, except if can_return_rows_from_bulk_insert=True), so + # you can't insert into the child tables which reference this. There + # are two workarounds: + # 1) This could be implemented if you didn't have an autoincrement pk + # 2) You could do it by doing O(n) normal inserts into the parent + # tables to get the primary keys back and then doing a single bulk + # insert into the childmost table. + # We currently set the primary keys on the objects when using + # PostgreSQL via the RETURNING ID clause. It should be possible for + # Oracle as well, but the semantics for extracting the primary keys are + # trickier so it's not done yet. + if batch_size is not None and batch_size <= 0: + raise ValueError("Batch size must be a positive integer.") + # Check that the parents share the same concrete model with our + # model to detect the inheritance pattern ConcreteGrandParent -> + # MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy + # would not identify that case as involving multiple tables. + for parent in self.model._meta.get_parent_list(): + if parent._meta.concrete_model is not self.model._meta.concrete_model: + raise ValueError("Can't bulk create a multi-table inherited model") + if not objs: + return objs + opts = self.model._meta + if unique_fields: + # Primary key is allowed in unique_fields. + unique_fields = [ + self.model._meta.get_field(opts.pk.name if name == "pk" else name) + for name in unique_fields + ] + if update_fields: + update_fields = [self.model._meta.get_field(name) for name in update_fields] + on_conflict = self._check_bulk_create_options( + ignore_conflicts, + update_conflicts, + update_fields, + unique_fields, + ) + self._for_write = True + fields = opts.concrete_fields + objs = list(objs) + self._prepare_for_bulk_create(objs) + with transaction.atomic(using=self.db, savepoint=False): + objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs) + if objs_with_pk: + returned_columns = self._batched_insert( + objs_with_pk, + fields, + batch_size, + on_conflict=on_conflict, + update_fields=update_fields, + unique_fields=unique_fields, + ) + for obj_with_pk, results in zip(objs_with_pk, returned_columns): + for result, field in zip(results, opts.db_returning_fields): + if field != opts.pk: + setattr(obj_with_pk, field.attname, result) + for obj_with_pk in objs_with_pk: + obj_with_pk._state.adding = False + obj_with_pk._state.db = self.db + if objs_without_pk: + fields = [f for f in fields if not isinstance(f, AutoField)] + returned_columns = self._batched_insert( + objs_without_pk, + fields, + batch_size, + on_conflict=on_conflict, + update_fields=update_fields, + unique_fields=unique_fields, + ) + connection = connections[self.db] + if ( + connection.features.can_return_rows_from_bulk_insert + and on_conflict is None + ): + assert len(returned_columns) == len(objs_without_pk) + for obj_without_pk, results in zip(objs_without_pk, returned_columns): + for result, field in zip(results, opts.db_returning_fields): + setattr(obj_without_pk, field.attname, result) + obj_without_pk._state.adding = False + obj_without_pk._state.db = self.db + + return objs + + async def abulk_create( + self, + objs, + batch_size=None, + ignore_conflicts=False, + update_conflicts=False, + update_fields=None, + unique_fields=None, + ): + return await sync_to_async(self.bulk_create)( + objs=objs, + batch_size=batch_size, + ignore_conflicts=ignore_conflicts, +
update_conflicts=update_conflicts, + update_fields=update_fields, + unique_fields=unique_fields, + ) + + def bulk_update(self, objs, fields, batch_size=None): + """ + Update the given fields in each of the given objects in the database. + """ + if batch_size is not None and batch_size <= 0: + raise ValueError("Batch size must be a positive integer.") + if not fields: + raise ValueError("Field names must be given to bulk_update().") + objs = tuple(objs) + if any(obj.pk is None for obj in objs): + raise ValueError("All bulk_update() objects must have a primary key set.") + fields = [self.model._meta.get_field(name) for name in fields] + if any(not f.concrete or f.many_to_many for f in fields): + raise ValueError("bulk_update() can only be used with concrete fields.") + if any(f.primary_key for f in fields): + raise ValueError("bulk_update() cannot be used with primary key fields.") + if not objs: + return 0 + for obj in objs: + obj._prepare_related_fields_for_save( + operation_name="bulk_update", fields=fields + ) + # PK is used twice in the resulting update query, once in the filter + # and once in the WHEN. Each field will also have one CAST. + self._for_write = True + connection = connections[self.db] + max_batch_size = connection.ops.bulk_batch_size(["pk", "pk"] + fields, objs) + batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size + requires_casting = connection.features.requires_casted_case_in_updates + batches = (objs[i : i + batch_size] for i in range(0, len(objs), batch_size)) + updates = [] + for batch_objs in batches: + update_kwargs = {} + for field in fields: + when_statements = [] + for obj in batch_objs: + attr = getattr(obj, field.attname) + if not hasattr(attr, "resolve_expression"): + attr = Value(attr, output_field=field) + when_statements.append(When(pk=obj.pk, then=attr)) + case_statement = Case(*when_statements, output_field=field) + if requires_casting: + case_statement = Cast(case_statement, output_field=field) + update_kwargs[field.attname] = case_statement + updates.append(([obj.pk for obj in batch_objs], update_kwargs)) + rows_updated = 0 + queryset = self.using(self.db) + with transaction.atomic(using=self.db, savepoint=False): + for pks, update_kwargs in updates: + rows_updated += queryset.filter(pk__in=pks).update(**update_kwargs) + return rows_updated + + bulk_update.alters_data = True + + async def abulk_update(self, objs, fields, batch_size=None): + return await sync_to_async(self.bulk_update)( + objs=objs, + fields=fields, + batch_size=batch_size, + ) + + abulk_update.alters_data = True + + def get_or_create(self, defaults=None, **kwargs): + """ + Look up an object with the given kwargs, creating one if necessary. + Return a tuple of (object, created), where created is a boolean + specifying whether an object was created. + """ + # The get() needs to be targeted at the write database in order + # to avoid potential transaction consistency problems. + self._for_write = True + try: + return self.get(**kwargs), False + except self.model.DoesNotExist: + params = self._extract_model_params(defaults, **kwargs) + # Try to create an object using passed params. 
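+ # (Note, not part of the original patch: if a concurrent insert wins + # the race, the IntegrityError handler below retries the get().)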
+ try: + with transaction.atomic(using=self.db): + params = dict(resolve_callables(params)) + return self.create(**params), True + except IntegrityError: + try: + return self.get(**kwargs), False + except self.model.DoesNotExist: + pass + raise + + async def aget_or_create(self, defaults=None, **kwargs): + return await sync_to_async(self.get_or_create)( + defaults=defaults, + **kwargs, + ) + + def update_or_create(self, defaults=None, create_defaults=None, **kwargs): + """ + Look up an object with the given kwargs, updating one with defaults + if it exists, otherwise create a new one. Optionally, an object can + be created with different values than defaults by using + create_defaults. + Return a tuple (object, created), where created is a boolean + specifying whether an object was created. + """ + if create_defaults is None: + update_defaults = create_defaults = defaults or {} + else: + update_defaults = defaults or {} + self._for_write = True + with transaction.atomic(using=self.db): + # Lock the row so that a concurrent update is blocked until + # update_or_create() has performed its save. + obj, created = self.select_for_update().get_or_create( + create_defaults, **kwargs + ) + if created: + return obj, created + for k, v in resolve_callables(update_defaults): + setattr(obj, k, v) + + update_fields = set(update_defaults) + concrete_field_names = self.model._meta._non_pk_concrete_field_names + # update_fields does not support non-concrete fields. + if concrete_field_names.issuperset(update_fields): + # Add fields which are set on pre_save(), e.g. auto_now fields. + # This is to maintain backward compatibility as these fields + # are not updated unless explicitly specified in the + # update_fields list. + for field in self.model._meta.local_concrete_fields: + if not ( + field.primary_key or field.__class__.pre_save is Field.pre_save + ): + update_fields.add(field.name) + if field.name != field.attname: + update_fields.add(field.attname) + obj.save(using=self.db, update_fields=update_fields) + else: + obj.save(using=self.db) + return obj, False + + async def aupdate_or_create(self, defaults=None, create_defaults=None, **kwargs): + return await sync_to_async(self.update_or_create)( + defaults=defaults, + create_defaults=create_defaults, + **kwargs, + ) + + def _extract_model_params(self, defaults, **kwargs): + """ + Prepare `params` for creating a model instance based on the given + kwargs; for use by get_or_create(). + """ + defaults = defaults or {} + params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k} + params.update(defaults) + property_names = self.model._meta._property_names + invalid_params = [] + for param in params: + try: + self.model._meta.get_field(param) + except exceptions.FieldDoesNotExist: + # It's okay to use a model's property if it has a setter. + if not (param in property_names and getattr(self.model, param).fset): + invalid_params.append(param) + if invalid_params: + raise exceptions.FieldError( + "Invalid field name(s) for model %s: '%s'." + % ( + self.model._meta.object_name, + "', '".join(sorted(invalid_params)), + ) + ) + return params + + def _earliest(self, *fields): + """ + Return the earliest object according to fields (if given) or by the + model's Meta.get_latest_by. 
+ """ + if fields: + order_by = fields + else: + order_by = getattr(self.model._meta, "get_latest_by") + if order_by and not isinstance(order_by, (tuple, list)): + order_by = (order_by,) + if order_by is None: + raise ValueError( + "earliest() and latest() require either fields as positional " + "arguments or 'get_latest_by' in the model's Meta." + ) + obj = self._chain() + obj.query.set_limits(high=1) + obj.query.clear_ordering(force=True) + obj.query.add_ordering(*order_by) + return obj.get() + + def earliest(self, *fields): + if self.query.is_sliced: + raise TypeError("Cannot change a query once a slice has been taken.") + return self._earliest(*fields) + + async def aearliest(self, *fields): + return await sync_to_async(self.earliest)(*fields) + + def latest(self, *fields): + """ + Return the latest object according to fields (if given) or by the + model's Meta.get_latest_by. + """ + if self.query.is_sliced: + raise TypeError("Cannot change a query once a slice has been taken.") + return self.reverse()._earliest(*fields) + + async def alatest(self, *fields): + return await sync_to_async(self.latest)(*fields) + + def first(self): + """Return the first object of a query or None if no match is found.""" + if self.ordered: + queryset = self + else: + self._check_ordering_first_last_queryset_aggregation(method="first") + queryset = self.order_by("pk") + for obj in queryset[:1]: + return obj + + async def afirst(self): + return await sync_to_async(self.first)() + + def last(self): + """Return the last object of a query or None if no match is found.""" + if self.ordered: + queryset = self.reverse() + else: + self._check_ordering_first_last_queryset_aggregation(method="last") + queryset = self.order_by("-pk") + for obj in queryset[:1]: + return obj + + async def alast(self): + return await sync_to_async(self.last)() + + def in_bulk(self, id_list=None, *, field_name="pk"): + """ + Return a dictionary mapping each of the given IDs to the object with + that ID. If `id_list` isn't provided, evaluate the entire QuerySet. + """ + if self.query.is_sliced: + raise TypeError("Cannot use 'limit' or 'offset' with in_bulk().") + opts = self.model._meta + unique_fields = [ + constraint.fields[0] + for constraint in opts.total_unique_constraints + if len(constraint.fields) == 1 + ] + if ( + field_name != "pk" + and not opts.get_field(field_name).unique + and field_name not in unique_fields + and self.query.distinct_fields != (field_name,) + ): + raise ValueError( + "in_bulk()'s field_name must be a unique field but %r isn't." + % field_name + ) + if id_list is not None: + if not id_list: + return {} + filter_key = "{}__in".format(field_name) + batch_size = connections[self.db].features.max_query_params + id_list = tuple(id_list) + # If the database has a limit on the number of query parameters + # (e.g. SQLite), retrieve objects in batches if necessary. 
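+ # (Assumed figures, not part of the original patch: SQLite commonly + # caps bound parameters at 999 or 32766 depending on version; the + # actual limit comes from features.max_query_params above.)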
+ if batch_size and batch_size < len(id_list): + qs = () + for offset in range(0, len(id_list), batch_size): + batch = id_list[offset : offset + batch_size] + qs += tuple(self.filter(**{filter_key: batch})) + else: + qs = self.filter(**{filter_key: id_list}) + else: + qs = self._chain() + return {getattr(obj, field_name): obj for obj in qs} + + async def ain_bulk(self, id_list=None, *, field_name="pk"): + return await sync_to_async(self.in_bulk)( + id_list=id_list, + field_name=field_name, + ) + + def delete(self): + """Delete the records in the current QuerySet.""" + self._not_support_combined_queries("delete") + if self.query.is_sliced: + raise TypeError("Cannot use 'limit' or 'offset' with delete().") + if self.query.distinct_fields: + raise TypeError("Cannot call delete() after .distinct(*fields).") + if self._fields is not None: + raise TypeError("Cannot call delete() after .values() or .values_list()") + + del_query = self._chain() + + # The delete is actually 2 queries - one to find related objects, + # and one to delete. Make sure that the discovery of related + # objects is performed on the same database as the deletion. + del_query._for_write = True + + # Disable non-supported fields. + del_query.query.select_for_update = False + del_query.query.select_related = False + del_query.query.clear_ordering(force=True) + + collector = Collector(using=del_query.db, origin=self) + collector.collect(del_query) + deleted, _rows_count = collector.delete() + + # Clear the result cache, in case this QuerySet gets reused. + self._result_cache = None + return deleted, _rows_count + + delete.alters_data = True + delete.queryset_only = True + + async def adelete(self): + return await sync_to_async(self.delete)() + + adelete.alters_data = True + adelete.queryset_only = True + + def _raw_delete(self, using): + """ + Delete objects found from the given queryset in single direct SQL + query. No signals are sent and there is no protection for cascades. + """ + query = self.query.clone() + query.__class__ = sql.DeleteQuery + cursor = query.get_compiler(using).execute_sql(CURSOR) + if cursor: + with cursor: + return cursor.rowcount + return 0 + + _raw_delete.alters_data = True + + def update(self, **kwargs): + """ + Update all elements in the current QuerySet, setting all the given + fields to the appropriate values. + """ + self._not_support_combined_queries("update") + if self.query.is_sliced: + raise TypeError("Cannot update a query once a slice has been taken.") + self._for_write = True + query = self.query.chain(sql.UpdateQuery) + query.add_update_values(kwargs) + + # Inline annotations in order_by(), if possible. + new_order_by = [] + for col in query.order_by: + alias = col + descending = False + if isinstance(alias, str) and alias.startswith("-"): + alias = alias.removeprefix("-") + descending = True + if annotation := query.annotations.get(alias): + if getattr(annotation, "contains_aggregate", False): + raise exceptions.FieldError( + f"Cannot update when ordering by an aggregate: {annotation}" + ) + if descending: + annotation = annotation.desc() + new_order_by.append(annotation) + else: + new_order_by.append(col) + query.order_by = tuple(new_order_by) + + # Clear any annotations so that they won't be present in subqueries. 
+ query.annotations = {} + with transaction.mark_for_rollback_on_error(using=self.db): + rows = query.get_compiler(self.db).execute_sql(CURSOR) + self._result_cache = None + return rows + + update.alters_data = True + + async def aupdate(self, **kwargs): + return await sync_to_async(self.update)(**kwargs) + + aupdate.alters_data = True + + def _update(self, values): + """ + A version of update() that accepts field objects instead of field names. + Used primarily for model saving and not intended for use by general + code (it requires too much poking around at model internals to be + useful at that level). + """ + if self.query.is_sliced: + raise TypeError("Cannot update a query once a slice has been taken.") + query = self.query.chain(sql.UpdateQuery) + query.add_update_fields(values) + # Clear any annotations so that they won't be present in subqueries. + query.annotations = {} + self._result_cache = None + return query.get_compiler(self.db).execute_sql(CURSOR) + + _update.alters_data = True + _update.queryset_only = False + + def exists(self): + """ + Return True if the QuerySet would have any results, False otherwise. + """ + if self._result_cache is None: + return self.query.has_results(using=self.db) + return bool(self._result_cache) + + async def aexists(self): + return await sync_to_async(self.exists)() + + def contains(self, obj): + """ + Return True if the QuerySet contains the provided obj, + False otherwise. + """ + self._not_support_combined_queries("contains") + if self._fields is not None: + raise TypeError( + "Cannot call QuerySet.contains() after .values() or .values_list()." + ) + try: + if obj._meta.concrete_model != self.model._meta.concrete_model: + return False + except AttributeError: + raise TypeError("'obj' must be a model instance.") + if obj.pk is None: + raise ValueError("QuerySet.contains() cannot be used on unsaved objects.") + if self._result_cache is not None: + return obj in self._result_cache + return self.filter(pk=obj.pk).exists() + + async def acontains(self, obj): + return await sync_to_async(self.contains)(obj=obj) + + def _prefetch_related_objects(self): + # This method can only be called once the result cache has been filled. + prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups) + self._prefetch_done = True + + def explain(self, *, format=None, **options): + """ + Runs an EXPLAIN on the SQL query this QuerySet would perform, and + returns the results. 
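+ + Illustrative usage (not part of the original patch; Entry is a + hypothetical model and format support varies by backend): + Entry.objects.filter(pk=1).explain(format="json")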
+ """ + return self.query.explain(using=self.db, format=format, **options) + + async def aexplain(self, *, format=None, **options): + return await sync_to_async(self.explain)(format=format, **options) + + ################################################## + # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS # + ################################################## + + def raw(self, raw_query, params=(), translations=None, using=None): + if using is None: + using = self.db + qs = RawQuerySet( + raw_query, + model=self.model, + params=params, + translations=translations, + using=using, + ) + qs._prefetch_related_lookups = self._prefetch_related_lookups[:] + return qs + + def _values(self, *fields, **expressions): + clone = self._chain() + if expressions: + clone = clone.annotate(**expressions) + clone._fields = fields + clone.query.set_values(fields) + return clone + + def values(self, *fields, **expressions): + fields += tuple(expressions) + clone = self._values(*fields, **expressions) + clone._iterable_class = ValuesIterable + return clone + + def values_list(self, *fields, flat=False, named=False): + if flat and named: + raise TypeError("'flat' and 'named' can't be used together.") + if flat and len(fields) > 1: + raise TypeError( + "'flat' is not valid when values_list is called with more than one " + "field." + ) + + field_names = {f for f in fields if not hasattr(f, "resolve_expression")} + _fields = [] + expressions = {} + counter = 1 + for field in fields: + if hasattr(field, "resolve_expression"): + field_id_prefix = getattr( + field, "default_alias", field.__class__.__name__.lower() + ) + while True: + field_id = field_id_prefix + str(counter) + counter += 1 + if field_id not in field_names: + break + expressions[field_id] = field + _fields.append(field_id) + else: + _fields.append(field) + + clone = self._values(*_fields, **expressions) + clone._iterable_class = ( + NamedValuesListIterable + if named + else FlatValuesListIterable + if flat + else ValuesListIterable + ) + return clone + + def dates(self, field_name, kind, order="ASC"): + """ + Return a list of date objects representing all available dates for + the given field_name, scoped to 'kind'. + """ + if kind not in ("year", "month", "week", "day"): + raise ValueError("'kind' must be one of 'year', 'month', 'week', or 'day'.") + if order not in ("ASC", "DESC"): + raise ValueError("'order' must be either 'ASC' or 'DESC'.") + return ( + self.annotate( + datefield=Trunc(field_name, kind, output_field=DateField()), + plain_field=F(field_name), + ) + .values_list("datefield", flat=True) + .distinct() + .filter(plain_field__isnull=False) + .order_by(("-" if order == "DESC" else "") + "datefield") + ) + + def datetimes(self, field_name, kind, order="ASC", tzinfo=None): + """ + Return a list of datetime objects representing all available + datetimes for the given field_name, scoped to 'kind'. + """ + if kind not in ("year", "month", "week", "day", "hour", "minute", "second"): + raise ValueError( + "'kind' must be one of 'year', 'month', 'week', 'day', " + "'hour', 'minute', or 'second'." 
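A short sketch of the result shapes produced by values() and values_list() above; `library.Book` is again a hypothetical model:

    from library.models import Book  # hypothetical model

    Book.objects.values("title", "year")           # dicts: {"title": ..., "year": ...}
    Book.objects.values_list("title", "year")      # tuples: ("...", 1984)
    Book.objects.values_list("title", flat=True)   # bare values; single field only
    Book.objects.values_list("title", "year", named=True)  # Row namedtuples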
+ ) + if order not in ("ASC", "DESC"): + raise ValueError("'order' must be either 'ASC' or 'DESC'.") + if settings.USE_TZ: + if tzinfo is None: + tzinfo = timezone.get_current_timezone() + else: + tzinfo = None + return ( + self.annotate( + datetimefield=Trunc( + field_name, + kind, + output_field=DateTimeField(), + tzinfo=tzinfo, + ), + plain_field=F(field_name), + ) + .values_list("datetimefield", flat=True) + .distinct() + .filter(plain_field__isnull=False) + .order_by(("-" if order == "DESC" else "") + "datetimefield") + ) + + def none(self): + """Return an empty QuerySet.""" + clone = self._chain() + clone.query.set_empty() + return clone + + ################################################################## + # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET # + ################################################################## + + def all(self): + """ + Return a new QuerySet that is a copy of the current one. This allows a + QuerySet to proxy for a model manager in some cases. + """ + return self._chain() + + def filter(self, *args, **kwargs): + """ + Return a new QuerySet instance with the args ANDed to the existing + set. + """ + self._not_support_combined_queries("filter") + return self._filter_or_exclude(False, args, kwargs) + + def exclude(self, *args, **kwargs): + """ + Return a new QuerySet instance with NOT (args) ANDed to the existing + set. + """ + self._not_support_combined_queries("exclude") + return self._filter_or_exclude(True, args, kwargs) + + def _filter_or_exclude(self, negate, args, kwargs): + if (args or kwargs) and self.query.is_sliced: + raise TypeError("Cannot filter a query once a slice has been taken.") + clone = self._chain() + if self._defer_next_filter: + self._defer_next_filter = False + clone._deferred_filter = negate, args, kwargs + else: + clone._filter_or_exclude_inplace(negate, args, kwargs) + return clone + + def _filter_or_exclude_inplace(self, negate, args, kwargs): + if negate: + self._query.add_q(~Q(*args, **kwargs)) + else: + self._query.add_q(Q(*args, **kwargs)) + + def complex_filter(self, filter_obj): + """ + Return a new QuerySet instance with filter_obj added to the filters. + + filter_obj can be a Q object or a dictionary of keyword lookup + arguments. + + This exists to support framework features such as 'limit_choices_to', + and usually it will be more natural to use other methods. + """ + if isinstance(filter_obj, Q): + clone = self._chain() + clone.query.add_q(filter_obj) + return clone + else: + return self._filter_or_exclude(False, args=(), kwargs=filter_obj) + + def _combinator_query(self, combinator, *other_qs, all=False): + # Clone the query to inherit the select list and everything + clone = self._chain() + # Clear limits and ordering so they can be reapplied + clone.query.clear_ordering(force=True) + clone.query.clear_limits() + clone.query.combined_queries = (self.query,) + tuple( + qs.query for qs in other_qs + ) + clone.query.combinator = combinator + clone.query.combinator_all = all + return clone + + def union(self, *other_qs, all=False): + # If the query is an EmptyQuerySet, combine all nonempty querysets. + if isinstance(self, EmptyQuerySet): + qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)] + if not qs: + return self + if len(qs) == 1: + return qs[0] + return qs[0]._combinator_query("union", *qs[1:], all=all) + return self._combinator_query("union", *other_qs, all=all) + + def intersection(self, *other_qs): + # If any query is an EmptyQuerySet, return it. 
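The combinator methods funnel into _combinator_query() above, which clears limits and ordering before combining. A hedged sketch of the public API:

    from library.models import Book  # hypothetical model

    old = Book.objects.filter(year__lt=1900)
    new = Book.objects.filter(year__gte=2000)
    either = old.union(new)                # SQL UNION, duplicates removed
    with_dupes = old.union(new, all=True)  # UNION ALL keeps duplicates
    # Most further modifications on a combined queryset raise
    # NotSupportedError via _not_support_combined_queries().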
+ if isinstance(self, EmptyQuerySet): + return self + for other in other_qs: + if isinstance(other, EmptyQuerySet): + return other + return self._combinator_query("intersection", *other_qs) + + def difference(self, *other_qs): + # If the query is an EmptyQuerySet, return it. + if isinstance(self, EmptyQuerySet): + return self + return self._combinator_query("difference", *other_qs) + + def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False): + """ + Return a new QuerySet instance that will select objects with a + FOR UPDATE lock. + """ + if nowait and skip_locked: + raise ValueError("The nowait option cannot be used with skip_locked.") + obj = self._chain() + obj._for_write = True + obj.query.select_for_update = True + obj.query.select_for_update_nowait = nowait + obj.query.select_for_update_skip_locked = skip_locked + obj.query.select_for_update_of = of + obj.query.select_for_no_key_update = no_key + return obj + + def select_related(self, *fields): + """ + Return a new QuerySet instance that will select related objects. + + If fields are specified, they must be ForeignKey fields and only those + related objects are included in the selection. + + If select_related(None) is called, clear the list. + """ + self._not_support_combined_queries("select_related") + if self._fields is not None: + raise TypeError( + "Cannot call select_related() after .values() or .values_list()" + ) + + obj = self._chain() + if fields == (None,): + obj.query.select_related = False + elif fields: + obj.query.add_select_related(fields) + else: + obj.query.select_related = True + return obj + + def prefetch_related(self, *lookups): + """ + Return a new QuerySet instance that will prefetch the specified + Many-To-One and Many-To-Many related objects when the QuerySet is + evaluated. + + When prefetch_related() is called more than once, append to the list of + prefetch lookups. If prefetch_related(None) is called, clear the list. + """ + self._not_support_combined_queries("prefetch_related") + clone = self._chain() + if lookups == (None,): + clone._prefetch_related_lookups = () + else: + for lookup in lookups: + if isinstance(lookup, Prefetch): + lookup = lookup.prefetch_to + lookup = lookup.split(LOOKUP_SEP, 1)[0] + if lookup in self.query._filtered_relations: + raise ValueError( + "prefetch_related() is not supported with FilteredRelation." + ) + clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups + return clone + + def annotate(self, *args, **kwargs): + """ + Return a query set in which the returned objects have been annotated + with extra data or aggregations. + """ + self._not_support_combined_queries("annotate") + return self._annotate(args, kwargs, select=True) + + def alias(self, *args, **kwargs): + """ + Return a query set with added aliases for extra data or aggregations. + """ + self._not_support_combined_queries("alias") + return self._annotate(args, kwargs, select=False) + + def _annotate(self, args, kwargs, select=True): + self._validate_values_are_expressions( + args + tuple(kwargs.values()), method_name="annotate" + ) + annotations = {} + for arg in args: + # The default_alias property may raise a TypeError. + try: + if arg.default_alias in kwargs: + raise ValueError( + "The named annotation '%s' conflicts with the " + "default name for another annotation." 
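A sketch contrasting the two relation-loading strategies above: select_related() folds the relation into a single JOINed query, while prefetch_related() runs a second query and stitches the objects together in Python. The Author/Book models are hypothetical:

    from library.models import Author, Book  # hypothetical: Book.author -> Author

    books = Book.objects.select_related("author")          # one JOINed query
    authors = Author.objects.prefetch_related("book_set")  # two queries, stitched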
% arg.default_alias
+                    )
+            except TypeError:
+                raise TypeError("Complex annotations require an alias")
+            annotations[arg.default_alias] = arg
+        annotations.update(kwargs)
+
+        clone = self._chain()
+        names = self._fields
+        if names is None:
+            names = set(
+                chain.from_iterable(
+                    (field.name, field.attname)
+                    if hasattr(field, "attname")
+                    else (field.name,)
+                    for field in self.model._meta.get_fields()
+                )
+            )
+
+        for alias, annotation in annotations.items():
+            if alias in names:
+                raise ValueError(
+                    "The annotation '%s' conflicts with a field on "
+                    "the model." % alias
+                )
+            if isinstance(annotation, FilteredRelation):
+                clone.query.add_filtered_relation(annotation, alias)
+            else:
+                clone.query.add_annotation(
+                    annotation,
+                    alias,
+                    select=select,
+                )
+        for alias, annotation in clone.query.annotations.items():
+            if alias in annotations and annotation.contains_aggregate:
+                if clone._fields is None:
+                    clone.query.group_by = True
+                else:
+                    clone.query.set_group_by()
+                break
+
+        return clone
+
+    def order_by(self, *field_names):
+        """Return a new QuerySet instance with the ordering changed."""
+        if self.query.is_sliced:
+            raise TypeError("Cannot reorder a query once a slice has been taken.")
+        obj = self._chain()
+        obj.query.clear_ordering(force=True, clear_default=False)
+        obj.query.add_ordering(*field_names)
+        return obj
+
+    def distinct(self, *field_names):
+        """
+        Return a new QuerySet instance that will select only distinct results.
+        """
+        self._not_support_combined_queries("distinct")
+        if self.query.is_sliced:
+            raise TypeError(
+                "Cannot create distinct fields once a slice has been taken."
+            )
+        obj = self._chain()
+        obj.query.add_distinct_fields(*field_names)
+        return obj
+
+    def extra(
+        self,
+        select=None,
+        where=None,
+        params=None,
+        tables=None,
+        order_by=None,
+        select_params=None,
+    ):
+        """Add extra SQL fragments to the query."""
+        self._not_support_combined_queries("extra")
+        if self.query.is_sliced:
+            raise TypeError("Cannot change a query once a slice has been taken.")
+        clone = self._chain()
+        clone.query.add_extra(select, select_params, where, params, tables, order_by)
+        return clone
+
+    def reverse(self):
+        """Reverse the ordering of the QuerySet."""
+        if self.query.is_sliced:
+            raise TypeError("Cannot reverse a query once a slice has been taken.")
+        clone = self._chain()
+        clone.query.standard_ordering = not clone.query.standard_ordering
+        return clone
+
+    def defer(self, *fields):
+        """
+        Defer the loading of data for certain fields until they are accessed.
+        Add the set of deferred fields to any existing set of deferred fields.
+        The only exception to this is if None is passed in as the only
+        parameter, in which case all deferrals are removed.
+        """
+        self._not_support_combined_queries("defer")
+        if self._fields is not None:
+            raise TypeError("Cannot call defer() after .values() or .values_list()")
+        clone = self._chain()
+        if fields == (None,):
+            clone.query.clear_deferred_loading()
+        else:
+            clone.query.add_deferred_loading(fields)
+        return clone
+
+    def only(self, *fields):
+        """
+        Essentially, the opposite of defer(). Only the fields passed into this
+        method and that are not already specified as deferred are loaded
+        immediately when the queryset is evaluated.
+        """
+        self._not_support_combined_queries("only")
+        if self._fields is not None:
+            raise TypeError("Cannot call only() after .values() or .values_list()")
+        if fields == (None,):
+            # Can only pass None to defer(), not only(), as the reset option.
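A sketch of the deferred-loading API above; the large `body` field on the hypothetical Book model stands in for any expensive column:

    from library.models import Book  # hypothetical model with a big "body" field

    light = Book.objects.defer("body")   # "body" loads lazily on first access
    titles = Book.objects.only("title")  # every other field is deferred
    full = light.defer(None)             # passing None clears all deferrals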
+ # That won't stop people trying to do this, so let's be explicit. + raise TypeError("Cannot pass None as an argument to only().") + for field in fields: + field = field.split(LOOKUP_SEP, 1)[0] + if field in self.query._filtered_relations: + raise ValueError("only() is not supported with FilteredRelation.") + clone = self._chain() + clone.query.add_immediate_loading(fields) + return clone + + def using(self, alias): + """Select which database this QuerySet should execute against.""" + clone = self._chain() + clone._db = alias + return clone + + ################################### + # PUBLIC INTROSPECTION ATTRIBUTES # + ################################### + + @property + def ordered(self): + """ + Return True if the QuerySet is ordered -- i.e. has an order_by() + clause or a default ordering on the model (or is empty). + """ + if isinstance(self, EmptyQuerySet): + return True + if self.query.extra_order_by or self.query.order_by: + return True + elif ( + self.query.default_ordering + and self.query.get_meta().ordering + and + # A default ordering doesn't affect GROUP BY queries. + not self.query.group_by + ): + return True + else: + return False + + @property + def db(self): + """Return the database used if this query is executed now.""" + if self._for_write: + return self._db or router.db_for_write(self.model, **self._hints) + return self._db or router.db_for_read(self.model, **self._hints) + + ################### + # PRIVATE METHODS # + ################### + + def _insert( + self, + objs, + fields, + returning_fields=None, + raw=False, + using=None, + on_conflict=None, + update_fields=None, + unique_fields=None, + ): + """ + Insert a new record for the given model. This provides an interface to + the InsertQuery class and is how Model.save() is implemented. + """ + self._for_write = True + if using is None: + using = self.db + query = sql.InsertQuery( + self.model, + on_conflict=on_conflict, + update_fields=update_fields, + unique_fields=unique_fields, + ) + query.insert_values(fields, objs, raw=raw) + return query.get_compiler(using=using).execute_sql(returning_fields) + + _insert.alters_data = True + _insert.queryset_only = False + + def _batched_insert( + self, + objs, + fields, + batch_size, + on_conflict=None, + update_fields=None, + unique_fields=None, + ): + """ + Helper method for bulk_create() to insert objs one batch at a time. + """ + connection = connections[self.db] + ops = connection.ops + max_batch_size = max(ops.bulk_batch_size(fields, objs), 1) + batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size + inserted_rows = [] + bulk_return = connection.features.can_return_rows_from_bulk_insert + for item in [objs[i : i + batch_size] for i in range(0, len(objs), batch_size)]: + if bulk_return and ( + on_conflict is None or on_conflict == OnConflict.UPDATE + ): + inserted_rows.extend( + self._insert( + item, + fields=fields, + using=self.db, + on_conflict=on_conflict, + update_fields=update_fields, + unique_fields=unique_fields, + returning_fields=self.model._meta.db_returning_fields, + ) + ) + else: + self._insert( + item, + fields=fields, + using=self.db, + on_conflict=on_conflict, + update_fields=update_fields, + unique_fields=unique_fields, + ) + return inserted_rows + + def _chain(self): + """ + Return a copy of the current QuerySet that's ready for another + operation. 
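_batched_insert() above is the batching engine behind bulk_create(): batch_size can only shrink the backend's maximum chunk size, never exceed it. A hedged sketch of the public entry point:

    from library.models import Book  # hypothetical model

    books = [Book(title=f"vol {i}", year=1900 + i) for i in range(1000)]
    # Rows are inserted in chunks of min(batch_size, backend bulk limit).
    Book.objects.bulk_create(books, batch_size=100)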
+        """
+        obj = self._clone()
+        if obj._sticky_filter:
+            obj.query.filter_is_sticky = True
+            obj._sticky_filter = False
+        return obj
+
+    def _clone(self):
+        """
+        Return a copy of the current QuerySet. A lightweight alternative
+        to deepcopy().
+        """
+        c = self.__class__(
+            model=self.model,
+            query=self.query.chain(),
+            using=self._db,
+            hints=self._hints,
+        )
+        c._sticky_filter = self._sticky_filter
+        c._for_write = self._for_write
+        c._prefetch_related_lookups = self._prefetch_related_lookups[:]
+        c._known_related_objects = self._known_related_objects
+        c._iterable_class = self._iterable_class
+        c._fields = self._fields
+        return c
+
+    def _fetch_all(self):
+        if self._result_cache is None:
+            self._result_cache = list(self._iterable_class(self))
+        if self._prefetch_related_lookups and not self._prefetch_done:
+            self._prefetch_related_objects()
+
+    def _next_is_sticky(self):
+        """
+        Indicate that the next filter call and the one following that should
+        be treated as a single filter. This is only important when it comes to
+        determining when to reuse tables for many-to-many filters. Required so
+        that we can filter naturally on the results of related managers.
+
+        This doesn't return a clone of the current QuerySet (it returns
+        "self"). The method is only used internally and should be immediately
+        followed by a filter() that does create a clone.
+        """
+        self._sticky_filter = True
+        return self
+
+    def _merge_sanity_check(self, other):
+        """Check that two QuerySet classes may be merged."""
+        if self._fields is not None and (
+            set(self.query.values_select) != set(other.query.values_select)
+            or set(self.query.extra_select) != set(other.query.extra_select)
+            or set(self.query.annotation_select) != set(other.query.annotation_select)
+        ):
+            raise TypeError(
+                "Merging '%s' classes must involve the same values in each case."
+                % self.__class__.__name__
+            )
+
+    def _merge_known_related_objects(self, other):
+        """
+        Keep track of all known related objects from either QuerySet instance.
+        """
+        for field, objects in other._known_related_objects.items():
+            self._known_related_objects.setdefault(field, {}).update(objects)
+
+    def resolve_expression(self, *args, **kwargs):
+        if self._fields and len(self._fields) > 1:
+            # A values() queryset can only be used as a nested query
+            # if it is set up to select only a single field.
+            raise TypeError("Cannot use multi-field values as a filter value.")
+        query = self.query.resolve_expression(*args, **kwargs)
+        query._db = self._db
+        return query
+
+    resolve_expression.queryset_only = True
+
+    def _add_hints(self, **hints):
+        """
+        Update hinting information for use by routers. Add new key/values or
+        overwrite existing key/values.
+        """
+        self._hints.update(hints)
+
+    def _has_filters(self):
+        """
+        Check if this QuerySet has any filtering going on. This isn't
+        equivalent to checking if all objects are present in the results;
+        for example, qs[1:]._has_filters() -> False.
+        """
+        return self.query.has_filters()
+
+    @staticmethod
+    def _validate_values_are_expressions(values, method_name):
+        invalid_args = sorted(
+            str(arg) for arg in values if not hasattr(arg, "resolve_expression")
+        )
+        if invalid_args:
+            raise TypeError(
+                "QuerySet.%s() received non-expression(s): %s."
+                % (
+                    method_name,
+                    ", ".join(invalid_args),
+                )
+            )
+
+    def _not_support_combined_queries(self, operation_name):
+        if self.query.combinator:
+            raise NotSupportedError(
+                "Calling QuerySet.%s() after %s() is not supported."
+                % (operation_name, self.query.combinator)
+            )
+
+    def _check_operator_queryset(self, other, operator_):
+        if self.query.combinator or other.query.combinator:
+            raise TypeError(f"Cannot use {operator_} operator with combined queryset.")
+
+    def _check_ordering_first_last_queryset_aggregation(self, method):
+        if isinstance(self.query.group_by, tuple) and not any(
+            col.output_field is self.model._meta.pk for col in self.query.group_by
+        ):
+            raise TypeError(
+                f"Cannot use QuerySet.{method}() on an unordered queryset performing "
+                f"aggregation. Add an ordering with order_by()."
+            )
+
+
+class InstanceCheckMeta(type):
+    def __instancecheck__(self, instance):
+        return isinstance(instance, QuerySet) and instance.query.is_empty()
+
+
+class EmptyQuerySet(metaclass=InstanceCheckMeta):
+    """
+    Marker class for checking whether a queryset is empty via .none():
+    isinstance(qs.none(), EmptyQuerySet) -> True
+    """
+
+    def __init__(self, *args, **kwargs):
+        raise TypeError("EmptyQuerySet can't be instantiated")
+
+
+class RawQuerySet:
+    """
+    Provide an iterator which converts the results of raw SQL queries into
+    annotated model instances.
+    """
+
+    def __init__(
+        self,
+        raw_query,
+        model=None,
+        query=None,
+        params=(),
+        translations=None,
+        using=None,
+        hints=None,
+    ):
+        self.raw_query = raw_query
+        self.model = model
+        self._db = using
+        self._hints = hints or {}
+        self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
+        self.params = params
+        self.translations = translations or {}
+        self._result_cache = None
+        self._prefetch_related_lookups = ()
+        self._prefetch_done = False
+
+    def resolve_model_init_order(self):
+        """Resolve the init field names and value positions."""
+        converter = connections[self.db].introspection.identifier_converter
+        model_init_fields = [
+            f for f in self.model._meta.fields if converter(f.column) in self.columns
+        ]
+        annotation_fields = [
+            (column, pos)
+            for pos, column in enumerate(self.columns)
+            if column not in self.model_fields
+        ]
+        model_init_order = [
+            self.columns.index(converter(f.column)) for f in model_init_fields
+        ]
+        model_init_names = [f.attname for f in model_init_fields]
+        return model_init_names, model_init_order, annotation_fields
+
+    def prefetch_related(self, *lookups):
+        """Same as QuerySet.prefetch_related()"""
+        clone = self._clone()
+        if lookups == (None,):
+            clone._prefetch_related_lookups = ()
+        else:
+            clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
+        return clone
+
+    def _prefetch_related_objects(self):
+        prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
+        self._prefetch_done = True
+
+    def _clone(self):
+        """Same as QuerySet._clone()"""
+        c = self.__class__(
+            self.raw_query,
+            model=self.model,
+            query=self.query,
+            params=self.params,
+            translations=self.translations,
+            using=self._db,
+            hints=self._hints,
+        )
+        c._prefetch_related_lookups = self._prefetch_related_lookups[:]
+        return c
+
+    def _fetch_all(self):
+        if self._result_cache is None:
+            self._result_cache = list(self.iterator())
+        if self._prefetch_related_lookups and not self._prefetch_done:
+            self._prefetch_related_objects()
+
+    def __len__(self):
+        self._fetch_all()
+        return len(self._result_cache)
+
+    def __bool__(self):
+        self._fetch_all()
+        return bool(self._result_cache)
+
+    def __iter__(self):
+        self._fetch_all()
+        return iter(self._result_cache)
+
+    def __aiter__(self):
+        # Remember, __aiter__ itself is synchronous, it's the thing it returns
+        # that is async!
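A sketch of the InstanceCheckMeta/EmptyQuerySet pairing above: the class is never instantiated, it exists only so isinstance() can recognize querysets emptied via none():

    from django.db.models.query import EmptyQuerySet
    from library.models import Book  # hypothetical model

    qs = Book.objects.none()
    isinstance(qs, EmptyQuerySet)  # True, via InstanceCheckMeta.__instancecheck__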
+ async def generator(): + await sync_to_async(self._fetch_all)() + for item in self._result_cache: + yield item + + return generator() + + def iterator(self): + yield from RawModelIterable(self) + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self.query) + + def __getitem__(self, k): + return list(self)[k] + + @property + def db(self): + """Return the database used if this query is executed now.""" + return self._db or router.db_for_read(self.model, **self._hints) + + def using(self, alias): + """Select the database this RawQuerySet should execute against.""" + return RawQuerySet( + self.raw_query, + model=self.model, + query=self.query.chain(using=alias), + params=self.params, + translations=self.translations, + using=alias, + ) + + @cached_property + def columns(self): + """ + A list of model field names in the order they'll appear in the + query results. + """ + columns = self.query.get_columns() + # Adjust any column names which don't match field names + for query_name, model_name in self.translations.items(): + # Ignore translations for nonexistent column names + try: + index = columns.index(query_name) + except ValueError: + pass + else: + columns[index] = model_name + return columns + + @cached_property + def model_fields(self): + """A dict mapping column names to model field names.""" + converter = connections[self.db].introspection.identifier_converter + model_fields = {} + for field in self.model._meta.fields: + name, column = field.get_attname_column() + model_fields[converter(column)] = field + return model_fields + + +class Prefetch: + def __init__(self, lookup, queryset=None, to_attr=None): + # `prefetch_through` is the path we traverse to perform the prefetch. + self.prefetch_through = lookup + # `prefetch_to` is the path to the attribute that stores the result. + self.prefetch_to = lookup + if queryset is not None and ( + isinstance(queryset, RawQuerySet) + or ( + hasattr(queryset, "_iterable_class") + and not issubclass(queryset._iterable_class, ModelIterable) + ) + ): + raise ValueError( + "Prefetch querysets cannot use raw(), values(), and values_list()." 
+ ) + if to_attr: + self.prefetch_to = LOOKUP_SEP.join( + lookup.split(LOOKUP_SEP)[:-1] + [to_attr] + ) + + self.queryset = queryset + self.to_attr = to_attr + + def __getstate__(self): + obj_dict = self.__dict__.copy() + if self.queryset is not None: + queryset = self.queryset._chain() + # Prevent the QuerySet from being evaluated + queryset._result_cache = [] + queryset._prefetch_done = True + obj_dict["queryset"] = queryset + return obj_dict + + def add_prefix(self, prefix): + self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through + self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to + + def get_current_prefetch_to(self, level): + return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[: level + 1]) + + def get_current_to_attr(self, level): + parts = self.prefetch_to.split(LOOKUP_SEP) + to_attr = parts[level] + as_attr = self.to_attr and level == len(parts) - 1 + return to_attr, as_attr + + def get_current_queryset(self, level): + if self.get_current_prefetch_to(level) == self.prefetch_to: + return self.queryset + return None + + def __eq__(self, other): + if not isinstance(other, Prefetch): + return NotImplemented + return self.prefetch_to == other.prefetch_to + + def __hash__(self): + return hash((self.__class__, self.prefetch_to)) + + +def normalize_prefetch_lookups(lookups, prefix=None): + """Normalize lookups into Prefetch objects.""" + ret = [] + for lookup in lookups: + if not isinstance(lookup, Prefetch): + lookup = Prefetch(lookup) + if prefix: + lookup.add_prefix(prefix) + ret.append(lookup) + return ret + + +def prefetch_related_objects(model_instances, *related_lookups): + """ + Populate prefetched object caches for a list of model instances based on + the lookups/Prefetch instances given. + """ + if not model_instances: + return # nothing to do + + # We need to be able to dynamically add to the list of prefetch_related + # lookups that we look up (see below). So we need some book keeping to + # ensure we don't do duplicate work. + done_queries = {} # dictionary of things like 'foo__bar': [results] + + auto_lookups = set() # we add to this as we go through. + followed_descriptors = set() # recursion protection + + all_lookups = normalize_prefetch_lookups(reversed(related_lookups)) + while all_lookups: + lookup = all_lookups.pop() + if lookup.prefetch_to in done_queries: + if lookup.queryset is not None: + raise ValueError( + "'%s' lookup was already seen with a different queryset. " + "You may need to adjust the ordering of your lookups." + % lookup.prefetch_to + ) + + continue + + # Top level, the list of objects to decorate is the result cache + # from the primary QuerySet. It won't be for deeper levels. + obj_list = model_instances + + through_attrs = lookup.prefetch_through.split(LOOKUP_SEP) + for level, through_attr in enumerate(through_attrs): + # Prepare main instances + if not obj_list: + break + + prefetch_to = lookup.get_current_prefetch_to(level) + if prefetch_to in done_queries: + # Skip any prefetching, and any object preparation + obj_list = done_queries[prefetch_to] + continue + + # Prepare objects: + good_objects = True + for obj in obj_list: + # Since prefetching can re-use instances, it is possible to have + # the same instance multiple times in obj_list, so obj might + # already be prepared. 
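A sketch of the Prefetch object defined above, swapping in a custom queryset and landing the results on a separate attribute via to_attr; the models are hypothetical:

    from django.db.models import Prefetch
    from library.models import Author, Book  # hypothetical models

    authors = Author.objects.prefetch_related(
        Prefetch(
            "book_set",
            queryset=Book.objects.filter(year__gte=2000),
            to_attr="recent_books",  # results stored as a list on each author
        )
    )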
+ if not hasattr(obj, "_prefetched_objects_cache"): + try: + obj._prefetched_objects_cache = {} + except (AttributeError, TypeError): + # Must be an immutable object from + # values_list(flat=True), for example (TypeError) or + # a QuerySet subclass that isn't returning Model + # instances (AttributeError), either in Django or a 3rd + # party. prefetch_related() doesn't make sense, so quit. + good_objects = False + break + if not good_objects: + break + + # Descend down tree + + # We assume that objects retrieved are homogeneous (which is the premise + # of prefetch_related), so what applies to first object applies to all. + first_obj = obj_list[0] + to_attr = lookup.get_current_to_attr(level)[0] + prefetcher, descriptor, attr_found, is_fetched = get_prefetcher( + first_obj, through_attr, to_attr + ) + + if not attr_found: + raise AttributeError( + "Cannot find '%s' on %s object, '%s' is an invalid " + "parameter to prefetch_related()" + % ( + through_attr, + first_obj.__class__.__name__, + lookup.prefetch_through, + ) + ) + + if level == len(through_attrs) - 1 and prefetcher is None: + # Last one, this *must* resolve to something that supports + # prefetching, otherwise there is no point adding it and the + # developer asking for it has made a mistake. + raise ValueError( + "'%s' does not resolve to an item that supports " + "prefetching - this is an invalid parameter to " + "prefetch_related()." % lookup.prefetch_through + ) + + obj_to_fetch = None + if prefetcher is not None: + obj_to_fetch = [obj for obj in obj_list if not is_fetched(obj)] + + if obj_to_fetch: + obj_list, additional_lookups = prefetch_one_level( + obj_to_fetch, + prefetcher, + lookup, + level, + ) + # We need to ensure we don't keep adding lookups from the + # same relationships to stop infinite recursion. So, if we + # are already on an automatically added lookup, don't add + # the new lookups from relationships we've seen already. + if not ( + prefetch_to in done_queries + and lookup in auto_lookups + and descriptor in followed_descriptors + ): + done_queries[prefetch_to] = obj_list + new_lookups = normalize_prefetch_lookups( + reversed(additional_lookups), prefetch_to + ) + auto_lookups.update(new_lookups) + all_lookups.extend(new_lookups) + followed_descriptors.add(descriptor) + else: + # Either a singly related object that has already been fetched + # (e.g. via select_related), or hopefully some other property + # that doesn't support prefetching but needs to be traversed. + + # We replace the current list of parent objects with the list + # of related objects, filtering out empty or missing values so + # that we can continue with nullable or reverse relations. + new_obj_list = [] + for obj in obj_list: + if through_attr in getattr(obj, "_prefetched_objects_cache", ()): + # If related objects have been prefetched, use the + # cache rather than the object's through_attr. + new_obj = list(obj._prefetched_objects_cache.get(through_attr)) + else: + try: + new_obj = getattr(obj, through_attr) + except exceptions.ObjectDoesNotExist: + continue + if new_obj is None: + continue + # We special-case `list` rather than something more generic + # like `Iterable` because we don't want to accidentally match + # user models that define __iter__. 
+ if isinstance(new_obj, list): + new_obj_list.extend(new_obj) + else: + new_obj_list.append(new_obj) + obj_list = new_obj_list + + +def get_prefetcher(instance, through_attr, to_attr): + """ + For the attribute 'through_attr' on the given instance, find + an object that has a get_prefetch_queryset(). + Return a 4 tuple containing: + (the object with get_prefetch_queryset (or None), + the descriptor object representing this relationship (or None), + a boolean that is False if the attribute was not found at all, + a function that takes an instance and returns a boolean that is True if + the attribute has already been fetched for that instance) + """ + + def has_to_attr_attribute(instance): + return hasattr(instance, to_attr) + + prefetcher = None + is_fetched = has_to_attr_attribute + + # For singly related objects, we have to avoid getting the attribute + # from the object, as this will trigger the query. So we first try + # on the class, in order to get the descriptor object. + rel_obj_descriptor = getattr(instance.__class__, through_attr, None) + if rel_obj_descriptor is None: + attr_found = hasattr(instance, through_attr) + else: + attr_found = True + if rel_obj_descriptor: + # singly related object, descriptor object has the + # get_prefetch_queryset() method. + if hasattr(rel_obj_descriptor, "get_prefetch_queryset"): + prefetcher = rel_obj_descriptor + is_fetched = rel_obj_descriptor.is_cached + else: + # descriptor doesn't support prefetching, so we go ahead and get + # the attribute on the instance rather than the class to + # support many related managers + rel_obj = getattr(instance, through_attr) + if hasattr(rel_obj, "get_prefetch_queryset"): + prefetcher = rel_obj + if through_attr != to_attr: + # Special case cached_property instances because hasattr + # triggers attribute computation and assignment. + if isinstance( + getattr(instance.__class__, to_attr, None), cached_property + ): + + def has_cached_property(instance): + return to_attr in instance.__dict__ + + is_fetched = has_cached_property + else: + + def in_prefetched_cache(instance): + return through_attr in instance._prefetched_objects_cache + + is_fetched = in_prefetched_cache + return prefetcher, rel_obj_descriptor, attr_found, is_fetched + + +def prefetch_one_level(instances, prefetcher, lookup, level): + """ + Helper function for prefetch_related_objects(). + + Run prefetches on all instances using the prefetcher object, + assigning results to relevant caches in instance. + + Return the prefetched objects along with any additional prefetches that + must be done due to prefetch_related lookups found from default managers. + """ + # prefetcher must have a method get_prefetch_queryset() which takes a list + # of instances, and returns a tuple: + + # (queryset of instances of self.model that are related to passed in instances, + # callable that gets value to be matched for returned instances, + # callable that gets value to be matched for passed in instances, + # boolean that is True for singly related objects, + # cache or field name to assign to, + # boolean that is True when the previous argument is a cache name vs a field name). + + # The 'values to be matched' must be hashable as they will be used + # in a dictionary. + + ( + rel_qs, + rel_obj_attr, + instance_attr, + single, + cache_name, + is_descriptor, + ) = prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)) + # We have to handle the possibility that the QuerySet we just got back + # contains some prefetch_related lookups. 
We don't want to trigger the + # prefetch_related functionality by evaluating the query. Rather, we need + # to merge in the prefetch_related lookups. + # Copy the lookups in case it is a Prefetch object which could be reused + # later (happens in nested prefetch_related). + additional_lookups = [ + copy.copy(additional_lookup) + for additional_lookup in getattr(rel_qs, "_prefetch_related_lookups", ()) + ] + if additional_lookups: + # Don't need to clone because the manager should have given us a fresh + # instance, so we access an internal instead of using public interface + # for performance reasons. + rel_qs._prefetch_related_lookups = () + + all_related_objects = list(rel_qs) + + rel_obj_cache = {} + for rel_obj in all_related_objects: + rel_attr_val = rel_obj_attr(rel_obj) + rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj) + + to_attr, as_attr = lookup.get_current_to_attr(level) + # Make sure `to_attr` does not conflict with a field. + if as_attr and instances: + # We assume that objects retrieved are homogeneous (which is the premise + # of prefetch_related), so what applies to first object applies to all. + model = instances[0].__class__ + try: + model._meta.get_field(to_attr) + except exceptions.FieldDoesNotExist: + pass + else: + msg = "to_attr={} conflicts with a field on the {} model." + raise ValueError(msg.format(to_attr, model.__name__)) + + # Whether or not we're prefetching the last part of the lookup. + leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level + + for obj in instances: + instance_attr_val = instance_attr(obj) + vals = rel_obj_cache.get(instance_attr_val, []) + + if single: + val = vals[0] if vals else None + if as_attr: + # A to_attr has been given for the prefetch. + setattr(obj, to_attr, val) + elif is_descriptor: + # cache_name points to a field name in obj. + # This field is a descriptor for a related object. + setattr(obj, cache_name, val) + else: + # No to_attr has been given for this prefetch operation and the + # cache_name does not point to a descriptor. Store the value of + # the field in the object's field cache. + obj._state.fields_cache[cache_name] = val + else: + if as_attr: + setattr(obj, to_attr, vals) + else: + manager = getattr(obj, to_attr) + if leaf and lookup.queryset is not None: + qs = manager._apply_rel_filters(lookup.queryset) + else: + qs = manager.get_queryset() + qs._result_cache = vals + # We don't want the individual qs doing prefetch_related now, + # since we have merged this into the current work. + qs._prefetch_done = True + obj._prefetched_objects_cache[cache_name] = qs + return all_related_objects, additional_lookups + + +class RelatedPopulator: + """ + RelatedPopulator is used for select_related() object instantiation. + + The idea is that each select_related() model will be populated by a + different RelatedPopulator instance. The RelatedPopulator instances get + klass_info and select (computed in SQLCompiler) plus the used db as + input for initialization. That data is used to compute which columns + to use, how to instantiate the model, and how to populate the links + between the objects. + + The actual creation of the objects is done in populate() method. This + method gets row and from_obj as input and populates the select_related() + model instance. + """ + + def __init__(self, klass_info, select, db): + self.db = db + # Pre-compute needed attributes. 
The attributes are: + # - model_cls: the possibly deferred model class to instantiate + # - either: + # - cols_start, cols_end: usually the columns in the row are + # in the same order model_cls.__init__ expects them, so we + # can instantiate by model_cls(*row[cols_start:cols_end]) + # - reorder_for_init: When select_related descends to a child + # class, then we want to reuse the already selected parent + # data. However, in this case the parent data isn't necessarily + # in the same order that Model.__init__ expects it to be, so + # we have to reorder the parent data. The reorder_for_init + # attribute contains a function used to reorder the field data + # in the order __init__ expects it. + # - pk_idx: the index of the primary key field in the reordered + # model data. Used to check if a related object exists at all. + # - init_list: the field attnames fetched from the database. For + # deferred models this isn't the same as all attnames of the + # model's fields. + # - related_populators: a list of RelatedPopulator instances if + # select_related() descends to related models from this model. + # - local_setter, remote_setter: Methods to set cached values on + # the object being populated and on the remote object. Usually + # these are Field.set_cached_value() methods. + select_fields = klass_info["select_fields"] + from_parent = klass_info["from_parent"] + if not from_parent: + self.cols_start = select_fields[0] + self.cols_end = select_fields[-1] + 1 + self.init_list = [ + f[0].target.attname for f in select[self.cols_start : self.cols_end] + ] + self.reorder_for_init = None + else: + attname_indexes = { + select[idx][0].target.attname: idx for idx in select_fields + } + model_init_attnames = ( + f.attname for f in klass_info["model"]._meta.concrete_fields + ) + self.init_list = [ + attname for attname in model_init_attnames if attname in attname_indexes + ] + self.reorder_for_init = operator.itemgetter( + *[attname_indexes[attname] for attname in self.init_list] + ) + + self.model_cls = klass_info["model"] + self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname) + self.related_populators = get_related_populators(klass_info, select, self.db) + self.local_setter = klass_info["local_setter"] + self.remote_setter = klass_info["remote_setter"] + + def populate(self, row, from_obj): + if self.reorder_for_init: + obj_data = self.reorder_for_init(row) + else: + obj_data = row[self.cols_start : self.cols_end] + if obj_data[self.pk_idx] is None: + obj = None + else: + obj = self.model_cls.from_db(self.db, self.init_list, obj_data) + for rel_iter in self.related_populators: + rel_iter.populate(row, obj) + self.local_setter(from_obj, obj) + if obj is not None: + self.remote_setter(obj, from_obj) + + +def get_related_populators(klass_info, select, db): + iterators = [] + related_klass_infos = klass_info.get("related_klass_infos", []) + for rel_klass_info in related_klass_infos: + rel_cls = RelatedPopulator(rel_klass_info, select, db) + iterators.append(rel_cls) + return iterators diff --git a/testbed/django__django/django/db/models/query_utils.py b/testbed/django__django/django/db/models/query_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..78148f76b0df6966a3424325b2093cdb22e321a6 --- /dev/null +++ b/testbed/django__django/django/db/models/query_utils.py @@ -0,0 +1,446 @@ +""" +Various data structures used in query construction. 
+
+Factored out from django.db.models.query to avoid making the main module very
+large and/or so that they can be used by other modules without getting into
+circular import difficulties.
+"""
+import functools
+import inspect
+import logging
+from collections import namedtuple
+
+from django.core.exceptions import FieldError
+from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections
+from django.db.models.constants import LOOKUP_SEP
+from django.utils import tree
+
+logger = logging.getLogger("django.db.models")
+
+# PathInfo is used when converting lookups (fk__somecol). The contents
+# describe the relation in Model terms (model Options and Fields for both
+# sides of the relation). The join_field is the field backing the relation.
+PathInfo = namedtuple(
+    "PathInfo",
+    "from_opts to_opts target_fields join_field m2m direct filtered_relation",
+)
+
+
+def subclasses(cls):
+    yield cls
+    for subclass in cls.__subclasses__():
+        yield from subclasses(subclass)
+
+
+class Q(tree.Node):
+    """
+    Encapsulate filters as objects that can then be combined logically (using
+    `&` and `|`).
+    """
+
+    # Connection types
+    AND = "AND"
+    OR = "OR"
+    XOR = "XOR"
+    default = AND
+    conditional = True
+
+    def __init__(self, *args, _connector=None, _negated=False, **kwargs):
+        super().__init__(
+            children=[*args, *sorted(kwargs.items())],
+            connector=_connector,
+            negated=_negated,
+        )
+
+    def _combine(self, other, conn):
+        if getattr(other, "conditional", False) is False:
+            raise TypeError(other)
+        if not self:
+            return other.copy()
+        if not other and isinstance(other, Q):
+            return self.copy()
+
+        obj = self.create(connector=conn)
+        obj.add(self, conn)
+        obj.add(other, conn)
+        return obj
+
+    def __or__(self, other):
+        return self._combine(other, self.OR)
+
+    def __and__(self, other):
+        return self._combine(other, self.AND)
+
+    def __xor__(self, other):
+        return self._combine(other, self.XOR)
+
+    def __invert__(self):
+        obj = self.copy()
+        obj.negate()
+        return obj
+
+    def resolve_expression(
+        self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
+    ):
+        # We must promote any new joins to left outer joins so that when Q is
+        # used as an expression, rows aren't filtered due to joins.
+        clause, joins = query._add_q(
+            self,
+            reuse,
+            allow_joins=allow_joins,
+            split_subq=False,
+            check_filterable=False,
+            summarize=summarize,
+        )
+        query.promote_joins(joins)
+        return clause
+
+    def flatten(self):
+        """
+        Recursively yield this Q object and all subexpressions, in depth-first
+        order.
+        """
+        yield self
+        for child in self.children:
+            if isinstance(child, tuple):
+                # Use the lookup.
+                child = child[1]
+            if hasattr(child, "flatten"):
+                yield from child.flatten()
+            else:
+                yield child
+
+    def check(self, against, using=DEFAULT_DB_ALIAS):
+        """
+        Do a database query to check if the expressions of the Q instance
+        match the given `against` expressions.
+        """
+        # Avoid circular imports.
+        from django.db.models import BooleanField, Value
+        from django.db.models.functions import Coalesce
+        from django.db.models.sql import Query
+        from django.db.models.sql.constants import SINGLE
+
+        query = Query(None)
+        for name, value in against.items():
+            if not hasattr(value, "resolve_expression"):
+                value = Value(value)
+            query.add_annotation(value, name, select=False)
+        query.add_annotation(Value(1), "_check")
+        # This will raise a FieldError if a field is missing in "against".
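A sketch of combining Q objects with the operators defined above; the `library.Book` model and its `pages` field are hypothetical:

    from django.db.models import Q
    from library.models import Book  # hypothetical model

    old_or_long = Q(year__lt=1900) | Q(pages__gt=500)  # OR
    not_draft = ~Q(title__startswith="draft")          # negation
    Book.objects.filter(old_or_long & not_draft)       # AND of both groups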
+        if connections[using].features.supports_comparing_boolean_expr:
+            query.add_q(Q(Coalesce(self, True, output_field=BooleanField())))
+        else:
+            query.add_q(self)
+        compiler = query.get_compiler(using=using)
+        try:
+            return compiler.execute_sql(SINGLE) is not None
+        except DatabaseError as e:
+            logger.warning("Got a database error calling check() on %r: %s", self, e)
+            return True
+
+    def deconstruct(self):
+        path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
+        if path.startswith("django.db.models.query_utils"):
+            path = path.replace("django.db.models.query_utils", "django.db.models")
+        args = tuple(self.children)
+        kwargs = {}
+        if self.connector != self.default:
+            kwargs["_connector"] = self.connector
+        if self.negated:
+            kwargs["_negated"] = True
+        return path, args, kwargs
+
+
+class DeferredAttribute:
+    """
+    A wrapper for a deferred-loading field. When the value is read from this
+    object the first time, the query is executed.
+    """
+
+    def __init__(self, field):
+        self.field = field
+
+    def __get__(self, instance, cls=None):
+        """
+        Retrieve and cache the value from the datastore on the first lookup.
+        Return the cached value.
+        """
+        if instance is None:
+            return self
+        data = instance.__dict__
+        field_name = self.field.attname
+        if field_name not in data:
+            # Let's see if the field is part of the parent chain. If so we
+            # might be able to reuse the already loaded value. Refs #18343.
+            val = self._check_parent_chain(instance)
+            if val is None:
+                instance.refresh_from_db(fields=[field_name])
+            else:
+                data[field_name] = val
+        return data[field_name]
+
+    def _check_parent_chain(self, instance):
+        """
+        Check if the field value can be fetched from a parent field already
+        loaded in the instance. This can be done if the to-be-fetched
+        field is a primary key field.
+        """
+        opts = instance._meta
+        link_field = opts.get_ancestor_link(self.field.model)
+        if self.field.primary_key and self.field != link_field:
+            return getattr(instance, link_field.attname)
+        return None
+
+
+class class_or_instance_method:
+    """
+    Hook used in RegisterLookupMixin to return partial functions depending on
+    the caller type (instance or class of models.Field).
+    """
+
+    def __init__(self, class_method, instance_method):
+        self.class_method = class_method
+        self.instance_method = instance_method
+
+    def __get__(self, instance, owner):
+        if instance is None:
+            return functools.partial(self.class_method, owner)
+        return functools.partial(self.instance_method, instance)
+
+
+class RegisterLookupMixin:
+    def _get_lookup(self, lookup_name):
+        return self.get_lookups().get(lookup_name, None)
+
+    @functools.cache
+    def get_class_lookups(cls):
+        class_lookups = [
+            parent.__dict__.get("class_lookups", {}) for parent in inspect.getmro(cls)
+        ]
+        return cls.merge_dicts(class_lookups)
+
+    def get_instance_lookups(self):
+        class_lookups = self.get_class_lookups()
+        if instance_lookups := getattr(self, "instance_lookups", None):
+            return {**class_lookups, **instance_lookups}
+        return class_lookups
+
+    get_lookups = class_or_instance_method(get_class_lookups, get_instance_lookups)
+    get_class_lookups = classmethod(get_class_lookups)
+
+    def get_lookup(self, lookup_name):
+        from django.db.models.lookups import Lookup
+
+        found = self._get_lookup(lookup_name)
+        if found is None and hasattr(self, "output_field"):
+            return self.output_field.get_lookup(lookup_name)
+        if found is not None and not issubclass(found, Lookup):
+            return None
+        return found
+
+    def get_transform(self, lookup_name):
+        from django.db.models.lookups import Transform
+
+        found = self._get_lookup(lookup_name)
+        if found is None and hasattr(self, "output_field"):
+            return self.output_field.get_transform(lookup_name)
+        if found is not None and not issubclass(found, Transform):
+            return None
+        return found
+
+    @staticmethod
+    def merge_dicts(dicts):
+        """
+        Merge dicts in reverse so that the order of the original list takes
+        precedence, e.g. merge_dicts([a, b]) will prefer the keys in 'a' over
+        those in 'b'.
+        """
+        merged = {}
+        for d in reversed(dicts):
+            merged.update(d)
+        return merged
+
+    @classmethod
+    def _clear_cached_class_lookups(cls):
+        for subclass in subclasses(cls):
+            subclass.get_class_lookups.cache_clear()
+
+    def register_class_lookup(cls, lookup, lookup_name=None):
+        if lookup_name is None:
+            lookup_name = lookup.lookup_name
+        if "class_lookups" not in cls.__dict__:
+            cls.class_lookups = {}
+        cls.class_lookups[lookup_name] = lookup
+        cls._clear_cached_class_lookups()
+        return lookup
+
+    def register_instance_lookup(self, lookup, lookup_name=None):
+        if lookup_name is None:
+            lookup_name = lookup.lookup_name
+        if "instance_lookups" not in self.__dict__:
+            self.instance_lookups = {}
+        self.instance_lookups[lookup_name] = lookup
+        return lookup
+
+    register_lookup = class_or_instance_method(
+        register_class_lookup, register_instance_lookup
+    )
+    register_class_lookup = classmethod(register_class_lookup)
+
+    def _unregister_class_lookup(cls, lookup, lookup_name=None):
+        """
+        Remove given lookup from cls lookups. For use in tests only as it's
+        not thread-safe.
+        """
+        if lookup_name is None:
+            lookup_name = lookup.lookup_name
+        del cls.class_lookups[lookup_name]
+        cls._clear_cached_class_lookups()
+
+    def _unregister_instance_lookup(self, lookup, lookup_name=None):
+        """
+        Remove given lookup from instance lookups. For use in tests only as
+        it's not thread-safe.
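A sketch of the class-level registration path above (register_lookup dispatched through class_or_instance_method); this mirrors the custom-lookup pattern from the Django documentation:

    from django.db.models import CharField
    from django.db.models.lookups import Lookup

    class NotEqual(Lookup):
        lookup_name = "ne"

        def as_sql(self, compiler, connection):
            lhs, lhs_params = self.process_lhs(compiler, connection)
            rhs, rhs_params = self.process_rhs(compiler, connection)
            return "%s <> %s" % (lhs, rhs), lhs_params + rhs_params

    CharField.register_lookup(NotEqual)  # enables e.g. title__ne="..." filters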
+        """
+        if lookup_name is None:
+            lookup_name = lookup.lookup_name
+        del self.instance_lookups[lookup_name]
+
+    _unregister_lookup = class_or_instance_method(
+        _unregister_class_lookup, _unregister_instance_lookup
+    )
+    _unregister_class_lookup = classmethod(_unregister_class_lookup)
+
+
+def select_related_descend(field, restricted, requested, select_mask, reverse=False):
+    """
+    Return True if this field should be used to descend deeper for
+    select_related() purposes. Used by both the query construction code
+    (compiler.get_related_selections()) and the model instance creation code
+    (compiler.klass_info).
+
+    Arguments:
+     * field - the field to be checked
+     * restricted - a boolean indicating if the field list has been
+       manually restricted using a requested clause
+     * requested - The select_related() dictionary.
+     * select_mask - the dictionary of selected fields.
+     * reverse - boolean, True if we are checking a reverse select related
+    """
+    if not field.remote_field:
+        return False
+    if field.remote_field.parent_link and not reverse:
+        return False
+    if restricted:
+        if reverse and field.related_query_name() not in requested:
+            return False
+        if not reverse and field.name not in requested:
+            return False
+    if not restricted and field.null:
+        return False
+    if (
+        restricted
+        and select_mask
+        and field.name in requested
+        and field not in select_mask
+    ):
+        raise FieldError(
+            f"Field {field.model._meta.object_name}.{field.name} cannot be both "
+            "deferred and traversed using select_related at the same time."
+        )
+    return True
+
+
+def refs_expression(lookup_parts, annotations):
+    """
+    Check if the lookup_parts contains references to the given annotations set.
+    Because the LOOKUP_SEP is contained in the default annotation names, check
+    each prefix of the lookup_parts for a match.
+    """
+    for n in range(1, len(lookup_parts) + 1):
+        level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])
+        if annotations.get(level_n_lookup):
+            return level_n_lookup, lookup_parts[n:]
+    return None, ()
+
+
+def check_rel_lookup_compatibility(model, target_opts, field):
+    """
+    Check that self.model is compatible with target_opts. Compatibility
+    is OK if:
+      1) model and opts match (where proxy inheritance is removed)
+      2) model is parent of opts' model or the other way around
+    """
+
+    def check(opts):
+        return (
+            model._meta.concrete_model == opts.concrete_model
+            or opts.concrete_model in model._meta.get_parent_list()
+            or model in opts.get_parent_list()
+        )
+
+    # If the field is a primary key, then doing a query against the field's
+    # model is ok, too. Consider the case:
+    # class Restaurant(models.Model):
+    #     place = OneToOneField(Place, primary_key=True)
+    # Restaurant.objects.filter(pk__in=Restaurant.objects.all()).
+    # If we didn't have the primary key check, then pk__in (== place__in) would
+    # give Place's opts as the target opts, but Restaurant isn't compatible
+    # with that. This logic applies only to primary keys, as when doing __in=qs,
+    # we are going to turn this into __in=qs.values('pk') later on.
+ return check(target_opts) or ( + getattr(field, "primary_key", False) and check(field.model._meta) + ) + + +class FilteredRelation: + """Specify custom filtering in the ON clause of SQL joins.""" + + def __init__(self, relation_name, *, condition=Q()): + if not relation_name: + raise ValueError("relation_name cannot be empty.") + self.relation_name = relation_name + self.alias = None + if not isinstance(condition, Q): + raise ValueError("condition argument must be a Q() instance.") + # .condition and .resolved_condition have to be stored independently + # as the former must remain unchanged for Join.__eq__ to remain stable + # and reusable even once their .filtered_relation are resolved. + self.condition = condition + self.resolved_condition = None + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return ( + self.relation_name == other.relation_name + and self.alias == other.alias + and self.condition == other.condition + ) + + def clone(self): + clone = FilteredRelation(self.relation_name, condition=self.condition) + clone.alias = self.alias + if (resolved_condition := self.resolved_condition) is not None: + clone.resolved_condition = resolved_condition.clone() + return clone + + def relabeled_clone(self, change_map): + clone = self.clone() + if resolved_condition := clone.resolved_condition: + clone.resolved_condition = resolved_condition.relabeled_clone(change_map) + return clone + + def resolve_expression(self, query, reuse, *args, **kwargs): + clone = self.clone() + clone.resolved_condition = query.build_filter( + self.condition, + can_reuse=reuse, + allow_joins=True, + split_subq=False, + update_join_types=False, + )[0] + return clone + + def as_sql(self, compiler, connection): + return compiler.compile(self.resolved_condition) diff --git a/testbed/django__django/django/db/models/signals.py b/testbed/django__django/django/db/models/signals.py new file mode 100644 index 0000000000000000000000000000000000000000..a0720937af314936e6a6da0275d83f1584a579ad --- /dev/null +++ b/testbed/django__django/django/db/models/signals.py @@ -0,0 +1,54 @@ +from functools import partial + +from django.db.models.utils import make_model_tuple +from django.dispatch import Signal + +class_prepared = Signal() + + +class ModelSignal(Signal): + """ + Signal subclass that allows the sender to be lazily specified as a string + of the `app_label.ModelName` form. + """ + + def _lazy_method(self, method, apps, receiver, sender, **kwargs): + from django.db.models.options import Options + + # This partial takes a single optional argument named "sender". 
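A sketch of FilteredRelation above: the condition is compiled into the JOIN's ON clause rather than the WHERE clause, so unmatched rows survive as NULLs; the models are hypothetical:

    from django.db.models import FilteredRelation, Q
    from library.models import Author  # hypothetical model with reverse "book"

    authors = Author.objects.annotate(
        recent_book=FilteredRelation("book", condition=Q(book__year__gte=2000)),
    ).filter(recent_book__title__icontains="django")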
+ partial_method = partial(method, receiver, **kwargs) + if isinstance(sender, str): + apps = apps or Options.default_apps + apps.lazy_model_operation(partial_method, make_model_tuple(sender)) + else: + return partial_method(sender) + + def connect(self, receiver, sender=None, weak=True, dispatch_uid=None, apps=None): + self._lazy_method( + super().connect, + apps, + receiver, + sender, + weak=weak, + dispatch_uid=dispatch_uid, + ) + + def disconnect(self, receiver=None, sender=None, dispatch_uid=None, apps=None): + return self._lazy_method( + super().disconnect, apps, receiver, sender, dispatch_uid=dispatch_uid + ) + + +pre_init = ModelSignal(use_caching=True) +post_init = ModelSignal(use_caching=True) + +pre_save = ModelSignal(use_caching=True) +post_save = ModelSignal(use_caching=True) + +pre_delete = ModelSignal(use_caching=True) +post_delete = ModelSignal(use_caching=True) + +m2m_changed = ModelSignal(use_caching=True) + +pre_migrate = Signal() +post_migrate = Signal() diff --git a/testbed/django__django/django/db/models/sql/__init__.py b/testbed/django__django/django/db/models/sql/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dd31a6ea9e63f4216d7e05728dc09f6e01aaf5fa --- /dev/null +++ b/testbed/django__django/django/db/models/sql/__init__.py @@ -0,0 +1,6 @@ +from django.db.models.sql.query import * # NOQA +from django.db.models.sql.query import Query +from django.db.models.sql.subqueries import * # NOQA +from django.db.models.sql.where import AND, OR, XOR + +__all__ = ["Query", "AND", "OR", "XOR"] diff --git a/testbed/django__django/django/db/models/sql/compiler.py b/testbed/django__django/django/db/models/sql/compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..b28dc925ba37e527a0fec91d5dc2bafb7aff2741 --- /dev/null +++ b/testbed/django__django/django/db/models/sql/compiler.py @@ -0,0 +1,2099 @@ +import collections +import json +import re +from functools import partial +from itertools import chain + +from django.core.exceptions import EmptyResultSet, FieldError, FullResultSet +from django.db import DatabaseError, NotSupportedError +from django.db.models.constants import LOOKUP_SEP +from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value +from django.db.models.functions import Cast, Random +from django.db.models.lookups import Lookup +from django.db.models.query_utils import select_related_descend +from django.db.models.sql.constants import ( + CURSOR, + GET_ITERATOR_CHUNK_SIZE, + MULTI, + NO_RESULTS, + ORDER_DIR, + SINGLE, +) +from django.db.models.sql.query import Query, get_order_dir +from django.db.models.sql.where import AND +from django.db.transaction import TransactionManagementError +from django.utils.functional import cached_property +from django.utils.hashable import make_hashable +from django.utils.regex_helper import _lazy_re_compile + + +class PositionRef(Ref): + def __init__(self, ordinal, refs, source): + self.ordinal = ordinal + super().__init__(refs, source) + + def as_sql(self, compiler, connection): + return str(self.ordinal), () + + +class SQLCompiler: + # Multiline ordering SQL clause may appear from RawSQL. + ordering_parts = _lazy_re_compile( + r"^(.*)\s(?:ASC|DESC).*", + re.MULTILINE | re.DOTALL, + ) + + def __init__(self, query, connection, using, elide_empty=True): + self.query = query + self.connection = connection + self.using = using + # Some queries, e.g. coalesced aggregation, need to be executed even if + # they would return an empty result set. 
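+ # For example (sketch, hypothetical IntegerField "x"): an aggregate
+ # such as .aggregate(total=Coalesce(Sum("x"), 0)) over a queryset
+ # that matches nothing still has to run so Coalesce() can supply
+ # the fallback; such callers pass elide_empty=False, and the
+ # impossible WHERE clause is rendered as "0 = 1" (see as_sql())
+ # instead of raising EmptyResultSet.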
+ self.elide_empty = elide_empty + self.quote_cache = {"*": "*"} + # The select, klass_info, and annotations are needed by QuerySet.iterator() + # these are set as a side-effect of executing the query. Note that we calculate + # separately a list of extra select columns needed for grammatical correctness + # of the query, but these columns are not included in self.select. + self.select = None + self.annotation_col_map = None + self.klass_info = None + self._meta_ordering = None + + def __repr__(self): + return ( + f"<{self.__class__.__qualname__} " + f"model={self.query.model.__qualname__} " + f"connection={self.connection!r} using={self.using!r}>" + ) + + def setup_query(self, with_col_aliases=False): + if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map): + self.query.get_initial_alias() + self.select, self.klass_info, self.annotation_col_map = self.get_select( + with_col_aliases=with_col_aliases, + ) + self.col_count = len(self.select) + + def pre_sql_setup(self, with_col_aliases=False): + """ + Do any necessary class setup immediately prior to producing SQL. This + is for things that can't necessarily be done in __init__ because we + might not have all the pieces in place at that time. + """ + self.setup_query(with_col_aliases=with_col_aliases) + order_by = self.get_order_by() + self.where, self.having, self.qualify = self.query.where.split_having_qualify( + must_group_by=self.query.group_by is not None + ) + extra_select = self.get_extra_select(order_by, self.select) + self.has_extra_select = bool(extra_select) + group_by = self.get_group_by(self.select + extra_select, order_by) + return extra_select, order_by, group_by + + def get_group_by(self, select, order_by): + """ + Return a list of 2-tuples of form (sql, params). + + The logic of what exactly the GROUP BY clause contains is hard + to describe in other words than "if it passes the test suite, + then it is correct". + """ + # Some examples: + # SomeModel.objects.annotate(Count('somecol')) + # GROUP BY: all fields of the model + # + # SomeModel.objects.values('name').annotate(Count('somecol')) + # GROUP BY: name + # + # SomeModel.objects.annotate(Count('somecol')).values('name') + # GROUP BY: all cols of the model + # + # SomeModel.objects.values('name', 'pk') + # .annotate(Count('somecol')).values('pk') + # GROUP BY: name, pk + # + # SomeModel.objects.values('name').annotate(Count('somecol')).values('pk') + # GROUP BY: name, pk + # + # In fact, the self.query.group_by is the minimal set to GROUP BY. It + # can't be ever restricted to a smaller set, but additional columns in + # HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately + # the end result is that it is impossible to force the query to have + # a chosen GROUP BY clause - you can almost do this by using the form: + # .values(*wanted_cols).annotate(AnAggregate()) + # but any later annotations, extra selects, values calls that + # refer some column outside of the wanted_cols, order_by, or even + # filter calls can alter the GROUP BY clause. + + # The query.group_by is either None (no GROUP BY at all), True + # (group by select fields), or a list of expressions to be added + # to the group by. + if self.query.group_by is None: + return [] + expressions = [] + group_by_refs = set() + if self.query.group_by is not True: + # If the group by is set to a list (by .values() call most likely), + # then we need to add everything in it to the GROUP BY clause. + # Backwards compatibility hack for setting query.group_by. 
Remove + # when we have public API way of forcing the GROUP BY clause. + # Converts string references to expressions. + for expr in self.query.group_by: + if not hasattr(expr, "as_sql"): + expr = self.query.resolve_ref(expr) + if isinstance(expr, Ref): + if expr.refs not in group_by_refs: + group_by_refs.add(expr.refs) + expressions.append(expr.source) + else: + expressions.append(expr) + # Note that even if the group_by is set, it is only the minimal + # set to group by. So, we need to add cols in select, order_by, and + # having into the select in any case. + selected_expr_positions = {} + for ordinal, (expr, _, alias) in enumerate(select, start=1): + if alias: + selected_expr_positions[expr] = ordinal + # Skip members of the select clause that are already explicitly + # grouped against. + if alias in group_by_refs: + continue + expressions.extend(expr.get_group_by_cols()) + if not self._meta_ordering: + for expr, (sql, params, is_ref) in order_by: + # Skip references to the SELECT clause, as all expressions in + # the SELECT clause are already part of the GROUP BY. + if not is_ref: + expressions.extend(expr.get_group_by_cols()) + having_group_by = self.having.get_group_by_cols() if self.having else () + for expr in having_group_by: + expressions.append(expr) + result = [] + seen = set() + expressions = self.collapse_group_by(expressions, having_group_by) + + allows_group_by_select_index = ( + self.connection.features.allows_group_by_select_index + ) + for expr in expressions: + try: + sql, params = self.compile(expr) + except (EmptyResultSet, FullResultSet): + continue + if ( + allows_group_by_select_index + and (position := selected_expr_positions.get(expr)) is not None + ): + sql, params = str(position), () + else: + sql, params = expr.select_format(self, sql, params) + params_hash = make_hashable(params) + if (sql, params_hash) not in seen: + result.append((sql, params)) + seen.add((sql, params_hash)) + return result + + def collapse_group_by(self, expressions, having): + # If the database supports group by functional dependence reduction, + # then the expressions can be reduced to the set of selected table + # primary keys as all other columns are functionally dependent on them. + if self.connection.features.allows_group_by_selected_pks: + # Filter out all expressions associated with a table's primary key + # present in the grouped columns. This is done by identifying all + # tables that have their primary key included in the grouped + # columns and removing non-primary key columns referring to them. + # Unmanaged models are excluded because they could be representing + # database views on which the optimization might not be allowed. + pks = { + expr + for expr in expressions + if ( + hasattr(expr, "target") + and expr.target.primary_key + and self.connection.features.allows_group_by_selected_pks_on_model( + expr.target.model + ) + ) + } + aliases = {expr.alias for expr in pks} + expressions = [ + expr + for expr in expressions + if expr in pks + or expr in having + or getattr(expr, "alias", None) not in aliases + ] + return expressions + + def get_select(self, with_col_aliases=False): + """ + Return three values: + - a list of 3-tuples of (expression, (sql, params), alias) + - a klass_info structure, + - a dictionary of annotations + + The (sql, params) is what the expression will produce, and alias is the + "AS alias" for the column (possibly None). + + The klass_info structure contains the following information: + - The base model of the query. 
+ - Which columns for that model are present in the query (by + position of the select clause). + - related_klass_infos: [f, klass_info] to descent into + + The annotations is a dictionary of {'attname': column position} values. + """ + select = [] + klass_info = None + annotations = {} + select_idx = 0 + for alias, (sql, params) in self.query.extra_select.items(): + annotations[alias] = select_idx + select.append((RawSQL(sql, params), alias)) + select_idx += 1 + assert not (self.query.select and self.query.default_cols) + select_mask = self.query.get_select_mask() + if self.query.default_cols: + cols = self.get_default_columns(select_mask) + else: + # self.query.select is a special case. These columns never go to + # any model. + cols = self.query.select + if cols: + select_list = [] + for col in cols: + select_list.append(select_idx) + select.append((col, None)) + select_idx += 1 + klass_info = { + "model": self.query.model, + "select_fields": select_list, + } + for alias, annotation in self.query.annotation_select.items(): + annotations[alias] = select_idx + select.append((annotation, alias)) + select_idx += 1 + + if self.query.select_related: + related_klass_infos = self.get_related_selections(select, select_mask) + klass_info["related_klass_infos"] = related_klass_infos + + def get_select_from_parent(klass_info): + for ki in klass_info["related_klass_infos"]: + if ki["from_parent"]: + ki["select_fields"] = ( + klass_info["select_fields"] + ki["select_fields"] + ) + get_select_from_parent(ki) + + get_select_from_parent(klass_info) + + ret = [] + col_idx = 1 + for col, alias in select: + try: + sql, params = self.compile(col) + except EmptyResultSet: + empty_result_set_value = getattr( + col, "empty_result_set_value", NotImplemented + ) + if empty_result_set_value is NotImplemented: + # Select a predicate that's always False. + sql, params = "0", () + else: + sql, params = self.compile(Value(empty_result_set_value)) + except FullResultSet: + sql, params = self.compile(Value(True)) + else: + sql, params = col.select_format(self, sql, params) + if alias is None and with_col_aliases: + alias = f"col{col_idx}" + col_idx += 1 + ret.append((col, (sql, params), alias)) + return ret, klass_info, annotations + + def _order_by_pairs(self): + if self.query.extra_order_by: + ordering = self.query.extra_order_by + elif not self.query.default_ordering: + ordering = self.query.order_by + elif self.query.order_by: + ordering = self.query.order_by + elif (meta := self.query.get_meta()) and meta.ordering: + ordering = meta.ordering + self._meta_ordering = ordering + else: + ordering = [] + if self.query.standard_ordering: + default_order, _ = ORDER_DIR["ASC"] + else: + default_order, _ = ORDER_DIR["DESC"] + + selected_exprs = {} + # Avoid computing `selected_exprs` if there is no `ordering` as it's + # relatively expensive. + if ordering and (select := self.select): + for ordinal, (expr, _, alias) in enumerate(select, start=1): + pos_expr = PositionRef(ordinal, alias, expr) + if alias: + selected_exprs[alias] = pos_expr + selected_exprs[expr] = pos_expr + + for field in ordering: + if hasattr(field, "resolve_expression"): + if isinstance(field, Value): + # output_field must be resolved for constants. 
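+ # e.g. .order_by(Value(3)): wrapping the constant in Cast() gives
+ # it an explicit database type; a bare integer literal in ORDER BY
+ # would otherwise be read as a 1-based column position.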
+ field = Cast(field, field.output_field)
+ if not isinstance(field, OrderBy):
+ field = field.asc()
+ if not self.query.standard_ordering:
+ field = field.copy()
+ field.reverse_ordering()
+ select_ref = selected_exprs.get(field.expression)
+ if select_ref or (
+ isinstance(field.expression, F)
+ and (select_ref := selected_exprs.get(field.expression.name))
+ ):
+ # Emulation of NULLS (FIRST|LAST) cannot be combined with
+ # the usage of ordering by position.
+ if (
+ field.nulls_first is None and field.nulls_last is None
+ ) or self.connection.features.supports_order_by_nulls_modifier:
+ field = field.copy()
+ field.expression = select_ref
+ # Alias collisions are not possible when dealing with
+ # combined queries so fall back to it if emulation of NULLS
+ # handling is required.
+ elif self.query.combinator:
+ field = field.copy()
+ field.expression = Ref(select_ref.refs, select_ref.source)
+ yield field, select_ref is not None
+ continue
+ if field == "?": # random
+ yield OrderBy(Random()), False
+ continue
+
+ col, order = get_order_dir(field, default_order)
+ descending = order == "DESC"
+
+ if select_ref := selected_exprs.get(col):
+ # Reference to expression in SELECT clause
+ yield (
+ OrderBy(
+ select_ref,
+ descending=descending,
+ ),
+ True,
+ )
+ continue
+ if col in self.query.annotations:
+ # Reference to an expression which is masked out of the SELECT
+ # clause.
+ if self.query.combinator and self.select:
+ # Don't use the resolved annotation because other
+ # combined queries might define it differently.
+ expr = F(col)
+ else:
+ expr = self.query.annotations[col]
+ if isinstance(expr, Value):
+ # output_field must be resolved for constants.
+ expr = Cast(expr, expr.output_field)
+ yield OrderBy(expr, descending=descending), False
+ continue
+
+ if "." in field:
+ # This came in through an extra(order_by=...) addition. Pass it
+ # on verbatim.
+ table, col = col.split(".", 1)
+ yield (
+ OrderBy(
+ RawSQL(
+ "%s.%s" % (self.quote_name_unless_alias(table), col), []
+ ),
+ descending=descending,
+ ),
+ False,
+ )
+ continue
+
+ if self.query.extra and col in self.query.extra:
+ if col in self.query.extra_select:
+ yield (
+ OrderBy(
+ Ref(col, RawSQL(*self.query.extra[col])),
+ descending=descending,
+ ),
+ True,
+ )
+ else:
+ yield (
+ OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
+ False,
+ )
+ else:
+ if self.query.combinator and self.select:
+ # Don't use the first model's field because other
+ # combined queries might define it differently.
+ yield OrderBy(F(col), descending=descending), False
+ else:
+ # 'col' is of the form 'field' or 'field1__field2' or
+ # '-field1__field2__field', etc.
+ yield from self.find_ordering_name(
+ field,
+ self.query.get_meta(),
+ default_order=default_order,
+ )
+
+ def get_order_by(self):
+ """
+ Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for
+ the ORDER BY clause.
+
+ The order_by clause can alter the select clause (for example it can add
+ aliases to clauses that do not yet have one, or it can add totally new
+ select clauses).
+ """
+ result = []
+ seen = set()
+ for expr, is_ref in self._order_by_pairs():
+ resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
+ if not is_ref and self.query.combinator and self.select:
+ src = resolved.expression
+ expr_src = expr.expression
+ for sel_expr, _, col_alias in self.select:
+ if src == sel_expr:
+ # When values() is used the exact alias must be used to
+ # reference annotations.
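+ # Sketch of the case handled here (hypothetical alias "total"):
+ # qs1.union(qs2).order_by("total"), where "total" is an annotation,
+ # must point ORDER BY at the alias in the combined SELECT list
+ # rather than at the first query's resolved expression.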
+ if ( + self.query.has_select_fields + and col_alias in self.query.annotation_select + and not ( + isinstance(expr_src, F) and col_alias == expr_src.name + ) + ): + continue + resolved.set_source_expressions( + [Ref(col_alias if col_alias else src.target.column, src)] + ) + break + else: + # Add column used in ORDER BY clause to the selected + # columns and to each combined query. + order_by_idx = len(self.query.select) + 1 + col_alias = f"__orderbycol{order_by_idx}" + for q in self.query.combined_queries: + # If fields were explicitly selected through values() + # combined queries cannot be augmented. + if q.has_select_fields: + raise DatabaseError( + "ORDER BY term does not match any column in " + "the result set." + ) + q.add_annotation(expr_src, col_alias) + self.query.add_select_col(resolved, col_alias) + resolved.set_source_expressions([Ref(col_alias, src)]) + sql, params = self.compile(resolved) + # Don't add the same column twice, but the order direction is + # not taken into account so we strip it. When this entire method + # is refactored into expressions, then we can check each part as we + # generate it. + without_ordering = self.ordering_parts.search(sql)[1] + params_hash = make_hashable(params) + if (without_ordering, params_hash) in seen: + continue + seen.add((without_ordering, params_hash)) + result.append((resolved, (sql, params, is_ref))) + return result + + def get_extra_select(self, order_by, select): + extra_select = [] + if self.query.distinct and not self.query.distinct_fields: + select_sql = [t[1] for t in select] + for expr, (sql, params, is_ref) in order_by: + without_ordering = self.ordering_parts.search(sql)[1] + if not is_ref and (without_ordering, params) not in select_sql: + extra_select.append((expr, (without_ordering, params), None)) + return extra_select + + def quote_name_unless_alias(self, name): + """ + A wrapper around connection.ops.quote_name that doesn't quote aliases + for table names. This avoids problems with some SQL dialects that treat + quoted strings specially (e.g. PostgreSQL). + """ + if name in self.quote_cache: + return self.quote_cache[name] + if ( + (name in self.query.alias_map and name not in self.query.table_map) + or name in self.query.extra_select + or ( + self.query.external_aliases.get(name) + and name not in self.query.table_map + ) + ): + self.quote_cache[name] = name + return name + r = self.connection.ops.quote_name(name) + self.quote_cache[name] = r + return r + + def compile(self, node): + vendor_impl = getattr(node, "as_" + self.connection.vendor, None) + if vendor_impl: + sql, params = vendor_impl(self, self.connection) + else: + sql, params = node.as_sql(self, self.connection) + return sql, params + + def get_combinator_sql(self, combinator, all): + features = self.connection.features + compilers = [ + query.get_compiler(self.using, self.connection, self.elide_empty) + for query in self.query.combined_queries + ] + if not features.supports_slicing_ordering_in_compound: + for compiler in compilers: + if compiler.query.is_sliced: + raise DatabaseError( + "LIMIT/OFFSET not allowed in subqueries of compound statements." + ) + if compiler.get_order_by(): + raise DatabaseError( + "ORDER BY not allowed in subqueries of compound statements." 
+ ) + elif self.query.is_sliced and combinator == "union": + for compiler in compilers: + # A sliced union cannot have its parts elided as some of them + # might be sliced as well and in the event where only a single + # part produces a non-empty resultset it might be impossible to + # generate valid SQL. + compiler.elide_empty = False + parts = () + for compiler in compilers: + try: + # If the columns list is limited, then all combined queries + # must have the same columns list. Set the selects defined on + # the query on all combined queries, if not already set. + if not compiler.query.values_select and self.query.values_select: + compiler.query = compiler.query.clone() + compiler.query.set_values( + ( + *self.query.extra_select, + *self.query.values_select, + *self.query.annotation_select, + ) + ) + part_sql, part_args = compiler.as_sql(with_col_aliases=True) + if compiler.query.combinator: + # Wrap in a subquery if wrapping in parentheses isn't + # supported. + if not features.supports_parentheses_in_compound: + part_sql = "SELECT * FROM ({})".format(part_sql) + # Add parentheses when combining with compound query if not + # already added for all compound queries. + elif ( + self.query.subquery + or not features.supports_slicing_ordering_in_compound + ): + part_sql = "({})".format(part_sql) + elif ( + self.query.subquery + and features.supports_slicing_ordering_in_compound + ): + part_sql = "({})".format(part_sql) + parts += ((part_sql, part_args),) + except EmptyResultSet: + # Omit the empty queryset with UNION and with DIFFERENCE if the + # first queryset is nonempty. + if combinator == "union" or (combinator == "difference" and parts): + continue + raise + if not parts: + raise EmptyResultSet + combinator_sql = self.connection.ops.set_operators[combinator] + if all and combinator == "union": + combinator_sql += " ALL" + braces = "{}" + if not self.query.subquery and features.supports_slicing_ordering_in_compound: + braces = "({})" + sql_parts, args_parts = zip( + *((braces.format(sql), args) for sql, args in parts) + ) + result = [" {} ".format(combinator_sql).join(sql_parts)] + params = [] + for part in args_parts: + params.extend(part) + return result, params + + def get_qualify_sql(self): + where_parts = [] + if self.where: + where_parts.append(self.where) + if self.having: + where_parts.append(self.having) + inner_query = self.query.clone() + inner_query.subquery = True + inner_query.where = inner_query.where.__class__(where_parts) + # Augment the inner query with any window function references that + # might have been masked via values() and alias(). If any masked + # aliases are added they'll be masked again to avoid fetching + # the data in the `if qual_aliases` branch below. 
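+ # This path is what makes filtering on window functions possible,
+ # e.g. (a sketch, model and field names hypothetical):
+ # Employee.objects.annotate(
+ # rank=Window(Rank(), order_by=F("salary").desc())
+ # ).filter(rank__lte=3)
+ # The window expression runs in the inner query and the predicate
+ # is applied to its aliased result in the outer query.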
+ select = { + expr: alias for expr, _, alias in self.get_select(with_col_aliases=True)[0] + } + select_aliases = set(select.values()) + qual_aliases = set() + replacements = {} + + def collect_replacements(expressions): + while expressions: + expr = expressions.pop() + if expr in replacements: + continue + elif select_alias := select.get(expr): + replacements[expr] = select_alias + elif isinstance(expr, Lookup): + expressions.extend(expr.get_source_expressions()) + elif isinstance(expr, Ref): + if expr.refs not in select_aliases: + expressions.extend(expr.get_source_expressions()) + else: + num_qual_alias = len(qual_aliases) + select_alias = f"qual{num_qual_alias}" + qual_aliases.add(select_alias) + inner_query.add_annotation(expr, select_alias) + replacements[expr] = select_alias + + collect_replacements(list(self.qualify.leaves())) + self.qualify = self.qualify.replace_expressions( + {expr: Ref(alias, expr) for expr, alias in replacements.items()} + ) + order_by = [] + for order_by_expr, *_ in self.get_order_by(): + collect_replacements(order_by_expr.get_source_expressions()) + order_by.append( + order_by_expr.replace_expressions( + {expr: Ref(alias, expr) for expr, alias in replacements.items()} + ) + ) + inner_query_compiler = inner_query.get_compiler( + self.using, connection=self.connection, elide_empty=self.elide_empty + ) + inner_sql, inner_params = inner_query_compiler.as_sql( + # The limits must be applied to the outer query to avoid pruning + # results too eagerly. + with_limits=False, + # Force unique aliasing of selected columns to avoid collisions + # and make rhs predicates referencing easier. + with_col_aliases=True, + ) + qualify_sql, qualify_params = self.compile(self.qualify) + result = [ + "SELECT * FROM (", + inner_sql, + ")", + self.connection.ops.quote_name("qualify"), + "WHERE", + qualify_sql, + ] + if qual_aliases: + # If some select aliases were unmasked for filtering purposes they + # must be masked back. + cols = [self.connection.ops.quote_name(alias) for alias in select.values()] + result = [ + "SELECT", + ", ".join(cols), + "FROM (", + *result, + ")", + self.connection.ops.quote_name("qualify_mask"), + ] + params = list(inner_params) + qualify_params + # As the SQL spec is unclear on whether or not derived tables + # ordering must propagate it has to be explicitly repeated on the + # outer-most query to ensure it's preserved. + if order_by: + ordering_sqls = [] + for ordering in order_by: + ordering_sql, ordering_params = self.compile(ordering) + ordering_sqls.append(ordering_sql) + params.extend(ordering_params) + result.extend(["ORDER BY", ", ".join(ordering_sqls)]) + return result, params + + def as_sql(self, with_limits=True, with_col_aliases=False): + """ + Create the SQL for this query. Return the SQL string and list of + parameters. + + If 'with_limits' is False, any limit/offset information is not included + in the query. + """ + refcounts_before = self.query.alias_refcount.copy() + try: + combinator = self.query.combinator + extra_select, order_by, group_by = self.pre_sql_setup( + with_col_aliases=with_col_aliases or bool(combinator), + ) + for_update_part = None + # Is a LIMIT/OFFSET clause needed? 
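+ # Slicing sets the marks, e.g. qs[5:15] stores low_mark=5 and
+ # high_mark=15, which limit_offset_sql() below renders roughly as
+ # "LIMIT 10 OFFSET 5".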
+ with_limit_offset = with_limits and self.query.is_sliced + combinator = self.query.combinator + features = self.connection.features + if combinator: + if not getattr(features, "supports_select_{}".format(combinator)): + raise NotSupportedError( + "{} is not supported on this database backend.".format( + combinator + ) + ) + result, params = self.get_combinator_sql( + combinator, self.query.combinator_all + ) + elif self.qualify: + result, params = self.get_qualify_sql() + order_by = None + else: + distinct_fields, distinct_params = self.get_distinct() + # This must come after 'select', 'ordering', and 'distinct' + # (see docstring of get_from_clause() for details). + from_, f_params = self.get_from_clause() + try: + where, w_params = ( + self.compile(self.where) if self.where is not None else ("", []) + ) + except EmptyResultSet: + if self.elide_empty: + raise + # Use a predicate that's always False. + where, w_params = "0 = 1", [] + except FullResultSet: + where, w_params = "", [] + try: + having, h_params = ( + self.compile(self.having) + if self.having is not None + else ("", []) + ) + except FullResultSet: + having, h_params = "", [] + result = ["SELECT"] + params = [] + + if self.query.distinct: + distinct_result, distinct_params = self.connection.ops.distinct_sql( + distinct_fields, + distinct_params, + ) + result += distinct_result + params += distinct_params + + out_cols = [] + for _, (s_sql, s_params), alias in self.select + extra_select: + if alias: + s_sql = "%s AS %s" % ( + s_sql, + self.connection.ops.quote_name(alias), + ) + params.extend(s_params) + out_cols.append(s_sql) + + result += [", ".join(out_cols)] + if from_: + result += ["FROM", *from_] + elif self.connection.features.bare_select_suffix: + result += [self.connection.features.bare_select_suffix] + params.extend(f_params) + + if self.query.select_for_update and features.has_select_for_update: + if ( + self.connection.get_autocommit() + # Don't raise an exception when database doesn't + # support transactions, as it's a noop. + and features.supports_transactions + ): + raise TransactionManagementError( + "select_for_update cannot be used outside of a transaction." + ) + + if ( + with_limit_offset + and not features.supports_select_for_update_with_limit + ): + raise NotSupportedError( + "LIMIT/OFFSET is not supported with " + "select_for_update on this database backend." + ) + nowait = self.query.select_for_update_nowait + skip_locked = self.query.select_for_update_skip_locked + of = self.query.select_for_update_of + no_key = self.query.select_for_no_key_update + # If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the + # backend doesn't support it, raise NotSupportedError to + # prevent a possible deadlock. + if nowait and not features.has_select_for_update_nowait: + raise NotSupportedError( + "NOWAIT is not supported on this database backend." + ) + elif skip_locked and not features.has_select_for_update_skip_locked: + raise NotSupportedError( + "SKIP LOCKED is not supported on this database backend." + ) + elif of and not features.has_select_for_update_of: + raise NotSupportedError( + "FOR UPDATE OF is not supported on this database backend." + ) + elif no_key and not features.has_select_for_no_key_update: + raise NotSupportedError( + "FOR NO KEY UPDATE is not supported on this " + "database backend." 
+ ) + for_update_part = self.connection.ops.for_update_sql( + nowait=nowait, + skip_locked=skip_locked, + of=self.get_select_for_update_of_arguments(), + no_key=no_key, + ) + + if for_update_part and features.for_update_after_from: + result.append(for_update_part) + + if where: + result.append("WHERE %s" % where) + params.extend(w_params) + + grouping = [] + for g_sql, g_params in group_by: + grouping.append(g_sql) + params.extend(g_params) + if grouping: + if distinct_fields: + raise NotImplementedError( + "annotate() + distinct(fields) is not implemented." + ) + order_by = order_by or self.connection.ops.force_no_ordering() + result.append("GROUP BY %s" % ", ".join(grouping)) + if self._meta_ordering: + order_by = None + if having: + result.append("HAVING %s" % having) + params.extend(h_params) + + if self.query.explain_info: + result.insert( + 0, + self.connection.ops.explain_query_prefix( + self.query.explain_info.format, + **self.query.explain_info.options, + ), + ) + + if order_by: + ordering = [] + for _, (o_sql, o_params, _) in order_by: + ordering.append(o_sql) + params.extend(o_params) + order_by_sql = "ORDER BY %s" % ", ".join(ordering) + if combinator and features.requires_compound_order_by_subquery: + result = ["SELECT * FROM (", *result, ")", order_by_sql] + else: + result.append(order_by_sql) + + if with_limit_offset: + result.append( + self.connection.ops.limit_offset_sql( + self.query.low_mark, self.query.high_mark + ) + ) + + if for_update_part and not features.for_update_after_from: + result.append(for_update_part) + + if self.query.subquery and extra_select: + # If the query is used as a subquery, the extra selects would + # result in more columns than the left-hand side expression is + # expecting. This can happen when a subquery uses a combination + # of order_by() and distinct(), forcing the ordering expressions + # to be selected as well. Wrap the query in another subquery + # to exclude extraneous selects. + sub_selects = [] + sub_params = [] + for index, (select, _, alias) in enumerate(self.select, start=1): + if alias: + sub_selects.append( + "%s.%s" + % ( + self.connection.ops.quote_name("subquery"), + self.connection.ops.quote_name(alias), + ) + ) + else: + select_clone = select.relabeled_clone( + {select.alias: "subquery"} + ) + subselect, subparams = select_clone.as_sql( + self, self.connection + ) + sub_selects.append(subselect) + sub_params.extend(subparams) + return "SELECT %s FROM (%s) subquery" % ( + ", ".join(sub_selects), + " ".join(result), + ), tuple(sub_params + params) + + return " ".join(result), tuple(params) + finally: + # Finally do cleanup - get rid of the joins we created above. + self.query.reset_refcounts(refcounts_before) + + def get_default_columns( + self, select_mask, start_alias=None, opts=None, from_parent=None + ): + """ + Compute the default columns for selecting every field in the base + model. Will sometimes be called to pull in related models (e.g. via + select_related), in which case "opts" and "start_alias" will be given + to provide a starting point for the traversal. + + Return a list of strings, quoted appropriately for use in SQL + directly, as well as a set of aliases used in the select statement (if + 'as_pairs' is True, return a list of (alias, col_name) pairs instead + of strings as the first component and None as the second component). 
+ """ + result = [] + if opts is None: + if (opts := self.query.get_meta()) is None: + return result + start_alias = start_alias or self.query.get_initial_alias() + # The 'seen_models' is used to optimize checking the needed parent + # alias for a given field. This also includes None -> start_alias to + # be used by local fields. + seen_models = {None: start_alias} + + for field in opts.concrete_fields: + model = field.model._meta.concrete_model + # A proxy model will have a different model and concrete_model. We + # will assign None if the field belongs to this model. + if model == opts.model: + model = None + if ( + from_parent + and model is not None + and issubclass( + from_parent._meta.concrete_model, model._meta.concrete_model + ) + ): + # Avoid loading data for already loaded parents. + # We end up here in the case select_related() resolution + # proceeds from parent model to child model. In that case the + # parent model data is already present in the SELECT clause, + # and we want to avoid reloading the same data again. + continue + if select_mask and field not in select_mask: + continue + alias = self.query.join_parent_model(opts, model, start_alias, seen_models) + column = field.get_col(alias) + result.append(column) + return result + + def get_distinct(self): + """ + Return a quoted list of fields to use in DISTINCT ON part of the query. + + This method can alter the tables in the query, and thus it must be + called before get_from_clause(). + """ + result = [] + params = [] + opts = self.query.get_meta() + + for name in self.query.distinct_fields: + parts = name.split(LOOKUP_SEP) + _, targets, alias, joins, path, _, transform_function = self._setup_joins( + parts, opts, None + ) + targets, alias, _ = self.query.trim_joins(targets, joins, path) + for target in targets: + if name in self.query.annotation_select: + result.append(self.connection.ops.quote_name(name)) + else: + r, p = self.compile(transform_function(target, alias)) + result.append(r) + params.append(p) + return result, params + + def find_ordering_name( + self, name, opts, alias=None, default_order="ASC", already_seen=None + ): + """ + Return the table alias (the name might be ambiguous, the alias will + not be) and column name for ordering by the given 'name' parameter. + The 'name' is of the form 'field1__field2__...__fieldN'. + """ + name, order = get_order_dir(name, default_order) + descending = order == "DESC" + pieces = name.split(LOOKUP_SEP) + ( + field, + targets, + alias, + joins, + path, + opts, + transform_function, + ) = self._setup_joins(pieces, opts, alias) + + # If we get to this point and the field is a relation to another model, + # append the default ordering for that model unless it is the pk + # shortcut or the attribute name of the field that is specified or + # there are transforms to process. + if ( + field.is_relation + and opts.ordering + and getattr(field, "attname", None) != pieces[-1] + and name != "pk" + and not getattr(transform_function, "has_transforms", False) + ): + # Firstly, avoid infinite loops. 
+ already_seen = already_seen or set()
+ join_tuple = tuple(
+ getattr(self.query.alias_map[j], "join_cols", None) for j in joins
+ )
+ if join_tuple in already_seen:
+ raise FieldError("Infinite loop caused by ordering.")
+ already_seen.add(join_tuple)
+
+ results = []
+ for item in opts.ordering:
+ if hasattr(item, "resolve_expression") and not isinstance(
+ item, OrderBy
+ ):
+ item = item.desc() if descending else item.asc()
+ if isinstance(item, OrderBy):
+ results.append(
+ (item.prefix_references(f"{name}{LOOKUP_SEP}"), False)
+ )
+ continue
+ results.extend(
+ (expr.prefix_references(f"{name}{LOOKUP_SEP}"), is_ref)
+ for expr, is_ref in self.find_ordering_name(
+ item, opts, alias, order, already_seen
+ )
+ )
+ return results
+ targets, alias, _ = self.query.trim_joins(targets, joins, path)
+ return [
+ (OrderBy(transform_function(t, alias), descending=descending), False)
+ for t in targets
+ ]
+
+ def _setup_joins(self, pieces, opts, alias):
+ """
+ Helper method for get_order_by() and get_distinct().
+
+ get_ordering() and get_distinct() must produce the same target columns
+ for the same input, as the prefixes of get_ordering() and get_distinct()
+ must match. Executing SQL where this is not true is an error.
+ """
+ alias = alias or self.query.get_initial_alias()
+ field, targets, opts, joins, path, transform_function = self.query.setup_joins(
+ pieces, opts, alias
+ )
+ alias = joins[-1]
+ return field, targets, alias, joins, path, opts, transform_function
+
+ def get_from_clause(self):
+ """
+ Return a list of strings that are joined together to go after the
+ "FROM" part of the query, as well as a list of any extra parameters
+ that need to be included. Subclasses can override this to create a
+ from-clause via a "select".
+
+ This should only be called after any SQL construction methods that
+ might change the tables that are needed. This means the select columns,
+ ordering, and distinct must be done first.
+ """
+ result = []
+ params = []
+ for alias in tuple(self.query.alias_map):
+ if not self.query.alias_refcount[alias]:
+ continue
+ try:
+ from_clause = self.query.alias_map[alias]
+ except KeyError:
+ # Extra tables can end up in self.tables, but not in the
+ # alias_map if they aren't in a join. That's OK. We skip them.
+ continue
+ clause_sql, clause_params = self.compile(from_clause)
+ result.append(clause_sql)
+ params.extend(clause_params)
+ for t in self.query.extra_tables:
+ alias, _ = self.query.table_alias(t)
+ # Only add the alias if it's not already present (the table_alias()
+ # call increments the refcount, so an alias refcount of one means
+ # this is the only reference).
+ if (
+ alias not in self.query.alias_map
+ or self.query.alias_refcount[alias] == 1
+ ):
+ result.append(", %s" % self.quote_name_unless_alias(alias))
+ return result, params
+
+ def get_related_selections(
+ self,
+ select,
+ select_mask,
+ opts=None,
+ root_alias=None,
+ cur_depth=1,
+ requested=None,
+ restricted=None,
+ ):
+ """
+ Fill in the information needed for a select_related query. The current
+ depth is measured as the number of connections away from the root model
+ (for example, cur_depth=1 means we are looking at models with direct
+ connections to the root model).
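+
+ For example, select_related("author__publisher") (hypothetical models)
+ descends twice: author columns are added at cur_depth=1 and publisher
+ columns at cur_depth=2.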
+ """ + + def _get_field_choices(): + direct_choices = (f.name for f in opts.fields if f.is_relation) + reverse_choices = ( + f.field.related_query_name() + for f in opts.related_objects + if f.field.unique + ) + return chain( + direct_choices, reverse_choices, self.query._filtered_relations + ) + + related_klass_infos = [] + if not restricted and cur_depth > self.query.max_depth: + # We've recursed far enough; bail out. + return related_klass_infos + + if not opts: + opts = self.query.get_meta() + root_alias = self.query.get_initial_alias() + + # Setup for the case when only particular related fields should be + # included in the related selection. + fields_found = set() + if requested is None: + restricted = isinstance(self.query.select_related, dict) + if restricted: + requested = self.query.select_related + + def get_related_klass_infos(klass_info, related_klass_infos): + klass_info["related_klass_infos"] = related_klass_infos + + for f in opts.fields: + fields_found.add(f.name) + + if restricted: + next = requested.get(f.name, {}) + if not f.is_relation: + # If a non-related field is used like a relation, + # or if a single non-relational field is given. + if next or f.name in requested: + raise FieldError( + "Non-relational field given in select_related: '%s'. " + "Choices are: %s" + % ( + f.name, + ", ".join(_get_field_choices()) or "(none)", + ) + ) + else: + next = False + + if not select_related_descend(f, restricted, requested, select_mask): + continue + related_select_mask = select_mask.get(f) or {} + klass_info = { + "model": f.remote_field.model, + "field": f, + "reverse": False, + "local_setter": f.set_cached_value, + "remote_setter": f.remote_field.set_cached_value + if f.unique + else lambda x, y: None, + "from_parent": False, + } + related_klass_infos.append(klass_info) + select_fields = [] + _, _, _, joins, _, _ = self.query.setup_joins([f.name], opts, root_alias) + alias = joins[-1] + columns = self.get_default_columns( + related_select_mask, start_alias=alias, opts=f.remote_field.model._meta + ) + for col in columns: + select_fields.append(len(select)) + select.append((col, None)) + klass_info["select_fields"] = select_fields + next_klass_infos = self.get_related_selections( + select, + related_select_mask, + f.remote_field.model._meta, + alias, + cur_depth + 1, + next, + restricted, + ) + get_related_klass_infos(klass_info, next_klass_infos) + + if restricted: + related_fields = [ + (o.field, o.related_model) + for o in opts.related_objects + if o.field.unique and not o.many_to_many + ] + for related_field, model in related_fields: + related_select_mask = select_mask.get(related_field) or {} + if not select_related_descend( + related_field, + restricted, + requested, + related_select_mask, + reverse=True, + ): + continue + + related_field_name = related_field.related_query_name() + fields_found.add(related_field_name) + + join_info = self.query.setup_joins( + [related_field_name], opts, root_alias + ) + alias = join_info.joins[-1] + from_parent = issubclass(model, opts.model) and model is not opts.model + klass_info = { + "model": model, + "field": related_field, + "reverse": True, + "local_setter": related_field.remote_field.set_cached_value, + "remote_setter": related_field.set_cached_value, + "from_parent": from_parent, + } + related_klass_infos.append(klass_info) + select_fields = [] + columns = self.get_default_columns( + related_select_mask, + start_alias=alias, + opts=model._meta, + from_parent=opts.model, + ) + for col in columns: + 
select_fields.append(len(select)) + select.append((col, None)) + klass_info["select_fields"] = select_fields + next = requested.get(related_field.related_query_name(), {}) + next_klass_infos = self.get_related_selections( + select, + related_select_mask, + model._meta, + alias, + cur_depth + 1, + next, + restricted, + ) + get_related_klass_infos(klass_info, next_klass_infos) + + def local_setter(final_field, obj, from_obj): + # Set a reverse fk object when relation is non-empty. + if from_obj: + final_field.remote_field.set_cached_value(from_obj, obj) + + def local_setter_noop(obj, from_obj): + pass + + def remote_setter(name, obj, from_obj): + setattr(from_obj, name, obj) + + for name in list(requested): + # Filtered relations work only on the topmost level. + if cur_depth > 1: + break + if name in self.query._filtered_relations: + fields_found.add(name) + final_field, _, join_opts, joins, _, _ = self.query.setup_joins( + [name], opts, root_alias + ) + model = join_opts.model + alias = joins[-1] + from_parent = ( + issubclass(model, opts.model) and model is not opts.model + ) + klass_info = { + "model": model, + "field": final_field, + "reverse": True, + "local_setter": ( + partial(local_setter, final_field) + if len(joins) <= 2 + else local_setter_noop + ), + "remote_setter": partial(remote_setter, name), + "from_parent": from_parent, + } + related_klass_infos.append(klass_info) + select_fields = [] + field_select_mask = select_mask.get((name, final_field)) or {} + columns = self.get_default_columns( + field_select_mask, + start_alias=alias, + opts=model._meta, + from_parent=opts.model, + ) + for col in columns: + select_fields.append(len(select)) + select.append((col, None)) + klass_info["select_fields"] = select_fields + next_requested = requested.get(name, {}) + next_klass_infos = self.get_related_selections( + select, + field_select_mask, + opts=model._meta, + root_alias=alias, + cur_depth=cur_depth + 1, + requested=next_requested, + restricted=restricted, + ) + get_related_klass_infos(klass_info, next_klass_infos) + fields_not_found = set(requested).difference(fields_found) + if fields_not_found: + invalid_fields = ("'%s'" % s for s in fields_not_found) + raise FieldError( + "Invalid field name(s) given in select_related: %s. " + "Choices are: %s" + % ( + ", ".join(invalid_fields), + ", ".join(_get_field_choices()) or "(none)", + ) + ) + return related_klass_infos + + def get_select_for_update_of_arguments(self): + """ + Return a quoted list of arguments for the SELECT FOR UPDATE OF part of + the query. + """ + + def _get_parent_klass_info(klass_info): + concrete_model = klass_info["model"]._meta.concrete_model + for parent_model, parent_link in concrete_model._meta.parents.items(): + parent_list = parent_model._meta.get_parent_list() + yield { + "model": parent_model, + "field": parent_link, + "reverse": False, + "select_fields": [ + select_index + for select_index in klass_info["select_fields"] + # Selected columns from a model or its parents. + if ( + self.select[select_index][0].target.model == parent_model + or self.select[select_index][0].target.model in parent_list + ) + ], + } + + def _get_first_selected_col_from_model(klass_info): + """ + Find the first selected column from a model. If it doesn't exist, + don't lock a model. + + select_fields is filled recursively, so it also contains fields + from the parent models. 
+ """ + concrete_model = klass_info["model"]._meta.concrete_model + for select_index in klass_info["select_fields"]: + if self.select[select_index][0].target.model == concrete_model: + return self.select[select_index][0] + + def _get_field_choices(): + """Yield all allowed field paths in breadth-first search order.""" + queue = collections.deque([(None, self.klass_info)]) + while queue: + parent_path, klass_info = queue.popleft() + if parent_path is None: + path = [] + yield "self" + else: + field = klass_info["field"] + if klass_info["reverse"]: + field = field.remote_field + path = parent_path + [field.name] + yield LOOKUP_SEP.join(path) + queue.extend( + (path, klass_info) + for klass_info in _get_parent_klass_info(klass_info) + ) + queue.extend( + (path, klass_info) + for klass_info in klass_info.get("related_klass_infos", []) + ) + + if not self.klass_info: + return [] + result = [] + invalid_names = [] + for name in self.query.select_for_update_of: + klass_info = self.klass_info + if name == "self": + col = _get_first_selected_col_from_model(klass_info) + else: + for part in name.split(LOOKUP_SEP): + klass_infos = ( + *klass_info.get("related_klass_infos", []), + *_get_parent_klass_info(klass_info), + ) + for related_klass_info in klass_infos: + field = related_klass_info["field"] + if related_klass_info["reverse"]: + field = field.remote_field + if field.name == part: + klass_info = related_klass_info + break + else: + klass_info = None + break + if klass_info is None: + invalid_names.append(name) + continue + col = _get_first_selected_col_from_model(klass_info) + if col is not None: + if self.connection.features.select_for_update_of_column: + result.append(self.compile(col)[0]) + else: + result.append(self.quote_name_unless_alias(col.alias)) + if invalid_names: + raise FieldError( + "Invalid field name(s) given in select_for_update(of=(...)): %s. " + "Only relational fields followed in the query are allowed. " + "Choices are: %s." + % ( + ", ".join(invalid_names), + ", ".join(_get_field_choices()), + ) + ) + return result + + def get_converters(self, expressions): + converters = {} + for i, expression in enumerate(expressions): + if expression: + backend_converters = self.connection.ops.get_db_converters(expression) + field_converters = expression.get_db_converters(self.connection) + if backend_converters or field_converters: + converters[i] = (backend_converters + field_converters, expression) + return converters + + def apply_converters(self, rows, converters): + connection = self.connection + converters = list(converters.items()) + for row in map(list, rows): + for pos, (convs, expression) in converters: + value = row[pos] + for converter in convs: + value = converter(value, expression, connection) + row[pos] = value + yield row + + def results_iter( + self, + results=None, + tuple_expected=False, + chunked_fetch=False, + chunk_size=GET_ITERATOR_CHUNK_SIZE, + ): + """Return an iterator over the results from executing this query.""" + if results is None: + results = self.execute_sql( + MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size + ) + fields = [s[0] for s in self.select[0 : self.col_count]] + converters = self.get_converters(fields) + rows = chain.from_iterable(results) + if converters: + rows = self.apply_converters(rows, converters) + if tuple_expected: + rows = map(tuple, rows) + return rows + + def has_results(self): + """ + Backends (e.g. NoSQL) can override this in order to use optimized + versions of "query has any results." 
+ """ + return bool(self.execute_sql(SINGLE)) + + def execute_sql( + self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE + ): + """ + Run the query against the database and return the result(s). The + return value is a single data item if result_type is SINGLE, or an + iterator over the results if the result_type is MULTI. + + result_type is either MULTI (use fetchmany() to retrieve all rows), + SINGLE (only retrieve a single row), or None. In this last case, the + cursor is returned if any query is executed, since it's used by + subclasses such as InsertQuery). It's possible, however, that no query + is needed, as the filters describe an empty set. In that case, None is + returned, to avoid any unnecessary database interaction. + """ + result_type = result_type or NO_RESULTS + try: + sql, params = self.as_sql() + if not sql: + raise EmptyResultSet + except EmptyResultSet: + if result_type == MULTI: + return iter([]) + else: + return + if chunked_fetch: + cursor = self.connection.chunked_cursor() + else: + cursor = self.connection.cursor() + try: + cursor.execute(sql, params) + except Exception: + # Might fail for server-side cursors (e.g. connection closed) + cursor.close() + raise + + if result_type == CURSOR: + # Give the caller the cursor to process and close. + return cursor + if result_type == SINGLE: + try: + val = cursor.fetchone() + if val: + return val[0 : self.col_count] + return val + finally: + # done with the cursor + cursor.close() + if result_type == NO_RESULTS: + cursor.close() + return + + result = cursor_iter( + cursor, + self.connection.features.empty_fetchmany_value, + self.col_count if self.has_extra_select else None, + chunk_size, + ) + if not chunked_fetch or not self.connection.features.can_use_chunked_reads: + # If we are using non-chunked reads, we return the same data + # structure as normally, but ensure it is all read into memory + # before going any further. Use chunked_fetch if requested, + # unless the database doesn't support it. + return list(result) + return result + + def as_subquery_condition(self, alias, columns, compiler): + qn = compiler.quote_name_unless_alias + qn2 = self.connection.ops.quote_name + + for index, select_col in enumerate(self.query.select): + lhs_sql, lhs_params = self.compile(select_col) + rhs = "%s.%s" % (qn(alias), qn2(columns[index])) + self.query.where.add(RawSQL("%s = %s" % (lhs_sql, rhs), lhs_params), AND) + + sql, params = self.as_sql() + return "EXISTS (%s)" % sql, params + + def explain_query(self): + result = list(self.execute_sql()) + # Some backends return 1 item tuples with strings, and others return + # tuples with integers and strings. Flatten them out into strings. + format_ = self.query.explain_info.format + output_formatter = json.dumps if format_ and format_.lower() == "json" else str + for row in result[0]: + if not isinstance(row, str): + yield " ".join(output_formatter(c) for c in row) + else: + yield row + + +class SQLInsertCompiler(SQLCompiler): + returning_fields = None + returning_params = () + + def field_as_sql(self, field, val): + """ + Take a field and a value intended to be saved on that field, and + return placeholder SQL and accompanying params. Check for raw values, + expressions, and fields with get_placeholder() defined in that order. + + When field is None, consider the value raw and use it as the + placeholder, with no corresponding parameters returned. + """ + if field is None: + # A field value of None means the value is raw. 
+ sql, params = val, [] + elif hasattr(val, "as_sql"): + # This is an expression, let's compile it. + sql, params = self.compile(val) + elif hasattr(field, "get_placeholder"): + # Some fields (e.g. geo fields) need special munging before + # they can be inserted. + sql, params = field.get_placeholder(val, self, self.connection), [val] + else: + # Return the common case for the placeholder + sql, params = "%s", [val] + + # The following hook is only used by Oracle Spatial, which sometimes + # needs to yield 'NULL' and [] as its placeholder and params instead + # of '%s' and [None]. The 'NULL' placeholder is produced earlier by + # OracleOperations.get_geom_placeholder(). The following line removes + # the corresponding None parameter. See ticket #10888. + params = self.connection.ops.modify_insert_params(sql, params) + + return sql, params + + def prepare_value(self, field, value): + """ + Prepare a value to be used in a query by resolving it if it is an + expression and otherwise calling the field's get_db_prep_save(). + """ + if hasattr(value, "resolve_expression"): + value = value.resolve_expression( + self.query, allow_joins=False, for_save=True + ) + # Don't allow values containing Col expressions. They refer to + # existing columns on a row, but in the case of insert the row + # doesn't exist yet. + if value.contains_column_references: + raise ValueError( + 'Failed to insert expression "%s" on %s. F() expressions ' + "can only be used to update, not to insert." % (value, field) + ) + if value.contains_aggregate: + raise FieldError( + "Aggregate functions are not allowed in this query " + "(%s=%r)." % (field.name, value) + ) + if value.contains_over_clause: + raise FieldError( + "Window expressions are not allowed in this query (%s=%r)." + % (field.name, value) + ) + return field.get_db_prep_save(value, connection=self.connection) + + def pre_save_val(self, field, obj): + """ + Get the given field's value off the given obj. pre_save() is used for + things like auto_now on DateTimeField. Skip it if this is a raw query. + """ + if self.query.raw: + return getattr(obj, field.attname) + return field.pre_save(obj, add=True) + + def assemble_as_sql(self, fields, value_rows): + """ + Take a sequence of N fields and a sequence of M rows of values, and + generate placeholder SQL and parameters for each field and value. + Return a pair containing: + * a sequence of M rows of N SQL placeholder strings, and + * a sequence of M rows of corresponding parameter values. + + Each placeholder string may contain any number of '%s' interpolation + strings, and each parameter row will contain exactly as many params + as the total number of '%s's in the corresponding placeholder row. + """ + if not value_rows: + return [], [] + + # list of (sql, [params]) tuples for each object to be saved + # Shape: [n_objs][n_fields][2] + rows_of_fields_as_sql = ( + (self.field_as_sql(field, v) for field, v in zip(fields, row)) + for row in value_rows + ) + + # tuple like ([sqls], [[params]s]) for each object to be saved + # Shape: [n_objs][2][n_fields] + sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql) + + # Extract separate lists for placeholders and params. + # Each of these has shape [n_objs][n_fields] + placeholder_rows, param_rows = zip(*sql_and_param_pair_rows) + + # Params for each field are still lists, and need to be flattened. 
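+ # e.g. per-field params [[10], ["POINT(1 1)"]] flatten to
+ # [10, "POINT(1 1)"], matching the number of "%s" placeholders in
+ # the corresponding placeholder row.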
+ param_rows = [[p for ps in row for p in ps] for row in param_rows] + + return placeholder_rows, param_rows + + def as_sql(self): + # We don't need quote_name_unless_alias() here, since these are all + # going to be column names (so we can avoid the extra overhead). + qn = self.connection.ops.quote_name + opts = self.query.get_meta() + insert_statement = self.connection.ops.insert_statement( + on_conflict=self.query.on_conflict, + ) + result = ["%s %s" % (insert_statement, qn(opts.db_table))] + fields = self.query.fields or [opts.pk] + result.append("(%s)" % ", ".join(qn(f.column) for f in fields)) + + if self.query.fields: + value_rows = [ + [ + self.prepare_value(field, self.pre_save_val(field, obj)) + for field in fields + ] + for obj in self.query.objs + ] + else: + # An empty object. + value_rows = [ + [self.connection.ops.pk_default_value()] for _ in self.query.objs + ] + fields = [None] + + # Currently the backends just accept values when generating bulk + # queries and generate their own placeholders. Doing that isn't + # necessary and it should be possible to use placeholders and + # expressions in bulk inserts too. + can_bulk = ( + not self.returning_fields and self.connection.features.has_bulk_insert + ) + + placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows) + + on_conflict_suffix_sql = self.connection.ops.on_conflict_suffix_sql( + fields, + self.query.on_conflict, + (f.column for f in self.query.update_fields), + (f.column for f in self.query.unique_fields), + ) + if ( + self.returning_fields + and self.connection.features.can_return_columns_from_insert + ): + if self.connection.features.can_return_rows_from_bulk_insert: + result.append( + self.connection.ops.bulk_insert_sql(fields, placeholder_rows) + ) + params = param_rows + else: + result.append("VALUES (%s)" % ", ".join(placeholder_rows[0])) + params = [param_rows[0]] + if on_conflict_suffix_sql: + result.append(on_conflict_suffix_sql) + # Skip empty r_sql to allow subclasses to customize behavior for + # 3rd party backends. Refs #19096. 
+ r_sql, self.returning_params = self.connection.ops.return_insert_columns( + self.returning_fields + ) + if r_sql: + result.append(r_sql) + params += [self.returning_params] + return [(" ".join(result), tuple(chain.from_iterable(params)))] + + if can_bulk: + result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) + if on_conflict_suffix_sql: + result.append(on_conflict_suffix_sql) + return [(" ".join(result), tuple(p for ps in param_rows for p in ps))] + else: + if on_conflict_suffix_sql: + result.append(on_conflict_suffix_sql) + return [ + (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) + for p, vals in zip(placeholder_rows, param_rows) + ] + + def execute_sql(self, returning_fields=None): + assert not ( + returning_fields + and len(self.query.objs) != 1 + and not self.connection.features.can_return_rows_from_bulk_insert + ) + opts = self.query.get_meta() + self.returning_fields = returning_fields + with self.connection.cursor() as cursor: + for sql, params in self.as_sql(): + cursor.execute(sql, params) + if not self.returning_fields: + return [] + if ( + self.connection.features.can_return_rows_from_bulk_insert + and len(self.query.objs) > 1 + ): + rows = self.connection.ops.fetch_returned_insert_rows(cursor) + elif self.connection.features.can_return_columns_from_insert: + assert len(self.query.objs) == 1 + rows = [ + self.connection.ops.fetch_returned_insert_columns( + cursor, + self.returning_params, + ) + ] + else: + rows = [ + ( + self.connection.ops.last_insert_id( + cursor, + opts.db_table, + opts.pk.column, + ), + ) + ] + cols = [field.get_col(opts.db_table) for field in self.returning_fields] + converters = self.get_converters(cols) + if converters: + rows = list(self.apply_converters(rows, converters)) + return rows + + +class SQLDeleteCompiler(SQLCompiler): + @cached_property + def single_alias(self): + # Ensure base table is in aliases. + self.query.get_initial_alias() + return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1 + + @classmethod + def _expr_refs_base_model(cls, expr, base_model): + if isinstance(expr, Query): + return expr.model == base_model + if not hasattr(expr, "get_source_expressions"): + return False + return any( + cls._expr_refs_base_model(source_expr, base_model) + for source_expr in expr.get_source_expressions() + ) + + @cached_property + def contains_self_reference_subquery(self): + return any( + self._expr_refs_base_model(expr, self.query.model) + for expr in chain( + self.query.annotations.values(), self.query.where.children + ) + ) + + def _as_sql(self, query): + delete = "DELETE FROM %s" % self.quote_name_unless_alias(query.base_table) + try: + where, params = self.compile(query.where) + except FullResultSet: + return delete, () + return f"{delete} WHERE {where}", tuple(params) + + def as_sql(self): + """ + Create the SQL for this query. Return the SQL string and list of + parameters. + """ + if self.single_alias and ( + self.connection.features.delete_can_self_reference_subquery + or not self.contains_self_reference_subquery + ): + return self._as_sql(self.query) + innerq = self.query.clone() + innerq.__class__ = Query + innerq.clear_select_clause() + pk = self.query.model._meta.pk + innerq.select = [pk.get_col(self.query.get_initial_alias())] + outerq = Query(self.query.model) + if not self.connection.features.update_can_self_select: + # Force the materialization of the inner query to allow reference + # to the target table on MySQL. 
+ sql, params = innerq.get_compiler(connection=self.connection).as_sql() + innerq = RawSQL("SELECT * FROM (%s) subquery" % sql, params) + outerq.add_filter("pk__in", innerq) + return self._as_sql(outerq) + + +class SQLUpdateCompiler(SQLCompiler): + def as_sql(self): + """ + Create the SQL for this query. Return the SQL string and list of + parameters. + """ + self.pre_sql_setup() + if not self.query.values: + return "", () + qn = self.quote_name_unless_alias + values, update_params = [], [] + for field, model, val in self.query.values: + if hasattr(val, "resolve_expression"): + val = val.resolve_expression( + self.query, allow_joins=False, for_save=True + ) + if val.contains_aggregate: + raise FieldError( + "Aggregate functions are not allowed in this query " + "(%s=%r)." % (field.name, val) + ) + if val.contains_over_clause: + raise FieldError( + "Window expressions are not allowed in this query " + "(%s=%r)." % (field.name, val) + ) + elif hasattr(val, "prepare_database_save"): + if field.remote_field: + val = val.prepare_database_save(field) + else: + raise TypeError( + "Tried to update field %s with a model instance, %r. " + "Use a value compatible with %s." + % (field, val, field.__class__.__name__) + ) + val = field.get_db_prep_save(val, connection=self.connection) + + # Getting the placeholder for the field. + if hasattr(field, "get_placeholder"): + placeholder = field.get_placeholder(val, self, self.connection) + else: + placeholder = "%s" + name = field.column + if hasattr(val, "as_sql"): + sql, params = self.compile(val) + values.append("%s = %s" % (qn(name), placeholder % sql)) + update_params.extend(params) + elif val is not None: + values.append("%s = %s" % (qn(name), placeholder)) + update_params.append(val) + else: + values.append("%s = NULL" % qn(name)) + table = self.query.base_table + result = [ + "UPDATE %s SET" % qn(table), + ", ".join(values), + ] + try: + where, params = self.compile(self.query.where) + except FullResultSet: + params = [] + else: + result.append("WHERE %s" % where) + return " ".join(result), tuple(update_params + params) + + def execute_sql(self, result_type): + """ + Execute the specified update. Return the number of rows affected by + the primary update query. The "primary update query" is the first + non-empty query that is executed. Row counts for any subsequent, + related queries are not available. + """ + cursor = super().execute_sql(result_type) + try: + rows = cursor.rowcount if cursor else 0 + is_empty = cursor is None + finally: + if cursor: + cursor.close() + for query in self.query.get_related_updates(): + aux_rows = query.get_compiler(self.using).execute_sql(result_type) + if is_empty and aux_rows: + rows = aux_rows + is_empty = False + return rows + + def pre_sql_setup(self): + """ + If the update depends on results from other tables, munge the "where" + conditions to match the format required for (portable) SQL updates. + + If multiple updates are required, pull out the id values to update at + this point so that they don't change as a result of the progressive + updates. 
+ """ + refcounts_before = self.query.alias_refcount.copy() + # Ensure base table is in the query + self.query.get_initial_alias() + count = self.query.count_active_tables() + if not self.query.related_updates and count == 1: + return + query = self.query.chain(klass=Query) + query.select_related = False + query.clear_ordering(force=True) + query.extra = {} + query.select = [] + meta = query.get_meta() + fields = [meta.pk.name] + related_ids_index = [] + for related in self.query.related_updates: + if all( + path.join_field.primary_key for path in meta.get_path_to_parent(related) + ): + # If a primary key chain exists to the targeted related update, + # then the meta.pk value can be used for it. + related_ids_index.append((related, 0)) + else: + # This branch will only be reached when updating a field of an + # ancestor that is not part of the primary key chain of a MTI + # tree. + related_ids_index.append((related, len(fields))) + fields.append(related._meta.pk.name) + query.add_fields(fields) + super().pre_sql_setup() + + must_pre_select = ( + count > 1 and not self.connection.features.update_can_self_select + ) + + # Now we adjust the current query: reset the where clause and get rid + # of all the tables we don't need (since they're in the sub-select). + self.query.clear_where() + if self.query.related_updates or must_pre_select: + # Either we're using the idents in multiple update queries (so + # don't want them to change), or the db backend doesn't support + # selecting from the updating table (e.g. MySQL). + idents = [] + related_ids = collections.defaultdict(list) + for rows in query.get_compiler(self.using).execute_sql(MULTI): + idents.extend(r[0] for r in rows) + for parent, index in related_ids_index: + related_ids[parent].extend(r[index] for r in rows) + self.query.add_filter("pk__in", idents) + self.query.related_ids = related_ids + else: + # The fast path. Filters and updates in one query. + self.query.add_filter("pk__in", query) + self.query.reset_refcounts(refcounts_before) + + +class SQLAggregateCompiler(SQLCompiler): + def as_sql(self): + """ + Create the SQL for this query. Return the SQL string and list of + parameters. + """ + sql, params = [], [] + for annotation in self.query.annotation_select.values(): + ann_sql, ann_params = self.compile(annotation) + ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params) + sql.append(ann_sql) + params.extend(ann_params) + self.col_count = len(self.query.annotation_select) + sql = ", ".join(sql) + params = tuple(params) + + inner_query_sql, inner_query_params = self.query.inner_query.get_compiler( + self.using, + elide_empty=self.elide_empty, + ).as_sql(with_col_aliases=True) + sql = "SELECT %s FROM (%s) subquery" % (sql, inner_query_sql) + params += inner_query_params + return sql, params + + +def cursor_iter(cursor, sentinel, col_count, itersize): + """ + Yield blocks of rows from a cursor and ensure the cursor is closed when + done. + """ + try: + for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel): + yield rows if col_count is None else [r[:col_count] for r in rows] + finally: + cursor.close() diff --git a/testbed/django__django/django/db/models/sql/constants.py b/testbed/django__django/django/db/models/sql/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..fdfb2ea891ff05f95c5260342f982fea7ec2a158 --- /dev/null +++ b/testbed/django__django/django/db/models/sql/constants.py @@ -0,0 +1,24 @@ +""" +Constants specific to the SQL storage portion of the ORM. 
+""" + +# Size of each "chunk" for get_iterator calls. +# Larger values are slightly faster at the expense of more storage space. +GET_ITERATOR_CHUNK_SIZE = 100 + +# Namedtuples for sql.* internal use. + +# How many results to expect from a cursor.execute call +MULTI = "multi" +SINGLE = "single" +CURSOR = "cursor" +NO_RESULTS = "no results" + +ORDER_DIR = { + "ASC": ("ASC", "DESC"), + "DESC": ("DESC", "ASC"), +} + +# SQL join types. +INNER = "INNER JOIN" +LOUTER = "LEFT OUTER JOIN" diff --git a/testbed/django__django/django/db/models/sql/datastructures.py b/testbed/django__django/django/db/models/sql/datastructures.py new file mode 100644 index 0000000000000000000000000000000000000000..5eaa8c25f6bd77a8a991f460192dcb8f1b192184 --- /dev/null +++ b/testbed/django__django/django/db/models/sql/datastructures.py @@ -0,0 +1,237 @@ +""" +Useful auxiliary data structures for query construction. Not useful outside +the SQL domain. +""" +import warnings + +from django.core.exceptions import FullResultSet +from django.db.models.sql.constants import INNER, LOUTER +from django.utils.deprecation import RemovedInDjango60Warning + + +class MultiJoin(Exception): + """ + Used by join construction code to indicate the point at which a + multi-valued join was attempted (if the caller wants to treat that + exceptionally). + """ + + def __init__(self, names_pos, path_with_names): + self.level = names_pos + # The path travelled, this includes the path to the multijoin. + self.names_with_path = path_with_names + + +class Empty: + pass + + +class Join: + """ + Used by sql.Query and sql.SQLCompiler to generate JOIN clauses into the + FROM entry. For example, the SQL generated could be + LEFT OUTER JOIN "sometable" T1 + ON ("othertable"."sometable_id" = "sometable"."id") + + This class is primarily used in Query.alias_map. All entries in alias_map + must be Join compatible by providing the following attributes and methods: + - table_name (string) + - table_alias (possible alias for the table, can be None) + - join_type (can be None for those entries that aren't joined from + anything) + - parent_alias (which table is this join's parent, can be None similarly + to join_type) + - as_sql() + - relabeled_clone() + """ + + def __init__( + self, + table_name, + parent_alias, + table_alias, + join_type, + join_field, + nullable, + filtered_relation=None, + ): + # Join table + self.table_name = table_name + self.parent_alias = parent_alias + # Note: table_alias is not necessarily known at instantiation time. + self.table_alias = table_alias + # LOUTER or INNER + self.join_type = join_type + # A list of 2-tuples to use in the ON clause of the JOIN. + # Each 2-tuple will create one join condition in the ON clause. + if hasattr(join_field, "get_joining_fields"): + self.join_fields = join_field.get_joining_fields() + self.join_cols = tuple( + (lhs_field.column, rhs_field.column) + for lhs_field, rhs_field in self.join_fields + ) + else: + warnings.warn( + "The usage of get_joining_columns() in Join is deprecated. Implement " + "get_joining_fields() instead.", + RemovedInDjango60Warning, + ) + self.join_fields = None + self.join_cols = join_field.get_joining_columns() + # Along which field (or ForeignObjectRel in the reverse join case) + self.join_field = join_field + # Is this join nullabled? 
+ self.nullable = nullable
+ self.filtered_relation = filtered_relation
+
+ def as_sql(self, compiler, connection):
+ """
+ Generate the full
+ LEFT OUTER JOIN sometable ON sometable.somecol = othertable.othercol, params
+ clause for this join.
+ """
+ join_conditions = []
+ params = []
+ qn = compiler.quote_name_unless_alias
+ qn2 = connection.ops.quote_name
+ # Add a join condition for each pair of joining columns.
+ # RemovedInDjango60Warning: when the deprecation ends, replace with:
+ # for lhs, rhs in self.join_fields:
+ join_fields = self.join_fields or self.join_cols
+ for lhs, rhs in join_fields:
+ if isinstance(lhs, str):
+ # RemovedInDjango60Warning: when the deprecation ends, remove
+ # the branch for strings.
+ lhs_full_name = "%s.%s" % (qn(self.parent_alias), qn2(lhs))
+ rhs_full_name = "%s.%s" % (qn(self.table_alias), qn2(rhs))
+ else:
+ lhs, rhs = connection.ops.prepare_join_on_clause(
+ self.parent_alias, lhs, self.table_alias, rhs
+ )
+ lhs_sql, lhs_params = compiler.compile(lhs)
+ lhs_full_name = lhs_sql % lhs_params
+ rhs_sql, rhs_params = compiler.compile(rhs)
+ rhs_full_name = rhs_sql % rhs_params
+ join_conditions.append(f"{lhs_full_name} = {rhs_full_name}")
+
+ # Add a single condition inside parentheses for whatever
+ # get_extra_restriction() returns.
+ extra_cond = self.join_field.get_extra_restriction(
+ self.table_alias, self.parent_alias
+ )
+ if extra_cond:
+ extra_sql, extra_params = compiler.compile(extra_cond)
+ join_conditions.append("(%s)" % extra_sql)
+ params.extend(extra_params)
+ if self.filtered_relation:
+ try:
+ extra_sql, extra_params = compiler.compile(self.filtered_relation)
+ except FullResultSet:
+ pass
+ else:
+ join_conditions.append("(%s)" % extra_sql)
+ params.extend(extra_params)
+ if not join_conditions:
+ # This might be a rel on the other end of an actual declared field.
+ declared_field = getattr(self.join_field, "field", self.join_field)
+ raise ValueError(
+ "Join generated an empty ON clause. %s did not yield either "
+ "joining columns or extra restrictions." % declared_field.__class__
+ )
+ on_clause_sql = " AND ".join(join_conditions)
+ alias_str = (
+ "" if self.table_alias == self.table_name else (" %s" % self.table_alias)
+ )
+ sql = "%s %s%s ON (%s)" % (
+ self.join_type,
+ qn(self.table_name),
+ alias_str,
+ on_clause_sql,
+ )
+ return sql, params
+
+ def relabeled_clone(self, change_map):
+ new_parent_alias = change_map.get(self.parent_alias, self.parent_alias)
+ new_table_alias = change_map.get(self.table_alias, self.table_alias)
+ if self.filtered_relation is not None:
+ filtered_relation = self.filtered_relation.relabeled_clone(change_map)
+ else:
+ filtered_relation = None
+ return self.__class__(
+ self.table_name,
+ new_parent_alias,
+ new_table_alias,
+ self.join_type,
+ self.join_field,
+ self.nullable,
+ filtered_relation=filtered_relation,
+ )
+
+ @property
+ def identity(self):
+ return (
+ self.__class__,
+ self.table_name,
+ self.parent_alias,
+ self.join_field,
+ self.filtered_relation,
+ )
+
+ def __eq__(self, other):
+ if not isinstance(other, Join):
+ return NotImplemented
+ return self.identity == other.identity
+
+ def __hash__(self):
+ return hash(self.identity)
+
+ def demote(self):
+ new = self.relabeled_clone({})
+ new.join_type = INNER
+ return new
+
+ def promote(self):
+ new = self.relabeled_clone({})
+ new.join_type = LOUTER
+ return new
+
+
+class BaseTable:
+ """
+ The BaseTable class is used for base table references in the FROM clause.
For + example, the SQL "foo" in + SELECT * FROM "foo" WHERE somecond + could be generated by this class. + """ + + join_type = None + parent_alias = None + filtered_relation = None + + def __init__(self, table_name, alias): + self.table_name = table_name + self.table_alias = alias + + def as_sql(self, compiler, connection): + alias_str = ( + "" if self.table_alias == self.table_name else (" %s" % self.table_alias) + ) + base_sql = compiler.quote_name_unless_alias(self.table_name) + return base_sql + alias_str, [] + + def relabeled_clone(self, change_map): + return self.__class__( + self.table_name, change_map.get(self.table_alias, self.table_alias) + ) + + @property + def identity(self): + return self.__class__, self.table_name, self.table_alias + + def __eq__(self, other): + if not isinstance(other, BaseTable): + return NotImplemented + return self.identity == other.identity + + def __hash__(self): + return hash(self.identity) diff --git a/testbed/django__django/django/db/models/sql/query.py b/testbed/django__django/django/db/models/sql/query.py new file mode 100644 index 0000000000000000000000000000000000000000..13a6809dd83f9c1f143e2dee016a5aa028627498 --- /dev/null +++ b/testbed/django__django/django/db/models/sql/query.py @@ -0,0 +1,2678 @@ +""" +Create SQL statements for QuerySets. + +The code in here encapsulates all of the SQL construction so that QuerySets +themselves do not have to (and could be backed by things other than SQL +databases). The abstraction barrier only works one way: this module has to know +all about the internals of models in order to get the information it needs. +""" +import copy +import difflib +import functools +import sys +from collections import Counter, namedtuple +from collections.abc import Iterator, Mapping +from itertools import chain, count, product +from string import ascii_uppercase + +from django.core.exceptions import FieldDoesNotExist, FieldError +from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections +from django.db.models.aggregates import Count +from django.db.models.constants import LOOKUP_SEP +from django.db.models.expressions import ( + BaseExpression, + Col, + Exists, + F, + OuterRef, + Ref, + ResolvedOuterRef, + Value, +) +from django.db.models.fields import Field +from django.db.models.fields.related_lookups import MultiColSource +from django.db.models.lookups import Lookup +from django.db.models.query_utils import ( + Q, + check_rel_lookup_compatibility, + refs_expression, +) +from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE +from django.db.models.sql.datastructures import BaseTable, Empty, Join, MultiJoin +from django.db.models.sql.where import AND, OR, ExtraWhere, NothingNode, WhereNode +from django.utils.functional import cached_property +from django.utils.regex_helper import _lazy_re_compile +from django.utils.tree import Node + +__all__ = ["Query", "RawQuery"] + +# Quotation marks ('"`[]), whitespace characters, semicolons, or inline +# SQL comments are forbidden in column aliases. 
+FORBIDDEN_ALIAS_PATTERN = _lazy_re_compile(r"['`\"\]\[;\s]|--|/\*|\*/") + +# Inspired from +# https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS +EXPLAIN_OPTIONS_PATTERN = _lazy_re_compile(r"[\w\-]+") + + +def get_field_names_from_opts(opts): + if opts is None: + return set() + return set( + chain.from_iterable( + (f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields() + ) + ) + + +def get_children_from_q(q): + for child in q.children: + if isinstance(child, Node): + yield from get_children_from_q(child) + else: + yield child + + +def rename_prefix_from_q(prefix, replacement, q): + return Q.create( + [ + rename_prefix_from_q(prefix, replacement, c) + if isinstance(c, Node) + else (c[0].replace(prefix, replacement, 1), c[1]) + for c in q.children + ], + q.connector, + q.negated, + ) + + +JoinInfo = namedtuple( + "JoinInfo", + ("final_field", "targets", "opts", "joins", "path", "transform_function"), +) + + +class RawQuery: + """A single raw SQL query.""" + + def __init__(self, sql, using, params=()): + self.params = params + self.sql = sql + self.using = using + self.cursor = None + + # Mirror some properties of a normal query so that + # the compiler can be used to process results. + self.low_mark, self.high_mark = 0, None # Used for offset/limit + self.extra_select = {} + self.annotation_select = {} + + def chain(self, using): + return self.clone(using) + + def clone(self, using): + return RawQuery(self.sql, using, params=self.params) + + def get_columns(self): + if self.cursor is None: + self._execute_query() + converter = connections[self.using].introspection.identifier_converter + return [converter(column_meta[0]) for column_meta in self.cursor.description] + + def __iter__(self): + # Always execute a new query for a new iterator. + # This could be optimized with a cache at the expense of RAM. + self._execute_query() + if not connections[self.using].features.can_use_chunked_reads: + # If the database can't use chunked reads we need to make sure we + # evaluate the entire query up front. + result = list(self.cursor) + else: + result = self.cursor + return iter(result) + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self) + + @property + def params_type(self): + if self.params is None: + return None + return dict if isinstance(self.params, Mapping) else tuple + + def __str__(self): + if self.params_type is None: + return self.sql + return self.sql % self.params_type(self.params) + + def _execute_query(self): + connection = connections[self.using] + + # Adapt parameters to the database, as much as possible considering + # that the target type isn't known. See #17755. 
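+ # (Illustrative sketch, editorial: RawQuery in use, assuming a
+ # configured "default" alias and a hypothetical app_book table; both
+ # get_columns() and iteration execute the query, adapting parameters as
+ # described above:
+ #   >>> rq = RawQuery("SELECT id FROM app_book WHERE name = %s",
+ #   ...               "default", params=("dune",))
+ #   >>> rq.get_columns()
+ #   ['id']
+ #   >>> rows = list(rq)
+ # )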
+ params_type = self.params_type
+ adapter = connection.ops.adapt_unknown_value
+ if params_type is tuple:
+ params = tuple(adapter(val) for val in self.params)
+ elif params_type is dict:
+ params = {key: adapter(val) for key, val in self.params.items()}
+ elif params_type is None:
+ params = None
+ else:
+ raise RuntimeError("Unexpected params type: %s" % params_type)
+
+ self.cursor = connection.cursor()
+ self.cursor.execute(self.sql, params)
+
+
+ExplainInfo = namedtuple("ExplainInfo", ("format", "options"))
+
+
+class Query(BaseExpression):
+ """A single SQL query."""
+
+ alias_prefix = "T"
+ empty_result_set_value = None
+ subq_aliases = frozenset([alias_prefix])
+
+ compiler = "SQLCompiler"
+
+ base_table_class = BaseTable
+ join_class = Join
+
+ default_cols = True
+ default_ordering = True
+ standard_ordering = True
+
+ filter_is_sticky = False
+ subquery = False
+
+ # SQL-related attributes.
+ # Select and related select clauses are expressions to use in the SELECT
+ # clause of the query. The select is used for cases where we want to set up
+ # the select clause to contain other than default fields (values(),
+ # subqueries...). Note that annotations go to the annotations dictionary.
+ select = ()
+ # The group_by attribute can have one of the following forms:
+ # - None: no group by at all in the query
+ # - A tuple of expressions: group by (at least) those expressions.
+ # String refs are also allowed for now.
+ # - True: group by all select fields of the model
+ # See compiler.get_group_by() for details.
+ group_by = None
+ order_by = ()
+ low_mark = 0 # Used for offset/limit.
+ high_mark = None # Used for offset/limit.
+ distinct = False
+ distinct_fields = ()
+ select_for_update = False
+ select_for_update_nowait = False
+ select_for_update_skip_locked = False
+ select_for_update_of = ()
+ select_for_no_key_update = False
+ select_related = False
+ has_select_fields = False
+ # Arbitrary limit for select_related to prevent infinite recursion.
+ max_depth = 5
+ # Holds the selects defined by a call to values() or values_list()
+ # excluding annotation_select and extra_select.
+ values_select = ()
+
+ # SQL annotation-related attributes.
+ annotation_select_mask = None
+ _annotation_select_cache = None
+
+ # Set combination attributes.
+ combinator = None
+ combinator_all = False
+ combined_queries = ()
+
+ # These are for extensions. The contents are more or less appended verbatim
+ # to the appropriate clause.
+ extra_select_mask = None
+ _extra_select_cache = None
+
+ extra_tables = ()
+ extra_order_by = ()
+
+ # A tuple that is a set of model field names and either True, if these are
+ # the fields to defer, or False if these are the only fields to load.
+ deferred_loading = (frozenset(), True)
+
+ explain_info = None
+
+ def __init__(self, model, alias_cols=True):
+ self.model = model
+ self.alias_refcount = {}
+ # alias_map is the most important data structure regarding joins.
+ # It's used for recording which joins exist in the query and what
+ # types they are. The key is the alias of the joined table (possibly
+ # the table name) and the value is a Join-like object (see
+ # sql.datastructures.Join for more information).
+ self.alias_map = {}
+ # Whether to provide alias to columns during reference resolving.
+ self.alias_cols = alias_cols
+ # Sometimes the query contains references to aliases in outer queries (as
+ # a result of split_exclude). Correct alias quoting needs to know these
+ # aliases too.
+ # Map external tables to whether they are aliased.
+ self.external_aliases = {} + self.table_map = {} # Maps table names to list of aliases. + self.used_aliases = set() + + self.where = WhereNode() + # Maps alias -> Annotation Expression. + self.annotations = {} + # These are for extensions. The contents are more or less appended + # verbatim to the appropriate clause. + self.extra = {} # Maps col_alias -> (col_sql, params). + + self._filtered_relations = {} + + @property + def output_field(self): + if len(self.select) == 1: + select = self.select[0] + return getattr(select, "target", None) or select.field + elif len(self.annotation_select) == 1: + return next(iter(self.annotation_select.values())).output_field + + @cached_property + def base_table(self): + for alias in self.alias_map: + return alias + + def __str__(self): + """ + Return the query as a string of SQL with the parameter values + substituted in (use sql_with_params() to see the unsubstituted string). + + Parameter values won't necessarily be quoted correctly, since that is + done by the database interface at execution time. + """ + sql, params = self.sql_with_params() + return sql % params + + def sql_with_params(self): + """ + Return the query as an SQL string and the parameters that will be + substituted into the query. + """ + return self.get_compiler(DEFAULT_DB_ALIAS).as_sql() + + def __deepcopy__(self, memo): + """Limit the amount of work when a Query is deepcopied.""" + result = self.clone() + memo[id(self)] = result + return result + + def get_compiler(self, using=None, connection=None, elide_empty=True): + if using is None and connection is None: + raise ValueError("Need either using or connection") + if using: + connection = connections[using] + return connection.ops.compiler(self.compiler)( + self, connection, using, elide_empty + ) + + def get_meta(self): + """ + Return the Options instance (the model._meta) from which to start + processing. Normally, this is self.model._meta, but it can be changed + by subclasses. + """ + if self.model: + return self.model._meta + + def clone(self): + """ + Return a copy of the current Query. A lightweight alternative to + deepcopy(). + """ + obj = Empty() + obj.__class__ = self.__class__ + # Copy references to everything. + obj.__dict__ = self.__dict__.copy() + # Clone attributes that can't use shallow copy. + obj.alias_refcount = self.alias_refcount.copy() + obj.alias_map = self.alias_map.copy() + obj.external_aliases = self.external_aliases.copy() + obj.table_map = self.table_map.copy() + obj.where = self.where.clone() + obj.annotations = self.annotations.copy() + if self.annotation_select_mask is not None: + obj.annotation_select_mask = self.annotation_select_mask.copy() + if self.combined_queries: + obj.combined_queries = tuple( + [query.clone() for query in self.combined_queries] + ) + # _annotation_select_cache cannot be copied, as doing so breaks the + # (necessary) state in which both annotations and + # _annotation_select_cache point to the same underlying objects. + # It will get re-populated in the cloned queryset the next time it's + # used. + obj._annotation_select_cache = None + obj.extra = self.extra.copy() + if self.extra_select_mask is not None: + obj.extra_select_mask = self.extra_select_mask.copy() + if self._extra_select_cache is not None: + obj._extra_select_cache = self._extra_select_cache.copy() + if self.select_related is not False: + # Use deepcopy because select_related stores fields in nested + # dicts. 
+ obj.select_related = copy.deepcopy(obj.select_related)
+ if "subq_aliases" in self.__dict__:
+ obj.subq_aliases = self.subq_aliases.copy()
+ obj.used_aliases = self.used_aliases.copy()
+ obj._filtered_relations = self._filtered_relations.copy()
+ # Clear the cached_property, if it exists.
+ obj.__dict__.pop("base_table", None)
+ return obj
+
+ def chain(self, klass=None):
+ """
+ Return a copy of the current Query that's ready for another operation.
+ The klass argument changes the type of the Query, e.g. UpdateQuery.
+ """
+ obj = self.clone()
+ if klass and obj.__class__ != klass:
+ obj.__class__ = klass
+ if not obj.filter_is_sticky:
+ obj.used_aliases = set()
+ obj.filter_is_sticky = False
+ if hasattr(obj, "_setup_query"):
+ obj._setup_query()
+ return obj
+
+ def relabeled_clone(self, change_map):
+ clone = self.clone()
+ clone.change_aliases(change_map)
+ return clone
+
+ def _get_col(self, target, field, alias):
+ if not self.alias_cols:
+ alias = None
+ return target.get_col(alias, field)
+
+ def get_aggregation(self, using, aggregate_exprs):
+ """
+ Return the dictionary with the values of the existing aggregations.
+ """
+ if not aggregate_exprs:
+ return {}
+ # Store the annotation mask prior to temporarily adding aggregations for
+ # resolving purposes to facilitate their subsequent removal.
+ refs_subquery = False
+ replacements = {}
+ annotation_select_mask = self.annotation_select_mask
+ for alias, aggregate_expr in aggregate_exprs.items():
+ self.check_alias(alias)
+ aggregate = aggregate_expr.resolve_expression(
+ self, allow_joins=True, reuse=None, summarize=True
+ )
+ if not aggregate.contains_aggregate:
+ raise TypeError("%s is not an aggregate expression" % alias)
+ # Temporarily add aggregate to annotations to allow remaining
+ # members of `aggregates` to resolve against each other.
+ self.append_annotation_mask([alias])
+ refs_subquery |= any(
+ getattr(self.annotations[ref], "subquery", False)
+ for ref in aggregate.get_refs()
+ )
+ aggregate = aggregate.replace_expressions(replacements)
+ self.annotations[alias] = aggregate
+ replacements[Ref(alias, aggregate)] = aggregate
+ # Stash resolved aggregates now that they have been allowed to resolve
+ # against each other.
+ aggregates = {alias: self.annotations.pop(alias) for alias in aggregate_exprs}
+ self.set_annotation_mask(annotation_select_mask)
+ # Existing usage of aggregation can be determined by the presence of
+ # selected aggregates but also by filters against aliased aggregates.
+ _, having, qualify = self.where.split_having_qualify()
+ has_existing_aggregation = (
+ any(
+ getattr(annotation, "contains_aggregate", True)
+ for annotation in self.annotations.values()
+ )
+ or having
+ )
+ # Decide if we need to use a subquery.
+ #
+ # Existing aggregations would cause incorrect results as
+ # get_aggregation() must produce just one result and thus must not use
+ # GROUP BY.
+ #
+ # If the query has limit or distinct, or uses set operations, then
+ # those operations must be done in a subquery so that the query
+ # aggregates on the limit and/or distinct results instead of applying
+ # the distinct and limit after the aggregation.
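+ # (Illustrative sketch, editorial: with a hypothetical Book model, each
+ # of these takes the subquery branch below:
+ #   Book.objects.all()[:3].count()          # sliced
+ #   Book.objects.distinct().count()         # distinct
+ #   qs1.union(qs2).count()                  # combinator
+ #   Book.objects.annotate(n=Count("chapters")).aggregate(m=Max("n"))
+ #                                           # existing aggregation
+ # )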
+ if (
+ isinstance(self.group_by, tuple)
+ or self.is_sliced
+ or has_existing_aggregation
+ or refs_subquery
+ or qualify
+ or self.distinct
+ or self.combinator
+ ):
+ from django.db.models.sql.subqueries import AggregateQuery
+
+ inner_query = self.clone()
+ inner_query.subquery = True
+ outer_query = AggregateQuery(self.model, inner_query)
+ inner_query.select_for_update = False
+ inner_query.select_related = False
+ inner_query.set_annotation_mask(self.annotation_select)
+ # Queries with distinct_fields need ordering and when a limit is
+ # applied we must take the slice from the ordered query. Otherwise
+ # no need for ordering.
+ inner_query.clear_ordering(force=False)
+ if not inner_query.distinct:
+ # If the inner query uses default select and it has some
+ # aggregate annotations, then we must make sure the inner
+ # query is grouped by the main model's primary key. However,
+ # clearing the select clause can alter results if distinct is
+ # used.
+ if inner_query.default_cols and has_existing_aggregation:
+ inner_query.group_by = (
+ self.model._meta.pk.get_col(inner_query.get_initial_alias()),
+ )
+ inner_query.default_cols = False
+ if not qualify:
+ # Mask existing annotations that are not referenced by
+ # aggregates to be pushed to the outer query unless
+ # filtering against window functions is involved as it
+ # requires complex re-aliasing.
+ annotation_mask = set()
+ if isinstance(self.group_by, tuple):
+ for expr in self.group_by:
+ annotation_mask |= expr.get_refs()
+ for aggregate in aggregates.values():
+ annotation_mask |= aggregate.get_refs()
+ inner_query.set_annotation_mask(annotation_mask)
+
+ # Add aggregates to the outer AggregateQuery. This requires making
+ # sure all columns referenced by the aggregates are selected in the
+ # inner query. It is achieved by retrieving all column references
+ # by the aggregates, explicitly selecting them in the inner query,
+ # and making sure the aggregates are repointed to them.
+ col_refs = {}
+ for alias, aggregate in aggregates.items():
+ replacements = {}
+ for col in self._gen_cols([aggregate], resolve_refs=False):
+ if not (col_ref := col_refs.get(col)):
+ index = len(col_refs) + 1
+ col_alias = f"__col{index}"
+ col_ref = Ref(col_alias, col)
+ col_refs[col] = col_ref
+ inner_query.annotations[col_alias] = col
+ inner_query.append_annotation_mask([col_alias])
+ replacements[col] = col_ref
+ outer_query.annotations[alias] = aggregate.replace_expressions(
+ replacements
+ )
+ if (
+ inner_query.select == ()
+ and not inner_query.default_cols
+ and not inner_query.annotation_select_mask
+ ):
+ # In case of Model.objects[0:3].count(), there would be no
+ # field selected in the inner query, yet we must use a subquery.
+ # So, make sure at least one field is selected.
+ inner_query.select = (
+ self.model._meta.pk.get_col(inner_query.get_initial_alias()),
+ )
+ else:
+ outer_query = self
+ self.select = ()
+ self.default_cols = False
+ self.extra = {}
+ if self.annotations:
+ # Inline references to existing annotations and mask them, as
+ # they are unnecessary given only the summarized aggregations
+ # are requested.
+ replacements = {
+ Ref(alias, annotation): annotation
+ for alias, annotation in self.annotations.items()
+ }
+ self.annotations = {
+ alias: aggregate.replace_expressions(replacements)
+ for alias, aggregate in aggregates.items()
+ }
+ else:
+ self.annotations = aggregates
+ self.set_annotation_mask(aggregates)
+
+ empty_set_result = [
+ expression.empty_result_set_value
+ for expression in outer_query.annotation_select.values()
+ ]
+ elide_empty = not any(result is NotImplemented for result in empty_set_result)
+ outer_query.clear_ordering(force=True)
+ outer_query.clear_limits()
+ outer_query.select_for_update = False
+ outer_query.select_related = False
+ compiler = outer_query.get_compiler(using, elide_empty=elide_empty)
+ result = compiler.execute_sql(SINGLE)
+ if result is None:
+ result = empty_set_result
+ else:
+ converters = compiler.get_converters(outer_query.annotation_select.values())
+ result = next(compiler.apply_converters((result,), converters))
+
+ return dict(zip(outer_query.annotation_select, result))
+
+ def get_count(self, using):
+ """
+ Perform a COUNT() query using the current filter constraints.
+ """
+ obj = self.clone()
+ return obj.get_aggregation(using, {"__count": Count("*")})["__count"]
+
+ def has_filters(self):
+ return self.where
+
+ def exists(self, limit=True):
+ q = self.clone()
+ if not (q.distinct and q.is_sliced):
+ if q.group_by is True:
+ q.add_fields(
+ (f.attname for f in self.model._meta.concrete_fields), False
+ )
+ # Disable GROUP BY aliases to avoid orphaning references to the
+ # SELECT clause which is about to be cleared.
+ q.set_group_by(allow_aliases=False)
+ q.clear_select_clause()
+ if q.combined_queries and q.combinator == "union":
+ q.combined_queries = tuple(
+ combined_query.exists(limit=False)
+ for combined_query in q.combined_queries
+ )
+ q.clear_ordering(force=True)
+ if limit:
+ q.set_limits(high=1)
+ q.add_annotation(Value(1), "a")
+ return q
+
+ def has_results(self, using):
+ q = self.exists(using)
+ compiler = q.get_compiler(using=using)
+ return compiler.has_results()
+
+ def explain(self, using, format=None, **options):
+ q = self.clone()
+ for option_name in options:
+ if (
+ not EXPLAIN_OPTIONS_PATTERN.fullmatch(option_name)
+ or "--" in option_name
+ ):
+ raise ValueError(f"Invalid option name: {option_name!r}.")
+ q.explain_info = ExplainInfo(format, options)
+ compiler = q.get_compiler(using=using)
+ return "\n".join(compiler.explain_query())
+
+ def combine(self, rhs, connector):
+ """
+ Merge the 'rhs' query into the current one, with any 'rhs' effects
+ being applied *after* (that is, "to the right of") anything in the
+ current query. 'rhs' is not modified during a call to this function.
+
+ The 'connector' parameter describes how to connect filters from the
+ 'rhs' query.
+ """
+ if self.model != rhs.model:
+ raise TypeError("Cannot combine queries on two different base models.")
+ if self.is_sliced:
+ raise TypeError("Cannot combine queries once a slice has been taken.")
+ if self.distinct != rhs.distinct:
+ raise TypeError("Cannot combine a unique query with a non-unique query.")
+ if self.distinct_fields != rhs.distinct_fields:
+ raise TypeError("Cannot combine queries with different distinct fields.")
+
+ # If lhs and rhs share the same alias prefix, it is possible to have
+ # conflicting alias changes like T4 -> T5, T5 -> T6, which might end up
+ # as T4 -> T6 while combining two querysets.
To prevent this, change an + # alias prefix of the rhs and update current aliases accordingly, + # except if the alias is the base table since it must be present in the + # query on both sides. + initial_alias = self.get_initial_alias() + rhs.bump_prefix(self, exclude={initial_alias}) + + # Work out how to relabel the rhs aliases, if necessary. + change_map = {} + conjunction = connector == AND + + # Determine which existing joins can be reused. When combining the + # query with AND we must recreate all joins for m2m filters. When + # combining with OR we can reuse joins. The reason is that in AND + # case a single row can't fulfill a condition like: + # revrel__col=1 & revrel__col=2 + # But, there might be two different related rows matching this + # condition. In OR case a single True is enough, so single row is + # enough, too. + # + # Note that we will be creating duplicate joins for non-m2m joins in + # the AND case. The results will be correct but this creates too many + # joins. This is something that could be fixed later on. + reuse = set() if conjunction else set(self.alias_map) + joinpromoter = JoinPromoter(connector, 2, False) + joinpromoter.add_votes( + j for j in self.alias_map if self.alias_map[j].join_type == INNER + ) + rhs_votes = set() + # Now, add the joins from rhs query into the new query (skipping base + # table). + rhs_tables = list(rhs.alias_map)[1:] + for alias in rhs_tables: + join = rhs.alias_map[alias] + # If the left side of the join was already relabeled, use the + # updated alias. + join = join.relabeled_clone(change_map) + new_alias = self.join(join, reuse=reuse) + if join.join_type == INNER: + rhs_votes.add(new_alias) + # We can't reuse the same join again in the query. If we have two + # distinct joins for the same connection in rhs query, then the + # combined query must have two joins, too. + reuse.discard(new_alias) + if alias != new_alias: + change_map[alias] = new_alias + if not rhs.alias_refcount[alias]: + # The alias was unused in the rhs query. Unref it so that it + # will be unused in the new query, too. We have to add and + # unref the alias so that join promotion has information of + # the join type for the unused alias. + self.unref_alias(new_alias) + joinpromoter.add_votes(rhs_votes) + joinpromoter.update_join_types(self) + + # Combine subqueries aliases to ensure aliases relabelling properly + # handle subqueries when combining where and select clauses. + self.subq_aliases |= rhs.subq_aliases + + # Now relabel a copy of the rhs where-clause and add it to the current + # one. + w = rhs.where.clone() + w.relabel_aliases(change_map) + self.where.add(w, connector) + + # Selection columns and extra extensions are those provided by 'rhs'. + if rhs.select: + self.set_select([col.relabeled_clone(change_map) for col in rhs.select]) + else: + self.select = () + + if connector == OR: + # It would be nice to be able to handle this, but the queries don't + # really make sense (or return consistent value sets). Not worth + # the extra complexity when you can write a real query instead. + if self.extra and rhs.extra: + raise ValueError( + "When merging querysets using 'or', you cannot have " + "extra(select=...) on both sides." 
+ )
+ self.extra.update(rhs.extra)
+ extra_select_mask = set()
+ if self.extra_select_mask is not None:
+ extra_select_mask.update(self.extra_select_mask)
+ if rhs.extra_select_mask is not None:
+ extra_select_mask.update(rhs.extra_select_mask)
+ if extra_select_mask:
+ self.set_extra_mask(extra_select_mask)
+ self.extra_tables += rhs.extra_tables
+
+ # Ordering uses the 'rhs' ordering, unless it has none, in which case
+ # the current ordering is used.
+ self.order_by = rhs.order_by or self.order_by
+ self.extra_order_by = rhs.extra_order_by or self.extra_order_by
+
+ def _get_defer_select_mask(self, opts, mask, select_mask=None):
+ if select_mask is None:
+ select_mask = {}
+ select_mask[opts.pk] = {}
+ # All concrete fields that are not part of the defer mask must be
+ # loaded. If a relational field is encountered it gets added to the
+ # mask so it can be considered by `select_related`, and the cycle
+ # continues by recursively calling this function.
+ for field in opts.concrete_fields:
+ field_mask = mask.pop(field.name, None)
+ field_att_mask = mask.pop(field.attname, None)
+ if field_mask is None and field_att_mask is None:
+ select_mask.setdefault(field, {})
+ elif field_mask:
+ if not field.is_relation:
+ raise FieldError(next(iter(field_mask)))
+ field_select_mask = select_mask.setdefault(field, {})
+ related_model = field.remote_field.model._meta.concrete_model
+ self._get_defer_select_mask(
+ related_model._meta, field_mask, field_select_mask
+ )
+ # Remaining defer entries must be references to reverse relationships.
+ # The following code is expected to raise FieldError if it encounters
+ # a malformed defer entry.
+ for field_name, field_mask in mask.items():
+ if filtered_relation := self._filtered_relations.get(field_name):
+ relation = opts.get_field(filtered_relation.relation_name)
+ field_select_mask = select_mask.setdefault((field_name, relation), {})
+ field = relation.field
+ else:
+ reverse_rel = opts.get_field(field_name)
+ # While virtual fields such as many-to-many and generic foreign
+ # keys cannot be effectively deferred we've historically
+ # allowed them to be passed to QuerySet.defer(). Ignore such
+ # field references until a layer of validation at mask
+ # alteration time is eventually implemented.
+ if not hasattr(reverse_rel, "field"):
+ continue
+ field = reverse_rel.field
+ field_select_mask = select_mask.setdefault(field, {})
+ related_model = field.model._meta.concrete_model
+ self._get_defer_select_mask(
+ related_model._meta, field_mask, field_select_mask
+ )
+ return select_mask
+
+ def _get_only_select_mask(self, opts, mask, select_mask=None):
+ if select_mask is None:
+ select_mask = {}
+ select_mask[opts.pk] = {}
+ # Only include fields mentioned in the mask.
+ for field_name, field_mask in mask.items():
+ field = opts.get_field(field_name)
+ # Retrieve the actual field associated with reverse relationships
+ # as that's what is expected in the select mask.
+ if field in opts.related_objects:
+ field_key = field.field
+ else:
+ field_key = field
+ field_select_mask = select_mask.setdefault(field_key, {})
+ if field_mask:
+ if not field.is_relation:
+ raise FieldError(next(iter(field_mask)))
+ related_model = field.remote_field.model._meta.concrete_model
+ self._get_only_select_mask(
+ related_model._meta, field_mask, field_select_mask
+ )
+ return select_mask
+
+ def get_select_mask(self):
+ """
+ Convert the self.deferred_loading data structure to an alternate data
+ structure, describing the fields that *will* be loaded.
This is used to
+ compute the columns to select from the database and also by the
+ QuerySet class to work out which fields are being initialized on each
+ model. Models that have all their fields included aren't mentioned in
+ the result, only those that have field restrictions in place.
+ """
+ field_names, defer = self.deferred_loading
+ if not field_names:
+ return {}
+ mask = {}
+ for field_name in field_names:
+ part_mask = mask
+ for part in field_name.split(LOOKUP_SEP):
+ part_mask = part_mask.setdefault(part, {})
+ opts = self.get_meta()
+ if defer:
+ return self._get_defer_select_mask(opts, mask)
+ return self._get_only_select_mask(opts, mask)
+
+ def table_alias(self, table_name, create=False, filtered_relation=None):
+ """
+ Return a table alias for the given table_name and whether this is a
+ new alias or not.
+
+ If 'create' is true, a new alias is always created. Otherwise, the
+ most recently created alias for the table (if one exists) is reused.
+ """
+ alias_list = self.table_map.get(table_name)
+ if not create and alias_list:
+ alias = alias_list[0]
+ self.alias_refcount[alias] += 1
+ return alias, False
+
+ # Create a new alias for this table.
+ if alias_list:
+ alias = "%s%d" % (self.alias_prefix, len(self.alias_map) + 1)
+ alias_list.append(alias)
+ else:
+ # The first occurrence of a table uses the table name directly.
+ alias = (
+ filtered_relation.alias if filtered_relation is not None else table_name
+ )
+ self.table_map[table_name] = [alias]
+ self.alias_refcount[alias] = 1
+ return alias, True
+
+ def ref_alias(self, alias):
+ """Increases the reference count for this alias."""
+ self.alias_refcount[alias] += 1
+
+ def unref_alias(self, alias, amount=1):
+ """Decreases the reference count for this alias."""
+ self.alias_refcount[alias] -= amount
+
+ def promote_joins(self, aliases):
+ """
+ Recursively promote the join type of the given aliases and their
+ children to an outer join. A join is only promoted if it is nullable
+ or its parent join is an outer join.
+
+ The children promotion is done to avoid join chains that contain a LOUTER
+ b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted,
+ then we must also promote b->c automatically, or otherwise the promotion
+ of a->b doesn't actually change anything in the query results.
+ """
+ aliases = list(aliases)
+ while aliases:
+ alias = aliases.pop(0)
+ if self.alias_map[alias].join_type is None:
+ # This is the base table (first FROM entry) - this table
+ # isn't really joined at all in the query, so we should not
+ # alter its join type.
+ continue
+ # Only the first alias (skipped above) should have None join_type
+ assert self.alias_map[alias].join_type is not None
+ parent_alias = self.alias_map[alias].parent_alias
+ parent_louter = (
+ parent_alias and self.alias_map[parent_alias].join_type == LOUTER
+ )
+ already_louter = self.alias_map[alias].join_type == LOUTER
+ if (self.alias_map[alias].nullable or parent_louter) and not already_louter:
+ self.alias_map[alias] = self.alias_map[alias].promote()
+ # Join type of 'alias' changed, so re-examine all aliases that
+ # refer to this one.
+ aliases.extend(
+ join
+ for join in self.alias_map
+ if self.alias_map[join].parent_alias == alias
+ and join not in aliases
+ )
+
+ def demote_joins(self, aliases):
+ """
+ Change join type from LOUTER to INNER for all joins in aliases.
+
+ Similarly to promote_joins(), this method must ensure no join chains
+ containing first an outer, then an inner join are generated.
If we
+ are demoting the b->c join in the chain a LOUTER b LOUTER c then we must
+ demote a->b automatically, or otherwise the demotion of b->c doesn't
+ actually change anything in the query results.
+ """
+ aliases = list(aliases)
+ while aliases:
+ alias = aliases.pop(0)
+ if self.alias_map[alias].join_type == LOUTER:
+ self.alias_map[alias] = self.alias_map[alias].demote()
+ parent_alias = self.alias_map[alias].parent_alias
+ if self.alias_map[parent_alias].join_type == INNER:
+ aliases.append(parent_alias)
+
+ def reset_refcounts(self, to_counts):
+ """
+ Reset reference counts for aliases so that they match the value passed
+ in `to_counts`.
+ """
+ for alias, cur_refcount in self.alias_refcount.copy().items():
+ unref_amount = cur_refcount - to_counts.get(alias, 0)
+ self.unref_alias(alias, unref_amount)
+
+ def change_aliases(self, change_map):
+ """
+ Change the aliases in change_map (which maps old-alias -> new-alias),
+ relabelling any references to them in select columns and the where
+ clause.
+ """
+ # If keys and values of change_map were to intersect, an alias might be
+ # updated twice (e.g. T4 -> T5, T5 -> T6, so also T4 -> T6) depending
+ # on their order in change_map.
+ assert set(change_map).isdisjoint(change_map.values())
+
+ # 1. Update references in "select" (normal columns plus aliases),
+ # "group by" and "where".
+ self.where.relabel_aliases(change_map)
+ if isinstance(self.group_by, tuple):
+ self.group_by = tuple(
+ [col.relabeled_clone(change_map) for col in self.group_by]
+ )
+ self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
+ self.annotations = self.annotations and {
+ key: col.relabeled_clone(change_map)
+ for key, col in self.annotations.items()
+ }
+
+ # 2. Rename the alias in the internal table/alias datastructures.
+ for old_alias, new_alias in change_map.items():
+ if old_alias not in self.alias_map:
+ continue
+ alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
+ self.alias_map[new_alias] = alias_data
+ self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
+ del self.alias_refcount[old_alias]
+ del self.alias_map[old_alias]
+
+ table_aliases = self.table_map[alias_data.table_name]
+ for pos, alias in enumerate(table_aliases):
+ if alias == old_alias:
+ table_aliases[pos] = new_alias
+ break
+ self.external_aliases = {
+ # Table is aliased or it's being changed and thus is aliased.
+ change_map.get(alias, alias): (aliased or alias in change_map)
+ for alias, aliased in self.external_aliases.items()
+ }
+
+ def bump_prefix(self, other_query, exclude=None):
+ """
+ Change the alias prefix to the next letter in the alphabet in a way
+ that the other query's aliases and this query's aliases will not
+ conflict. Even tables that previously had no alias will get an alias
+ after this call. To prevent changing aliases use the exclude parameter.
+ """
+
+ def prefix_gen():
+ """
+ Generate a sequence of characters in alphabetical order:
+ -> 'A', 'B', 'C', ...
+
+ When the alphabet is finished, the sequence will continue with the
+ Cartesian product:
+ -> 'AA', 'AB', 'AC', ...
+ """
+ alphabet = ascii_uppercase
+ prefix = chr(ord(self.alias_prefix) + 1)
+ yield prefix
+ for n in count(1):
+ seq = alphabet[alphabet.index(prefix) :] if prefix else alphabet
+ for s in product(seq, repeat=n):
+ yield "".join(s)
+ prefix = None
+
+ if self.alias_prefix != other_query.alias_prefix:
+ # No clashes between self and outer query should be possible.
+ return
+
+ # Explicitly avoid infinite loop.
The constant divider is based on how
+ # much depth recursive subquery references add to the stack. This value
+ # might need to be adjusted when adding or removing function calls from
+ # the code path in charge of performing these operations.
+ local_recursion_limit = sys.getrecursionlimit() // 16
+ for pos, prefix in enumerate(prefix_gen()):
+ if prefix not in self.subq_aliases:
+ self.alias_prefix = prefix
+ break
+ if pos > local_recursion_limit:
+ raise RecursionError(
+ "Maximum recursion depth exceeded: too many subqueries."
+ )
+ self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
+ other_query.subq_aliases = other_query.subq_aliases.union(self.subq_aliases)
+ if exclude is None:
+ exclude = {}
+ self.change_aliases(
+ {
+ alias: "%s%d" % (self.alias_prefix, pos)
+ for pos, alias in enumerate(self.alias_map)
+ if alias not in exclude
+ }
+ )
+
+ def get_initial_alias(self):
+ """
+ Return the first alias for this query, after increasing its reference
+ count.
+ """
+ if self.alias_map:
+ alias = self.base_table
+ self.ref_alias(alias)
+ elif self.model:
+ alias = self.join(self.base_table_class(self.get_meta().db_table, None))
+ else:
+ alias = None
+ return alias
+
+ def count_active_tables(self):
+ """
+ Return the number of tables in this query with a non-zero reference
+ count. After execution, the reference counts are zeroed, so tables
+ added in the compiler will not be seen by this method.
+ """
+ return len([1 for count in self.alias_refcount.values() if count])
+
+ def join(self, join, reuse=None):
+ """
+ Return an alias for the 'join', either reusing an existing alias for
+ that join or creating a new one. 'join' is either a base_table_class or
+ join_class.
+
+ The 'reuse' parameter can be either None, which means all joins are
+ reusable, or it can be a set containing the aliases that can be reused.
+
+ A join is always created as LOUTER if the lhs alias is LOUTER to make
+ sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
+ joins are created as LOUTER if the join is nullable.
+ """
+ reuse_aliases = [
+ a
+ for a, j in self.alias_map.items()
+ if (reuse is None or a in reuse) and j == join
+ ]
+ if reuse_aliases:
+ if join.table_alias in reuse_aliases:
+ reuse_alias = join.table_alias
+ else:
+ # Reuse the most recent alias of the joined table
+ # (a many-to-many relation may be joined multiple times).
+ reuse_alias = reuse_aliases[-1]
+ self.ref_alias(reuse_alias)
+ return reuse_alias
+
+ # No reuse is possible, so we need a new alias.
+ alias, _ = self.table_alias(
+ join.table_name, create=True, filtered_relation=join.filtered_relation
+ )
+ if join.join_type:
+ if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
+ join_type = LOUTER
+ else:
+ join_type = INNER
+ join.join_type = join_type
+ join.table_alias = alias
+ self.alias_map[alias] = join
+ if filtered_relation := join.filtered_relation:
+ resolve_reuse = reuse
+ if resolve_reuse is not None:
+ resolve_reuse = set(reuse) | {alias}
+ joins_len = len(self.alias_map)
+ join.filtered_relation = filtered_relation.resolve_expression(
+ self, reuse=resolve_reuse
+ )
+ # Some joins were added during expression resolving; they must be
+ # present before the one we just added.
+ if joins_len < len(self.alias_map):
+ self.alias_map[alias] = self.alias_map.pop(alias)
+ return alias
+
+ def join_parent_model(self, opts, model, alias, seen):
+ """
+ Make sure the given 'model' is joined in the query. If 'model' isn't
+ a parent of 'opts' or if it is None this method is a no-op.
+ + The 'alias' is the root alias for starting the join, 'seen' is a dict + of model -> alias of existing joins. It must also contain a mapping + of None -> some alias. This will be returned in the no-op case. + """ + if model in seen: + return seen[model] + chain = opts.get_base_chain(model) + if not chain: + return alias + curr_opts = opts + for int_model in chain: + if int_model in seen: + curr_opts = int_model._meta + alias = seen[int_model] + continue + # Proxy models have elements in the base chain + # with no parents; assign the new options + # object and skip to the next base in that + # case. + if not curr_opts.parents[int_model]: + curr_opts = int_model._meta + continue + link_field = curr_opts.get_ancestor_link(int_model) + join_info = self.setup_joins([link_field.name], curr_opts, alias) + curr_opts = int_model._meta + alias = seen[int_model] = join_info.joins[-1] + return alias or seen[None] + + def check_alias(self, alias): + if FORBIDDEN_ALIAS_PATTERN.search(alias): + raise ValueError( + "Column aliases cannot contain whitespace characters, quotation marks, " + "semicolons, or SQL comments." + ) + + def add_annotation(self, annotation, alias, select=True): + """Add a single annotation expression to the Query.""" + self.check_alias(alias) + annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None) + if select: + self.append_annotation_mask([alias]) + else: + annotation_mask = ( + value + for value in dict.fromkeys(self.annotation_select) + if value != alias + ) + self.set_annotation_mask(annotation_mask) + self.annotations[alias] = annotation + + def resolve_expression(self, query, *args, **kwargs): + clone = self.clone() + # Subqueries need to use a different set of aliases than the outer query. + clone.bump_prefix(query) + clone.subquery = True + clone.where.resolve_expression(query, *args, **kwargs) + # Resolve combined queries. + if clone.combinator: + clone.combined_queries = tuple( + [ + combined_query.resolve_expression(query, *args, **kwargs) + for combined_query in clone.combined_queries + ] + ) + for key, value in clone.annotations.items(): + resolved = value.resolve_expression(query, *args, **kwargs) + if hasattr(resolved, "external_aliases"): + resolved.external_aliases.update(clone.external_aliases) + clone.annotations[key] = resolved + # Outer query's aliases are considered external. + for alias, table in query.alias_map.items(): + clone.external_aliases[alias] = ( + isinstance(table, Join) + and table.join_field.related_model._meta.db_table != alias + ) or ( + isinstance(table, BaseTable) and table.table_name != table.table_alias + ) + return clone + + def get_external_cols(self): + exprs = chain(self.annotations.values(), self.where.children) + return [ + col + for col in self._gen_cols(exprs, include_external=True) + if col.alias in self.external_aliases + ] + + def get_group_by_cols(self, wrapper=None): + # If wrapper is referenced by an alias for an explicit GROUP BY through + # values(), a reference to this expression, and not self, must be + # returned to ensure external column references are not grouped against + # as well. + external_cols = self.get_external_cols() + if any(col.possibly_multivalued for col in external_cols): + return [wrapper or self] + return external_cols + + def as_sql(self, compiler, connection): + # Some backends (e.g. Oracle) raise an error when a subquery contains + # an unnecessary ORDER BY clause.
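+ # Clearing the ordering here is safe: unless the subquery is sliced, + # its ordering cannot affect which rows the outer query sees.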
+ if ( + self.subquery + and not connection.features.ignores_unnecessary_order_by_in_subqueries + ): + self.clear_ordering(force=False) + for query in self.combined_queries: + query.clear_ordering(force=False) + sql, params = self.get_compiler(connection=connection).as_sql() + if self.subquery: + sql = "(%s)" % sql + return sql, params + + def resolve_lookup_value(self, value, can_reuse, allow_joins): + if hasattr(value, "resolve_expression"): + value = value.resolve_expression( + self, + reuse=can_reuse, + allow_joins=allow_joins, + ) + elif isinstance(value, (list, tuple)): + # The items of the iterable may be expressions and therefore need + # to be resolved independently. + values = ( + self.resolve_lookup_value(sub_value, can_reuse, allow_joins) + for sub_value in value + ) + type_ = type(value) + if hasattr(type_, "_make"): # namedtuple + return type_(*values) + return type_(values) + return value + + def solve_lookup_type(self, lookup, summarize=False): + """ + Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains'). + """ + lookup_splitted = lookup.split(LOOKUP_SEP) + if self.annotations: + annotation, expression_lookups = refs_expression( + lookup_splitted, self.annotations + ) + if annotation: + expression = self.annotations[annotation] + if summarize: + expression = Ref(annotation, expression) + return expression_lookups, (), expression + _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) + field_parts = lookup_splitted[0 : len(lookup_splitted) - len(lookup_parts)] + if len(lookup_parts) > 1 and not field_parts: + raise FieldError( + 'Invalid lookup "%s" for model %s.' + % (lookup, self.get_meta().model.__name__) + ) + return lookup_parts, field_parts, False + + def check_query_object_type(self, value, opts, field): + """ + Check whether the object passed while querying is of the correct type. + If not, raise a ValueError specifying the wrong object. + """ + if hasattr(value, "_meta"): + if not check_rel_lookup_compatibility(value._meta.model, opts, field): + raise ValueError( + 'Cannot query "%s": Must be "%s" instance.' + % (value, opts.object_name) + ) + + def check_related_objects(self, field, value, opts): + """Check the type of object passed to query relations.""" + if field.is_relation: + # Check that the field and the queryset use the same model in a + # query like .filter(author=Author.objects.all()). For example, the + # opts would be Author's (from the author field) and value.model + # would be Author.objects.all() queryset's .model (Author also). + # The field is the related field on the lhs side. + if ( + isinstance(value, Query) + and not value.has_select_fields + and not check_rel_lookup_compatibility(value.model, opts, field) + ): + raise ValueError( + 'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' + % (value.model._meta.object_name, opts.object_name) + ) + elif hasattr(value, "_meta"): + self.check_query_object_type(value, opts, field) + elif hasattr(value, "__iter__"): + for v in value: + self.check_query_object_type(v, opts, field) + + def check_filterable(self, expression): + """Raise an error if expression cannot be used in a WHERE clause.""" + if hasattr(expression, "resolve_expression") and not getattr( + expression, "filterable", True + ): + raise NotSupportedError( + expression.__class__.__name__ + " is disallowed in the filter " + "clause."
+ ) + if hasattr(expression, "get_source_expressions"): + for expr in expression.get_source_expressions(): + self.check_filterable(expr) + + def build_lookup(self, lookups, lhs, rhs): + """ + Try to extract transforms and lookup from given lhs. + + The lhs value is something that works like SQLExpression. + The rhs value is what the lookup is going to compare against. + The lookups value is a list of names to extract using get_lookup() + and get_transform(). + """ + # __exact is the default lookup if one isn't given. + *transforms, lookup_name = lookups or ["exact"] + for name in transforms: + lhs = self.try_transform(lhs, name) + # First try get_lookup() so that the lookup takes precedence if the lhs + # supports both transform and lookup for the name. + lookup_class = lhs.get_lookup(lookup_name) + if not lookup_class: + # A lookup wasn't found. Try to interpret the name as a transform + # and do an Exact lookup against it. + lhs = self.try_transform(lhs, lookup_name) + lookup_name = "exact" + lookup_class = lhs.get_lookup(lookup_name) + if not lookup_class: + return + + lookup = lookup_class(lhs, rhs) + # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all + # uses of None as a query value unless the lookup supports it. + if lookup.rhs is None and not lookup.can_use_none_as_rhs: + if lookup_name not in ("exact", "iexact"): + raise ValueError("Cannot use None as a query value") + return lhs.get_lookup("isnull")(lhs, True) + + # For Oracle '' is equivalent to null. The check must be done at this + # stage because join promotion can't be done in the compiler. Using + # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here. + # A similar thing is done in is_nullable(), too. + if ( + lookup_name == "exact" + and lookup.rhs == "" + and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls + ): + return lhs.get_lookup("isnull")(lhs, True) + + return lookup + + def try_transform(self, lhs, name): + """ + Helper method for build_lookup(). Try to fetch and initialize + a transform for the name parameter from lhs. + """ + transform_class = lhs.get_transform(name) + if transform_class: + return transform_class(lhs) + else: + output_field = lhs.output_field.__class__ + suggested_lookups = difflib.get_close_matches( + name, lhs.output_field.get_lookups() + ) + if suggested_lookups: + suggestion = ", perhaps you meant %s?" % " or ".join(suggested_lookups) + else: + suggestion = "." + raise FieldError( + "Unsupported lookup '%s' for %s or join on the field not " + "permitted%s" % (name, output_field.__name__, suggestion) + ) + + def build_filter( + self, + filter_expr, + branch_negated=False, + current_negated=False, + can_reuse=None, + allow_joins=True, + split_subq=True, + check_filterable=True, + summarize=False, + update_join_types=True, + ): + """ + Build a WhereNode for a single filter clause but don't add it + to this Query. Query.add_q() will then add this filter to the where + Node. + + The 'branch_negated' tells us if the current branch contains any + negations. This will be used to determine if subqueries are needed. + + The 'current_negated' is used to determine if the current filter is + negated or not and this will be used to determine if IS NULL filtering + is needed. + + The difference between current_negated and branch_negated is that + branch_negated is set on first negation, but current_negated is + flipped for each negation. + + Note that add_filter will not do any negating itself, that is done + higher up in the code by add_q().
+ + The 'can_reuse' is a set of reusable joins for multijoins. + + The method will create a filter clause that can be added to the current + query. However, if the filter isn't added to the query then the caller + is responsible for unreffing the joins used. + """ + if isinstance(filter_expr, dict): + raise FieldError("Cannot parse keyword query as dict") + if isinstance(filter_expr, Q): + return self._add_q( + filter_expr, + branch_negated=branch_negated, + current_negated=current_negated, + used_aliases=can_reuse, + allow_joins=allow_joins, + split_subq=split_subq, + check_filterable=check_filterable, + summarize=summarize, + update_join_types=update_join_types, + ) + if hasattr(filter_expr, "resolve_expression"): + if not getattr(filter_expr, "conditional", False): + raise TypeError("Cannot filter against a non-conditional expression.") + condition = filter_expr.resolve_expression( + self, allow_joins=allow_joins, reuse=can_reuse, summarize=summarize + ) + if not isinstance(condition, Lookup): + condition = self.build_lookup(["exact"], condition, True) + return WhereNode([condition], connector=AND), [] + arg, value = filter_expr + if not arg: + raise FieldError("Cannot parse keyword query %r" % arg) + lookups, parts, reffed_expression = self.solve_lookup_type(arg, summarize) + + if check_filterable: + self.check_filterable(reffed_expression) + + if not allow_joins and len(parts) > 1: + raise FieldError("Joined field references are not permitted in this query") + + pre_joins = self.alias_refcount.copy() + value = self.resolve_lookup_value(value, can_reuse, allow_joins) + used_joins = { + k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0) + } + + if check_filterable: + self.check_filterable(value) + + if reffed_expression: + condition = self.build_lookup(lookups, reffed_expression, value) + return WhereNode([condition], connector=AND), [] + + opts = self.get_meta() + alias = self.get_initial_alias() + allow_many = not branch_negated or not split_subq + + try: + join_info = self.setup_joins( + parts, + opts, + alias, + can_reuse=can_reuse, + allow_many=allow_many, + ) + + # Prevent iterator from being consumed by check_related_objects() + if isinstance(value, Iterator): + value = list(value) + self.check_related_objects(join_info.final_field, value, join_info.opts) + + # split_exclude() needs to know which joins were generated for the + # lookup parts + self._lookup_joins = join_info.joins + except MultiJoin as e: + return self.split_exclude(filter_expr, can_reuse, e.names_with_path) + + # Update used_joins before trimming since they are reused to determine + # which joins could be later promoted to INNER. 
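+ # For example, filtering on rel_a__col=1 records rel_a's join alias + # here so that JoinPromoter can later vote on promoting or demoting it.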
+ used_joins.update(join_info.joins) + targets, alias, join_list = self.trim_joins( + join_info.targets, join_info.joins, join_info.path + ) + if can_reuse is not None: + can_reuse.update(join_list) + + if join_info.final_field.is_relation: + if len(targets) == 1: + col = self._get_col(targets[0], join_info.final_field, alias) + else: + col = MultiColSource( + alias, targets, join_info.targets, join_info.final_field + ) + else: + col = self._get_col(targets[0], join_info.final_field, alias) + + condition = self.build_lookup(lookups, col, value) + lookup_type = condition.lookup_name + clause = WhereNode([condition], connector=AND) + + require_outer = ( + lookup_type == "isnull" and condition.rhs is True and not current_negated + ) + if ( + current_negated + and (lookup_type != "isnull" or condition.rhs is False) + and condition.rhs is not None + ): + require_outer = True + if lookup_type != "isnull": + # The condition added here will be SQL like this: + # NOT (col IS NOT NULL), where the first NOT is added in + # upper layers of code. The reason for the addition is that if col + # is null, then col != someval will result in SQL "unknown" + # which isn't the same as in Python. The Python None handling + # is wanted, and it can be gotten by + # (col IS NULL OR col != someval) + # <=> + # NOT (col IS NOT NULL AND col = someval). + if ( + self.is_nullable(targets[0]) + or self.alias_map[join_list[-1]].join_type == LOUTER + ): + lookup_class = targets[0].get_lookup("isnull") + col = self._get_col(targets[0], join_info.targets[0], alias) + clause.add(lookup_class(col, False), AND) + # If someval is a nullable column, someval IS NOT NULL is + # added. + if isinstance(value, Col) and self.is_nullable(value.target): + lookup_class = value.target.get_lookup("isnull") + clause.add(lookup_class(value, False), AND) + return clause, used_joins if not require_outer else () + + def add_filter(self, filter_lhs, filter_rhs): + self.add_q(Q((filter_lhs, filter_rhs))) + + def add_q(self, q_object): + """ + A preprocessor for the internal _add_q(). Responsible for doing final + join promotion. + """ + # For join promotion this case is doing an AND for the added q_object + # and existing conditions. So, any existing inner join forces the join + # type to remain inner. Existing outer joins can however be demoted. + # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if + # rel_a doesn't produce any rows, then the whole condition must fail. + # So, demotion is OK.)
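+ # For example, ANDing rel_a__col=1 onto a query where rel_a is LOUTER + # allows demoting rel_a to INNER: rows with no rel_a match could not + # satisfy the new condition anyway.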
+ existing_inner = { + a for a in self.alias_map if self.alias_map[a].join_type == INNER + } + clause, _ = self._add_q(q_object, self.used_aliases) + if clause: + self.where.add(clause, AND) + self.demote_joins(existing_inner) + + def build_where(self, filter_expr): + return self.build_filter(filter_expr, allow_joins=False)[0] + + def clear_where(self): + self.where = WhereNode() + + def _add_q( + self, + q_object, + used_aliases, + branch_negated=False, + current_negated=False, + allow_joins=True, + split_subq=True, + check_filterable=True, + summarize=False, + update_join_types=True, + ): + """Add a Q-object to the current filter.""" + connector = q_object.connector + current_negated ^= q_object.negated + branch_negated = branch_negated or q_object.negated + target_clause = WhereNode(connector=connector, negated=q_object.negated) + joinpromoter = JoinPromoter( + q_object.connector, len(q_object.children), current_negated + ) + for child in q_object.children: + child_clause, needed_inner = self.build_filter( + child, + can_reuse=used_aliases, + branch_negated=branch_negated, + current_negated=current_negated, + allow_joins=allow_joins, + split_subq=split_subq, + check_filterable=check_filterable, + summarize=summarize, + update_join_types=update_join_types, + ) + joinpromoter.add_votes(needed_inner) + if child_clause: + target_clause.add(child_clause, connector) + if update_join_types: + needed_inner = joinpromoter.update_join_types(self) + else: + needed_inner = [] + return target_clause, needed_inner + + def add_filtered_relation(self, filtered_relation, alias): + filtered_relation.alias = alias + lookups = dict(get_children_from_q(filtered_relation.condition)) + relation_lookup_parts, relation_field_parts, _ = self.solve_lookup_type( + filtered_relation.relation_name + ) + if relation_lookup_parts: + raise ValueError( + "FilteredRelation's relation_name cannot contain lookups " + "(got %r)." % filtered_relation.relation_name + ) + for lookup in chain(lookups): + lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup) + shift = 2 if not lookup_parts else 1 + lookup_field_path = lookup_field_parts[:-shift] + for idx, lookup_field_part in enumerate(lookup_field_path): + if len(relation_field_parts) > idx: + if relation_field_parts[idx] != lookup_field_part: + raise ValueError( + "FilteredRelation's condition doesn't support " + "relations outside the %r (got %r)." + % (filtered_relation.relation_name, lookup) + ) + else: + raise ValueError( + "FilteredRelation's condition doesn't support nested " + "relations deeper than the relation_name (got %r for " + "%r)." % (lookup, filtered_relation.relation_name) + ) + filtered_relation.condition = rename_prefix_from_q( + filtered_relation.relation_name, + alias, + filtered_relation.condition, + ) + self._filtered_relations[filtered_relation.alias] = filtered_relation + + def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False): + """ + Walk the list of names and turn them into PathInfo tuples. A single + name in 'names' can generate multiple PathInfos (m2m, for example). + + 'names' is the path of names to travel, 'opts' is the model Options we + start the name resolving from, 'allow_many' is as for setup_joins(). + If fail_on_missing is set to True, then a name that can't be resolved + will generate a FieldError. + + Return a list of PathInfo tuples. In addition return the final field + (the last used join field) and target (which is a field guaranteed to + contain the same value as the final field).
Finally, return those names + that weren't found (which are likely transforms and the final lookup). + """ + path, names_with_path = [], [] + for pos, name in enumerate(names): + cur_names_with_path = (name, []) + if name == "pk": + name = opts.pk.name + + field = None + filtered_relation = None + try: + if opts is None: + raise FieldDoesNotExist + field = opts.get_field(name) + except FieldDoesNotExist: + if name in self.annotation_select: + field = self.annotation_select[name].output_field + elif name in self._filtered_relations and pos == 0: + filtered_relation = self._filtered_relations[name] + if LOOKUP_SEP in filtered_relation.relation_name: + parts = filtered_relation.relation_name.split(LOOKUP_SEP) + filtered_relation_path, field, _, _ = self.names_to_path( + parts, + opts, + allow_many, + fail_on_missing, + ) + path.extend(filtered_relation_path[:-1]) + else: + field = opts.get_field(filtered_relation.relation_name) + if field is not None: + # Fields that contain one-to-many relations with a generic + # model (like a GenericForeignKey) cannot generate reverse + # relations and therefore cannot be used for reverse querying. + if field.is_relation and not field.related_model: + raise FieldError( + "Field %r does not generate an automatic reverse " + "relation and therefore cannot be used for reverse " + "querying. If it is a GenericForeignKey, consider " + "adding a GenericRelation." % name + ) + try: + model = field.model._meta.concrete_model + except AttributeError: + # QuerySet.annotate() may introduce fields that aren't + # attached to a model. + model = None + else: + # We didn't find the current field, so move position back + # one step. + pos -= 1 + if pos == -1 or fail_on_missing: + available = sorted( + [ + *get_field_names_from_opts(opts), + *self.annotation_select, + *self._filtered_relations, + ] + ) + raise FieldError( + "Cannot resolve keyword '%s' into field. " + "Choices are: %s" % (name, ", ".join(available)) + ) + break + # Check if we need any joins for concrete inheritance cases (the + # field lives in parent, but we are currently in one of its + # children) + if opts is not None and model is not opts.model: + path_to_parent = opts.get_path_to_parent(model) + if path_to_parent: + path.extend(path_to_parent) + cur_names_with_path[1].extend(path_to_parent) + opts = path_to_parent[-1].to_opts + if hasattr(field, "path_infos"): + if filtered_relation: + pathinfos = field.get_path_info(filtered_relation) + else: + pathinfos = field.path_infos + if not allow_many: + for inner_pos, p in enumerate(pathinfos): + if p.m2m: + cur_names_with_path[1].extend(pathinfos[0 : inner_pos + 1]) + names_with_path.append(cur_names_with_path) + raise MultiJoin(pos + 1, names_with_path) + last = pathinfos[-1] + path.extend(pathinfos) + final_field = last.join_field + opts = last.to_opts + targets = last.target_fields + cur_names_with_path[1].extend(pathinfos) + names_with_path.append(cur_names_with_path) + else: + # Local non-relational field. + final_field = field + targets = (field,) + if fail_on_missing and pos + 1 != len(names): + raise FieldError( + "Cannot resolve keyword %r into field. Join on '%s'" + " not permitted." % (names[pos + 1], name) + ) + break + return path, final_field, targets, names[pos + 1 :] + + def setup_joins( + self, + names, + opts, + alias, + can_reuse=None, + allow_many=True, + ): + """ + Compute the necessary table joins for the passage through the fields + given in 'names'. 
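+ For example (with hypothetical Book and Author models), + names=['author', 'name'] starting from Book joins the author table + and targets the Author.name column.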
'opts' is the Options class for the current model + (which gives the table we are starting from), 'alias' is the alias for + the table to start the joining from. + + The 'can_reuse' defines the reverse foreign key joins we can reuse. It + can be None in which case all joins are reusable or a set of aliases + that can be reused. Note that non-reverse foreign keys are always + reusable when using setup_joins(). + + If 'allow_many' is False, then any reverse foreign key seen will + generate a MultiJoin exception. + + Return the final field involved in the joins, the target field (used + for any 'where' constraint), the final 'opts' value, the joins, the + field path traveled to generate the joins, and a transform function + that takes a field and alias and is equivalent to `field.get_col(alias)` + in the simple case but wraps field transforms if they were included in + names. + + The target field is the field containing the concrete value. Final + field can be something different, for example foreign key pointing to + that value. Final field is needed for example in some value + conversions (convert 'obj' in fk__id=obj to pk val using the foreign + key field for example). + """ + joins = [alias] + # The transform can't be applied yet, as joins must be trimmed later. + # To avoid making every caller of this method look up transforms + # directly, compute transforms here and create a partial that converts + # fields to the appropriate wrapped version. + + def final_transformer(field, alias): + if not self.alias_cols: + alias = None + return field.get_col(alias) + + # Try resolving all the names as fields first. If there's an error, + # treat trailing names as lookups until a field can be resolved. + last_field_exception = None + for pivot in range(len(names), 0, -1): + try: + path, final_field, targets, rest = self.names_to_path( + names[:pivot], + opts, + allow_many, + fail_on_missing=True, + ) + except FieldError as exc: + if pivot == 1: + # The first item cannot be a lookup, so it's safe + # to raise the field error here. + raise + else: + last_field_exception = exc + else: + # The transforms are the remaining items that couldn't be + # resolved into fields. + transforms = names[pivot:] + break + for name in transforms: + + def transform(field, alias, *, name, previous): + try: + wrapped = previous(field, alias) + return self.try_transform(wrapped, name) + except FieldError: + # FieldError is raised if the transform doesn't exist. + if isinstance(final_field, Field) and last_field_exception: + raise last_field_exception + else: + raise + + final_transformer = functools.partial( + transform, name=name, previous=final_transformer + ) + final_transformer.has_transforms = True + # Then, add the path to the query's joins. Note that we can't trim + # joins at this stage - we will need the information about join type + # of the trimmed joins. 
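+ # For example, names=['author', 'name', 'lower'] (hypothetical) + # resolves 'author__name' to a field and keeps the trailing 'lower' + # as a transform applied through final_transformer.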
+ for join in path: + if join.filtered_relation: + filtered_relation = join.filtered_relation.clone() + table_alias = filtered_relation.alias + else: + filtered_relation = None + table_alias = None + opts = join.to_opts + if join.direct: + nullable = self.is_nullable(join.join_field) + else: + nullable = True + connection = self.join_class( + opts.db_table, + alias, + table_alias, + INNER, + join.join_field, + nullable, + filtered_relation=filtered_relation, + ) + reuse = can_reuse if join.m2m else None + alias = self.join(connection, reuse=reuse) + joins.append(alias) + return JoinInfo(final_field, targets, opts, joins, path, final_transformer) + + def trim_joins(self, targets, joins, path): + """ + The 'targets' parameter contains the final fields being joined to, + 'joins' is the full list of join aliases, and 'path' contains the + PathInfos used to create the joins. + + Return the final target field and table alias and the new active + joins. + + Always trim any direct join if the target column is already in the + previous table. Can't trim reverse joins as it's unknown if there's + anything on the other side of the join. + """ + joins = joins[:] + for pos, info in enumerate(reversed(path)): + if len(joins) == 1 or not info.direct: + break + if info.filtered_relation: + break + join_targets = {t.column for t in info.join_field.foreign_related_fields} + cur_targets = {t.column for t in targets} + if not cur_targets.issubset(join_targets): + break + targets_dict = { + r[1].column: r[0] + for r in info.join_field.related_fields + if r[1].column in cur_targets + } + targets = tuple(targets_dict[t.column] for t in targets) + self.unref_alias(joins.pop()) + return targets, joins[-1], joins + + @classmethod + def _gen_cols(cls, exprs, include_external=False, resolve_refs=True): + for expr in exprs: + if isinstance(expr, Col): + yield expr + elif include_external and callable( + getattr(expr, "get_external_cols", None) + ): + yield from expr.get_external_cols() + elif hasattr(expr, "get_source_expressions"): + if not resolve_refs and isinstance(expr, Ref): + continue + yield from cls._gen_cols( + expr.get_source_expressions(), + include_external=include_external, + resolve_refs=resolve_refs, + ) + + @classmethod + def _gen_col_aliases(cls, exprs): + yield from (expr.alias for expr in cls._gen_cols(exprs)) + + def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False): + annotation = self.annotations.get(name) + if annotation is not None: + if not allow_joins: + for alias in self._gen_col_aliases([annotation]): + if isinstance(self.alias_map[alias], Join): + raise FieldError( + "Joined field references are not permitted in this query" + ) + if summarize: + # Summarize currently means we are doing an aggregate() query + # which is executed as a wrapped subquery if any of the + # aggregate() elements reference an existing annotation. In + # that case we need to return a Ref to the subquery's annotation. + if name not in self.annotation_select: + raise FieldError( + "Cannot aggregate over the '%s' alias. Use annotate() " + "to promote it."
% name + ) + return Ref(name, self.annotation_select[name]) + else: + return annotation + else: + field_list = name.split(LOOKUP_SEP) + annotation = self.annotations.get(field_list[0]) + if annotation is not None: + for transform in field_list[1:]: + annotation = self.try_transform(annotation, transform) + return annotation + join_info = self.setup_joins( + field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse + ) + targets, final_alias, join_list = self.trim_joins( + join_info.targets, join_info.joins, join_info.path + ) + if not allow_joins and len(join_list) > 1: + raise FieldError( + "Joined field references are not permitted in this query" + ) + if len(targets) > 1: + raise FieldError( + "Referencing multicolumn fields with F() objects isn't supported" + ) + # Verify that the last lookup in name is a field or a transform: + # transform_function() raises FieldError if not. + transform = join_info.transform_function(targets[0], final_alias) + if reuse is not None: + reuse.update(join_list) + return transform + + def split_exclude(self, filter_expr, can_reuse, names_with_path): + """ + When doing an exclude against any kind of N-to-many relation, we need + to use a subquery. This method constructs the nested query, given the + original exclude filter (filter_expr) and the portion up to the first + N-to-many relation field. + + For example, if the original filter is ~Q(child__name='foo'), filter_expr + is ('child__name', 'foo') and can_reuse is a set of joins usable for + filters in the original query. + + We will turn this into the equivalent of: + WHERE NOT EXISTS( + SELECT 1 + FROM child + WHERE name = 'foo' AND child.parent_id = parent.id + LIMIT 1 + ) + """ + # Generate the inner query. + query = self.__class__(self.model) + query._filtered_relations = self._filtered_relations + filter_lhs, filter_rhs = filter_expr + if isinstance(filter_rhs, OuterRef): + filter_rhs = OuterRef(filter_rhs) + elif isinstance(filter_rhs, F): + filter_rhs = OuterRef(filter_rhs.name) + query.add_filter(filter_lhs, filter_rhs) + query.clear_ordering(force=True) + # Keep the subquery as simple as possible by trimming leading joins + # from it. + trimmed_prefix, contains_louter = query.trim_start(names_with_path) + + col = query.select[0] + select_field = col.target + alias = col.alias + if alias in can_reuse: + pk = select_field.model._meta.pk + # Need to add a restriction so that outer query's filters are in effect for + # the subquery, too. + query.bump_prefix(self) + lookup_class = select_field.get_lookup("exact") + # Note that the query.select[0].alias is different from alias + # due to bump_prefix above. + lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias)) + query.where.add(lookup, AND) + query.external_aliases[alias] = True + else: + lookup_class = select_field.get_lookup("exact") + lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix)) + query.where.add(lookup, AND) + + condition, needed_inner = self.build_filter(Exists(query)) + + if contains_louter: + or_null_condition, _ = self.build_filter( + ("%s__isnull" % trimmed_prefix, True), + current_negated=True, + branch_negated=True, + can_reuse=can_reuse, + ) + condition.add(or_null_condition, OR) + # Note that the end result will be: + # NOT EXISTS (inner_q) OR outercol IS NULL + # this might look odd, but due to how NULL comparisons work it is + # correct: if the IS NULL check were removed, rows where outercol + # IS NULL would never match.
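+ # Concretely, a row whose trimmed_prefix column is NULL cannot be + # correlated with any subquery row, so only the IS NULL branch can + # match it.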
+ return condition, needed_inner + + def set_empty(self): + self.where.add(NothingNode(), AND) + for query in self.combined_queries: + query.set_empty() + + def is_empty(self): + return any(isinstance(c, NothingNode) for c in self.where.children) + + def set_limits(self, low=None, high=None): + """ + Adjust the limits on the rows retrieved. Use low/high to set these, + as it makes it more Pythonic to read and write. When the SQL query is + created, convert them to the appropriate offset and limit values. + + Apply any limits passed in here to the existing constraints. Add low + to the current low value and clamp both to any existing high value. + """ + if high is not None: + if self.high_mark is not None: + self.high_mark = min(self.high_mark, self.low_mark + high) + else: + self.high_mark = self.low_mark + high + if low is not None: + if self.high_mark is not None: + self.low_mark = min(self.high_mark, self.low_mark + low) + else: + self.low_mark = self.low_mark + low + + if self.low_mark == self.high_mark: + self.set_empty() + + def clear_limits(self): + """Clear any existing limits.""" + self.low_mark, self.high_mark = 0, None + + @property + def is_sliced(self): + return self.low_mark != 0 or self.high_mark is not None + + def has_limit_one(self): + return self.high_mark is not None and (self.high_mark - self.low_mark) == 1 + + def can_filter(self): + """ + Return True if adding filters to this instance is still possible. + + Typically, this means no limits or offsets have been put on the results. + """ + return not self.is_sliced + + def clear_select_clause(self): + """Remove all fields from SELECT clause.""" + self.select = () + self.default_cols = False + self.select_related = False + self.set_extra_mask(()) + self.set_annotation_mask(()) + + def clear_select_fields(self): + """ + Clear the list of fields to select (but not extra_select columns). + Some queryset types completely replace any existing list of select + columns. + """ + self.select = () + self.values_select = () + + def add_select_col(self, col, name): + self.select += (col,) + self.values_select += (name,) + + def set_select(self, cols): + self.default_cols = False + self.select = tuple(cols) + + def add_distinct_fields(self, *field_names): + """ + Add and resolve the given fields to the query's "distinct on" clause. + """ + self.distinct_fields = field_names + self.distinct = True + + def add_fields(self, field_names, allow_m2m=True): + """ + Add the given (model) fields to the select set. Add the field names in + the order specified. + """ + alias = self.get_initial_alias() + opts = self.get_meta() + + try: + cols = [] + for name in field_names: + # Join promotion note - we must not remove any rows here, so + # if there are no existing joins, use an outer join. + join_info = self.setup_joins( + name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m + ) + targets, final_alias, joins = self.trim_joins( + join_info.targets, + join_info.joins, + join_info.path, + ) + for target in targets: + cols.append(join_info.transform_function(target, final_alias)) + if cols: + self.set_select(cols) + except MultiJoin: + raise FieldError("Invalid field name: '%s'" % name) + except FieldError: + if LOOKUP_SEP in name: + # For lookups spanning over relationships, show the error + # from the model on which the lookup failed. + raise + else: + names = sorted( + [ + *get_field_names_from_opts(opts), + *self.extra, + *self.annotation_select, + *self._filtered_relations, + ] + ) + raise FieldError( + "Cannot resolve keyword %r into field. 
" + "Choices are: %s" % (name, ", ".join(names)) + ) + + def add_ordering(self, *ordering): + """ + Add items from the 'ordering' sequence to the query's "order by" + clause. These items are either field names (not column names) -- + possibly with a direction prefix ('-' or '?') -- or OrderBy + expressions. + + If 'ordering' is empty, clear all ordering from the query. + """ + errors = [] + for item in ordering: + if isinstance(item, str): + if item == "?": + continue + item = item.removeprefix("-") + if item in self.annotations: + continue + if self.extra and item in self.extra: + continue + # names_to_path() validates the lookup. A descriptive + # FieldError will be raise if it's not. + self.names_to_path(item.split(LOOKUP_SEP), self.model._meta) + elif not hasattr(item, "resolve_expression"): + errors.append(item) + if getattr(item, "contains_aggregate", False): + raise FieldError( + "Using an aggregate in order_by() without also including " + "it in annotate() is not allowed: %s" % item + ) + if errors: + raise FieldError("Invalid order_by arguments: %s" % errors) + if ordering: + self.order_by += ordering + else: + self.default_ordering = False + + def clear_ordering(self, force=False, clear_default=True): + """ + Remove any ordering settings if the current query allows it without + side effects, set 'force' to True to clear the ordering regardless. + If 'clear_default' is True, there will be no ordering in the resulting + query (not even the model's default). + """ + if not force and ( + self.is_sliced or self.distinct_fields or self.select_for_update + ): + return + self.order_by = () + self.extra_order_by = () + if clear_default: + self.default_ordering = False + + def set_group_by(self, allow_aliases=True): + """ + Expand the GROUP BY clause required by the query. + + This will usually be the set of all non-aggregate fields in the + return data. If the database backend supports grouping by the + primary key, and the query would be equivalent, the optimization + will be made automatically. + """ + if allow_aliases and self.values_select: + # If grouping by aliases is allowed assign selected value aliases + # by moving them to annotations. + group_by_annotations = {} + values_select = {} + for alias, expr in zip(self.values_select, self.select): + if isinstance(expr, Col): + values_select[alias] = expr + else: + group_by_annotations[alias] = expr + self.annotations = {**group_by_annotations, **self.annotations} + self.append_annotation_mask(group_by_annotations) + self.select = tuple(values_select.values()) + self.values_select = tuple(values_select) + group_by = list(self.select) + for alias, annotation in self.annotation_select.items(): + if not (group_by_cols := annotation.get_group_by_cols()): + continue + if allow_aliases and not annotation.contains_aggregate: + group_by.append(Ref(alias, annotation)) + else: + group_by.extend(group_by_cols) + self.group_by = tuple(group_by) + + def add_select_related(self, fields): + """ + Set up the select_related data structure so that we only select + certain related models (as opposed to all models, when + self.select_related=True). 
+ """ + if isinstance(self.select_related, bool): + field_dict = {} + else: + field_dict = self.select_related + for field in fields: + d = field_dict + for part in field.split(LOOKUP_SEP): + d = d.setdefault(part, {}) + self.select_related = field_dict + + def add_extra(self, select, select_params, where, params, tables, order_by): + """ + Add data to the various extra_* attributes for user-created additions + to the query. + """ + if select: + # We need to pair any placeholder markers in the 'select' + # dictionary with their parameters in 'select_params' so that + # subsequent updates to the select dictionary also adjust the + # parameters appropriately. + select_pairs = {} + if select_params: + param_iter = iter(select_params) + else: + param_iter = iter([]) + for name, entry in select.items(): + self.check_alias(name) + entry = str(entry) + entry_params = [] + pos = entry.find("%s") + while pos != -1: + if pos == 0 or entry[pos - 1] != "%": + entry_params.append(next(param_iter)) + pos = entry.find("%s", pos + 2) + select_pairs[name] = (entry, entry_params) + self.extra.update(select_pairs) + if where or params: + self.where.add(ExtraWhere(where, params), AND) + if tables: + self.extra_tables += tuple(tables) + if order_by: + self.extra_order_by = order_by + + def clear_deferred_loading(self): + """Remove any fields from the deferred loading set.""" + self.deferred_loading = (frozenset(), True) + + def add_deferred_loading(self, field_names): + """ + Add the given list of model field names to the set of fields to + exclude from loading from the database when automatic column selection + is done. Add the new field names to any existing field names that + are deferred (or removed from any existing field names that are marked + as the only ones for immediate loading). + """ + # Fields on related models are stored in the literal double-underscore + # format, so that we can use a set datastructure. We do the foo__bar + # splitting and handling when computing the SQL column names (as part of + # get_columns()). + existing, defer = self.deferred_loading + if defer: + # Add to existing deferred names. + self.deferred_loading = existing.union(field_names), True + else: + # Remove names from the set of any existing "immediate load" names. + if new_existing := existing.difference(field_names): + self.deferred_loading = new_existing, False + else: + self.clear_deferred_loading() + if new_only := set(field_names).difference(existing): + self.deferred_loading = new_only, True + + def add_immediate_loading(self, field_names): + """ + Add the given list of model field names to the set of fields to + retrieve when the SQL is executed ("immediate loading" fields). The + field names replace any existing immediate loading field names. If + there are field names already specified for deferred loading, remove + those names from the new field_names before storing the new names + for immediate loading. (That is, immediate loading overrides any + existing immediate values, but respects existing deferrals.) + """ + existing, defer = self.deferred_loading + field_names = set(field_names) + if "pk" in field_names: + field_names.remove("pk") + field_names.add(self.get_meta().pk.name) + + if defer: + # Remove any existing deferred names from the current set before + # setting the new names. + self.deferred_loading = field_names.difference(existing), False + else: + # Replace any existing "immediate load" field names. 
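+ # For example, qs.only('a').only('b') reaches this branch and the + # immediate-load set becomes {'b'}, replacing {'a'}.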
+ self.deferred_loading = frozenset(field_names), False + + def set_annotation_mask(self, names): + """Set the mask of annotations that will be returned by the SELECT.""" + if names is None: + self.annotation_select_mask = None + else: + self.annotation_select_mask = list(dict.fromkeys(names)) + self._annotation_select_cache = None + + def append_annotation_mask(self, names): + if self.annotation_select_mask is not None: + self.set_annotation_mask((*self.annotation_select_mask, *names)) + + def set_extra_mask(self, names): + """ + Set the mask of extra select items that will be returned by SELECT. + Don't remove them from the Query since they might be used later. + """ + if names is None: + self.extra_select_mask = None + else: + self.extra_select_mask = set(names) + self._extra_select_cache = None + + def set_values(self, fields): + self.select_related = False + self.clear_deferred_loading() + self.clear_select_fields() + self.has_select_fields = True + + if fields: + field_names = [] + extra_names = [] + annotation_names = [] + if not self.extra and not self.annotations: + # Shortcut - if there are no extra or annotations, then + # the values() clause must be just field names. + field_names = list(fields) + else: + self.default_cols = False + for f in fields: + if f in self.extra_select: + extra_names.append(f) + elif f in self.annotation_select: + annotation_names.append(f) + elif f in self.annotations: + raise FieldError( + f"Cannot select the '{f}' alias. Use annotate() to " + "promote it." + ) + else: + # Call `names_to_path` to ensure a FieldError including + # annotations about to be masked as valid choices if + # `f` is not resolvable. + if self.annotation_select: + self.names_to_path(f.split(LOOKUP_SEP), self.model._meta) + field_names.append(f) + self.set_extra_mask(extra_names) + self.set_annotation_mask(annotation_names) + selected = frozenset(field_names + extra_names + annotation_names) + else: + field_names = [f.attname for f in self.model._meta.concrete_fields] + selected = frozenset(field_names) + # Selected annotations must be known before setting the GROUP BY + # clause. + if self.group_by is True: + self.add_fields( + (f.attname for f in self.model._meta.concrete_fields), False + ) + # Disable GROUP BY aliases to avoid orphaning references to the + # SELECT clause which is about to be cleared. + self.set_group_by(allow_aliases=False) + self.clear_select_fields() + elif self.group_by: + # Resolve GROUP BY annotation references if they are not part of + # the selected fields anymore. + group_by = [] + for expr in self.group_by: + if isinstance(expr, Ref) and expr.refs not in selected: + expr = self.annotations[expr.refs] + group_by.append(expr) + self.group_by = tuple(group_by) + + self.values_select = tuple(field_names) + self.add_fields(field_names, True) + + @property + def annotation_select(self): + """ + Return the dictionary of aggregate columns that are not masked and + should be used in the SELECT clause. Cache this result for performance. 
+ """ + if self._annotation_select_cache is not None: + return self._annotation_select_cache + elif not self.annotations: + return {} + elif self.annotation_select_mask is not None: + self._annotation_select_cache = { + k: self.annotations[k] + for k in self.annotation_select_mask + if k in self.annotations + } + return self._annotation_select_cache + else: + return self.annotations + + @property + def extra_select(self): + if self._extra_select_cache is not None: + return self._extra_select_cache + if not self.extra: + return {} + elif self.extra_select_mask is not None: + self._extra_select_cache = { + k: v for k, v in self.extra.items() if k in self.extra_select_mask + } + return self._extra_select_cache + else: + return self.extra + + def trim_start(self, names_with_path): + """ + Trim joins from the start of the join path. The candidates for trim + are the PathInfos in names_with_path structure that are m2m joins. + + Also set the select column so the start matches the join. + + This method is meant to be used for generating the subquery joins & + cols in split_exclude(). + + Return a lookup usable for doing outerq.filter(lookup=self) and a + boolean indicating if the joins in the prefix contain a LEFT OUTER join. + _""" + all_paths = [] + for _, paths in names_with_path: + all_paths.extend(paths) + contains_louter = False + # Trim and operate only on tables that were generated for + # the lookup part of the query. That is, avoid trimming + # joins generated for F() expressions. + lookup_tables = [ + t for t in self.alias_map if t in self._lookup_joins or t == self.base_table + ] + for trimmed_paths, path in enumerate(all_paths): + if path.m2m: + break + if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER: + contains_louter = True + alias = lookup_tables[trimmed_paths] + self.unref_alias(alias) + # The path.join_field is a Rel, lets get the other side's field + join_field = path.join_field.field + # Build the filter prefix. + paths_in_prefix = trimmed_paths + trimmed_prefix = [] + for name, path in names_with_path: + if paths_in_prefix - len(path) < 0: + break + trimmed_prefix.append(name) + paths_in_prefix -= len(path) + trimmed_prefix.append(join_field.foreign_related_fields[0].name) + trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix) + # Lets still see if we can trim the first join from the inner query + # (that is, self). We can't do this for: + # - LEFT JOINs because we would miss those rows that have nothing on + # the outer side, + # - INNER JOINs from filtered relations because we would miss their + # filters. + first_join = self.alias_map[lookup_tables[trimmed_paths + 1]] + if first_join.join_type != LOUTER and not first_join.filtered_relation: + select_fields = [r[0] for r in join_field.related_fields] + select_alias = lookup_tables[trimmed_paths + 1] + self.unref_alias(lookup_tables[trimmed_paths]) + extra_restriction = join_field.get_extra_restriction( + None, lookup_tables[trimmed_paths + 1] + ) + if extra_restriction: + self.where.add(extra_restriction, AND) + else: + # TODO: It might be possible to trim more joins from the start of the + # inner query if it happens to have a longer join chain containing the + # values in select_fields. Lets punt this one for now. + select_fields = [r[1] for r in join_field.related_fields] + select_alias = lookup_tables[trimmed_paths] + # The found starting point is likely a join_class instead of a + # base_table_class reference. But the first entry in the query's FROM + # clause must not be a JOIN. 
+ for table in self.alias_map: + if self.alias_refcount[table] > 0: + self.alias_map[table] = self.base_table_class( + self.alias_map[table].table_name, + table, + ) + break + self.set_select([f.get_col(select_alias) for f in select_fields]) + return trimmed_prefix, contains_louter + + def is_nullable(self, field): + """ + Check if the given field should be treated as nullable. + + Some backends treat '' as null and Django treats such fields as + nullable for those backends. In such situations field.null can be + False even if we should treat the field as nullable. + """ + # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have + # (nor should it have) knowledge of which connection is going to be + # used. The proper fix would be to defer all decisions where + # is_nullable() is needed to the compiler stage, but that is not easy + # to do currently. + return field.null or ( + field.empty_strings_allowed + and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls + ) + + +def get_order_dir(field, default="ASC"): + """ + Return the field name and direction for an order specification. For + example, '-foo' is returned as ('foo', 'DESC'). + + The 'default' param is used to indicate which way no prefix (or a '+' + prefix) should sort. The '-' prefix always sorts the opposite way. + """ + dirn = ORDER_DIR[default] + if field[0] == "-": + return field[1:], dirn[1] + return field, dirn[0] + + +class JoinPromoter: + """ + A class to abstract away join promotion problems for complex filter + conditions. + """ + + def __init__(self, connector, num_children, negated): + self.connector = connector + self.negated = negated + if self.negated: + if connector == AND: + self.effective_connector = OR + else: + self.effective_connector = AND + else: + self.effective_connector = self.connector + self.num_children = num_children + # Maps of table alias to how many times it is seen as required for + # inner and/or outer joins. + self.votes = Counter() + + def __repr__(self): + return ( + f"{self.__class__.__qualname__}(connector={self.connector!r}, " + f"num_children={self.num_children!r}, negated={self.negated!r})" + ) + + def add_votes(self, votes): + """ + Add a single vote per item to self.votes. The parameter can be any + iterable. + """ + self.votes.update(votes) + + def update_join_types(self, query): + """ + Change join types so that the generated query is as efficient as + possible, but still correct. So, change as many joins as possible + to INNER, but don't make OUTER joins INNER if that could remove + results from the query. + """ + to_promote = set() + to_demote = set() + # The effective_connector is used so that NOT (a AND b) is treated + # similarly to (a OR b) for join promotion. + for table, votes in self.votes.items(): + # We must use outer joins in OR case when the join isn't contained + # in all of the joins. Otherwise the INNER JOIN itself could remove + # valid results. Consider the case where a model with rel_a and + # rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now, + # if the rel_a join doesn't produce any results (for example, a + # reverse foreign key with no rows or a null value in a direct + # foreign key), and there is a matching row in rel_b with col=2, + # then an INNER join to rel_a would remove a valid match from the + # query. So, we need to promote any existing INNER to LOUTER (it is + # possible this promotion in turn will be demoted later on).
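+ # For example, for rel_a__col=1 | rel_b__col=2, num_children is 2 and + # each of rel_a and rel_b receives a single vote, so both joins are + # promoted to LOUTER by this branch.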
+ if self.effective_connector == OR and votes < self.num_children: + to_promote.add(table) + # If connector is AND and there is a filter that can match only + # when there is a joinable row, then use INNER. For example, in + # rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL + # as join output, then the col=1 or col=2 can't match (as + # NULL=anything is always false). + # For the OR case, if all children voted for a join to be inner, + # then we can use INNER for the join. For example: + # (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell) + # then if rel_a doesn't produce any rows, the whole condition + # can't match. Hence we can safely use INNER join. + if self.effective_connector == AND or ( + self.effective_connector == OR and votes == self.num_children + ): + to_demote.add(table) + # Finally, what happens in cases where we have: + # (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0 + # Now, we first generate the OR clause, and promote joins for it + # in the first if branch above. Both rel_a and rel_b are promoted + # to LOUTER joins. After that we do the AND case. The OR case + # voted no inner joins but the rel_a__col__gte=0 votes inner join + # for rel_a. We demote it back to INNER join (in AND case a single + # vote is enough). The demotion is OK, if rel_a doesn't produce + # rows, then the rel_a__col__gte=0 clause can't be true, and thus + # the whole clause must be false. So, it is safe to use INNER + # join. + # Note that in this example we could just as well have the __gte + # clause and the OR clause swapped. Or we could replace the __gte + # clause with an OR clause containing rel_a__col=1|rel_a__col=2, + # and again we could safely demote to INNER. + query.promote_joins(to_promote) + query.demote_joins(to_demote) + return to_demote diff --git a/testbed/django__django/django/db/models/sql/subqueries.py b/testbed/django__django/django/db/models/sql/subqueries.py new file mode 100644 index 0000000000000000000000000000000000000000..d8a246d36968f516b54f400d79e5bd2fd1a6e2d9 --- /dev/null +++ b/testbed/django__django/django/db/models/sql/subqueries.py @@ -0,0 +1,171 @@ +""" +Query subclasses which provide extra functionality beyond simple data retrieval. +""" + +from django.core.exceptions import FieldError +from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE, NO_RESULTS +from django.db.models.sql.query import Query + +__all__ = ["DeleteQuery", "UpdateQuery", "InsertQuery", "AggregateQuery"] + + +class DeleteQuery(Query): + """A DELETE SQL query.""" + + compiler = "SQLDeleteCompiler" + + def do_query(self, table, where, using): + self.alias_map = {table: self.alias_map[table]} + self.where = where + cursor = self.get_compiler(using).execute_sql(CURSOR) + if cursor: + with cursor: + return cursor.rowcount + return 0 + + def delete_batch(self, pk_list, using): + """ + Set up and execute delete queries for all the objects in pk_list. + + More than one physical query may be executed if there are a + lot of values in pk_list. 
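+ + Deletes are issued in batches of GET_ITERATOR_CHUNK_SIZE primary + keys per query.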
+ """ + # number of objects deleted + num_deleted = 0 + field = self.get_meta().pk + for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): + self.clear_where() + self.add_filter( + f"{field.attname}__in", + pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE], + ) + num_deleted += self.do_query( + self.get_meta().db_table, self.where, using=using + ) + return num_deleted + + +class UpdateQuery(Query): + """An UPDATE SQL query.""" + + compiler = "SQLUpdateCompiler" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._setup_query() + + def _setup_query(self): + """ + Run on initialization and at the end of chaining. Any attributes that + would normally be set in __init__() should go here instead. + """ + self.values = [] + self.related_ids = None + self.related_updates = {} + + def clone(self): + obj = super().clone() + obj.related_updates = self.related_updates.copy() + return obj + + def update_batch(self, pk_list, values, using): + self.add_update_values(values) + for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): + self.clear_where() + self.add_filter( + "pk__in", pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE] + ) + self.get_compiler(using).execute_sql(NO_RESULTS) + + def add_update_values(self, values): + """ + Convert a dictionary of field name to value mappings into an update + query. This is the entry point for the public update() method on + querysets. + """ + values_seq = [] + for name, val in values.items(): + field = self.get_meta().get_field(name) + direct = ( + not (field.auto_created and not field.concrete) or not field.concrete + ) + model = field.model._meta.concrete_model + if not direct or (field.is_relation and field.many_to_many): + raise FieldError( + "Cannot update model field %r (only non-relations and " + "foreign keys permitted)." % field + ) + if model is not self.get_meta().concrete_model: + self.add_related_update(model, field, val) + continue + values_seq.append((field, model, val)) + return self.add_update_fields(values_seq) + + def add_update_fields(self, values_seq): + """ + Append a sequence of (field, model, value) triples to the internal list + that will be used to generate the UPDATE query. Might be more usefully + called add_update_targets() to hint at the extra information here. + """ + for field, model, val in values_seq: + if hasattr(val, "resolve_expression"): + # Resolve expressions here so that annotations are no longer needed + val = val.resolve_expression(self, allow_joins=False, for_save=True) + self.values.append((field, model, val)) + + def add_related_update(self, model, field, value): + """ + Add (name, value) to an update query for an ancestor model. + + Update are coalesced so that only one update query per ancestor is run. + """ + self.related_updates.setdefault(model, []).append((field, None, value)) + + def get_related_updates(self): + """ + Return a list of query objects: one for each update required to an + ancestor model. Each query will have the same filtering conditions as + the current query but will only update a single table. 
+ """ + if not self.related_updates: + return [] + result = [] + for model, values in self.related_updates.items(): + query = UpdateQuery(model) + query.values = values + if self.related_ids is not None: + query.add_filter("pk__in", self.related_ids[model]) + result.append(query) + return result + + +class InsertQuery(Query): + compiler = "SQLInsertCompiler" + + def __init__( + self, *args, on_conflict=None, update_fields=None, unique_fields=None, **kwargs + ): + super().__init__(*args, **kwargs) + self.fields = [] + self.objs = [] + self.on_conflict = on_conflict + self.update_fields = update_fields or [] + self.unique_fields = unique_fields or [] + + def insert_values(self, fields, objs, raw=False): + self.fields = fields + self.objs = objs + self.raw = raw + + +class AggregateQuery(Query): + """ + Take another query as a parameter to the FROM clause and only select the + elements in the provided list. + """ + + compiler = "SQLAggregateCompiler" + + def __init__(self, model, inner_query): + self.inner_query = inner_query + super().__init__(model) diff --git a/testbed/django__django/django/db/models/sql/where.py b/testbed/django__django/django/db/models/sql/where.py new file mode 100644 index 0000000000000000000000000000000000000000..2f23a2932ce590b5c895b4ef252bfa1ca4d6518b --- /dev/null +++ b/testbed/django__django/django/db/models/sql/where.py @@ -0,0 +1,360 @@ +""" +Code to manage the creation and SQL rendering of 'where' constraints. +""" +import operator +from functools import reduce + +from django.core.exceptions import EmptyResultSet, FullResultSet +from django.db.models.expressions import Case, When +from django.db.models.functions import Mod +from django.db.models.lookups import Exact +from django.utils import tree +from django.utils.functional import cached_property + +# Connection types +AND = "AND" +OR = "OR" +XOR = "XOR" + + +class WhereNode(tree.Node): + """ + An SQL WHERE clause. + + The class is tied to the Query class that created it (in order to create + the correct SQL). + + A child is usually an expression producing boolean values. Most likely the + expression is a Lookup instance. + + However, a child could also be any class with as_sql() and either + relabeled_clone() method or relabel_aliases() and clone() methods and + contains_aggregate attribute. + """ + + default = AND + resolved = False + conditional = True + + def split_having_qualify(self, negated=False, must_group_by=False): + """ + Return three possibly None nodes: one for those parts of self that + should be included in the WHERE clause, one for those parts of self + that must be included in the HAVING clause, and one for those parts + that refer to window functions. + """ + if not self.contains_aggregate and not self.contains_over_clause: + return self, None, None + in_negated = negated ^ self.negated + # Whether or not children must be connected in the same filtering + # clause (WHERE > HAVING > QUALIFY) to maintain logical semantic. + must_remain_connected = ( + (in_negated and self.connector == AND) + or (not in_negated and self.connector == OR) + or self.connector == XOR + ) + if ( + must_remain_connected + and self.contains_aggregate + and not self.contains_over_clause + ): + # It's must cheaper to short-circuit and stash everything in the + # HAVING clause than split children if possible. 
+ return None, self, None + where_parts = [] + having_parts = [] + qualify_parts = [] + for c in self.children: + if hasattr(c, "split_having_qualify"): + where_part, having_part, qualify_part = c.split_having_qualify( + in_negated, must_group_by + ) + if where_part is not None: + where_parts.append(where_part) + if having_part is not None: + having_parts.append(having_part) + if qualify_part is not None: + qualify_parts.append(qualify_part) + elif c.contains_over_clause: + qualify_parts.append(c) + elif c.contains_aggregate: + having_parts.append(c) + else: + where_parts.append(c) + if must_remain_connected and qualify_parts: + # Disjunctive heterogeneous predicates can be pushed down to + # qualify as long as no conditional aggregation is involved. + if not where_parts or (where_parts and not must_group_by): + return None, None, self + elif where_parts: + # In theory this should only be enforced when dealing with + # where_parts containing predicates against multi-valued + # relationships that could affect aggregation results but this + # is complex to infer properly. + raise NotImplementedError( + "Heterogeneous disjunctive predicates against window functions are " + "not implemented when performing conditional aggregation." + ) + where_node = ( + self.create(where_parts, self.connector, self.negated) + if where_parts + else None + ) + having_node = ( + self.create(having_parts, self.connector, self.negated) + if having_parts + else None + ) + qualify_node = ( + self.create(qualify_parts, self.connector, self.negated) + if qualify_parts + else None + ) + return where_node, having_node, qualify_node + + def as_sql(self, compiler, connection): + """ + Return the SQL version of the where clause and the value to be + substituted in. Return '', [] if this node matches everything, + None, [] if this node is empty, and raise EmptyResultSet if this + node can't match anything. + """ + result = [] + result_params = [] + if self.connector == AND: + full_needed, empty_needed = len(self.children), 1 + else: + full_needed, empty_needed = 1, len(self.children) + + if self.connector == XOR and not connection.features.supports_logical_xor: + # Convert if the database doesn't support XOR: + # a XOR b XOR c XOR ... + # to: + # (a OR b OR c OR ...) AND MOD(a + b + c + ..., 2) == 1 + # The result of an n-ary XOR is true when an odd number of operands + # are true. + lhs = self.__class__(self.children, OR) + rhs_sum = reduce( + operator.add, + (Case(When(c, then=1), default=0) for c in self.children), + ) + if len(self.children) > 2: + rhs_sum = Mod(rhs_sum, 2) + rhs = Exact(1, rhs_sum) + return self.__class__([lhs, rhs], AND, self.negated).as_sql( + compiler, connection + ) + + for child in self.children: + try: + sql, params = compiler.compile(child) + except EmptyResultSet: + empty_needed -= 1 + except FullResultSet: + full_needed -= 1 + else: + if sql: + result.append(sql) + result_params.extend(params) + else: + full_needed -= 1 + # Check if this node matches nothing or everything. + # First check the amount of full nodes and empty nodes + # to make this node empty/full. + # Now, check if this node is full/empty using the + # counts. 
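The XOR rewrite above hinges on the claim that an n-ary XOR is true exactly when an odd number of operands are true. A quick brute-force check of the equivalence used for backends without logical XOR:

# Verify: a XOR b XOR c  ==  (a OR b OR c) AND (a + b + c) % 2 == 1
from itertools import product

for bits in product([False, True], repeat=3):
    a, b, c = bits
    assert (a ^ b ^ c) == ((a or b or c) and sum(bits) % 2 == 1)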
+ if empty_needed == 0: + if self.negated: + raise FullResultSet + else: + raise EmptyResultSet + if full_needed == 0: + if self.negated: + raise EmptyResultSet + else: + raise FullResultSet + conn = " %s " % self.connector + sql_string = conn.join(result) + if not sql_string: + raise FullResultSet + if self.negated: + # Some backends (Oracle at least) need parentheses around the inner + # SQL in the negated case, even if the inner SQL contains just a + # single expression. + sql_string = "NOT (%s)" % sql_string + elif len(result) > 1 or self.resolved: + sql_string = "(%s)" % sql_string + return sql_string, result_params + + def get_group_by_cols(self): + cols = [] + for child in self.children: + cols.extend(child.get_group_by_cols()) + return cols + + def get_source_expressions(self): + return self.children[:] + + def set_source_expressions(self, children): + assert len(children) == len(self.children) + self.children = children + + def relabel_aliases(self, change_map): + """ + Relabel the alias values of any children. 'change_map' is a dictionary + mapping old (current) alias values to the new values. + """ + for pos, child in enumerate(self.children): + if hasattr(child, "relabel_aliases"): + # For example another WhereNode + child.relabel_aliases(change_map) + elif hasattr(child, "relabeled_clone"): + self.children[pos] = child.relabeled_clone(change_map) + + def clone(self): + clone = self.create(connector=self.connector, negated=self.negated) + for child in self.children: + if hasattr(child, "clone"): + child = child.clone() + clone.children.append(child) + return clone + + def relabeled_clone(self, change_map): + clone = self.clone() + clone.relabel_aliases(change_map) + return clone + + def replace_expressions(self, replacements): + if replacement := replacements.get(self): + return replacement + clone = self.create(connector=self.connector, negated=self.negated) + for child in self.children: + clone.children.append(child.replace_expressions(replacements)) + return clone + + def get_refs(self): + refs = set() + for child in self.children: + refs |= child.get_refs() + return refs + + @classmethod + def _contains_aggregate(cls, obj): + if isinstance(obj, tree.Node): + return any(cls._contains_aggregate(c) for c in obj.children) + return obj.contains_aggregate + + @cached_property + def contains_aggregate(self): + return self._contains_aggregate(self) + + @classmethod + def _contains_over_clause(cls, obj): + if isinstance(obj, tree.Node): + return any(cls._contains_over_clause(c) for c in obj.children) + return obj.contains_over_clause + + @cached_property + def contains_over_clause(self): + return self._contains_over_clause(self) + + @property + def is_summary(self): + return any(child.is_summary for child in self.children) + + @staticmethod + def _resolve_leaf(expr, query, *args, **kwargs): + if hasattr(expr, "resolve_expression"): + expr = expr.resolve_expression(query, *args, **kwargs) + return expr + + @classmethod + def _resolve_node(cls, node, query, *args, **kwargs): + if hasattr(node, "children"): + for child in node.children: + cls._resolve_node(child, query, *args, **kwargs) + if hasattr(node, "lhs"): + node.lhs = cls._resolve_leaf(node.lhs, query, *args, **kwargs) + if hasattr(node, "rhs"): + node.rhs = cls._resolve_leaf(node.rhs, query, *args, **kwargs) + + def resolve_expression(self, *args, **kwargs): + clone = self.clone() + clone._resolve_node(clone, *args, **kwargs) + clone.resolved = True + return clone + + @cached_property + def output_field(self): + from 
django.db.models import BooleanField + + return BooleanField() + + @property + def _output_field_or_none(self): + return self.output_field + + def select_format(self, compiler, sql, params): + # Wrap filters with a CASE WHEN expression if a database backend + # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP + # BY list. + if not compiler.connection.features.supports_boolean_expr_in_select_clause: + sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END" + return sql, params + + def get_db_converters(self, connection): + return self.output_field.get_db_converters(connection) + + def get_lookup(self, lookup): + return self.output_field.get_lookup(lookup) + + def leaves(self): + for child in self.children: + if isinstance(child, WhereNode): + yield from child.leaves() + else: + yield child + + +class NothingNode: + """A node that matches nothing.""" + + contains_aggregate = False + contains_over_clause = False + + def as_sql(self, compiler=None, connection=None): + raise EmptyResultSet + + +class ExtraWhere: + # The contents are a black box - assume no aggregates or windows are used. + contains_aggregate = False + contains_over_clause = False + + def __init__(self, sqls, params): + self.sqls = sqls + self.params = params + + def as_sql(self, compiler=None, connection=None): + sqls = ["(%s)" % sql for sql in self.sqls] + return " AND ".join(sqls), list(self.params or ()) + + +class SubqueryConstraint: + # Even if aggregates or windows would be used in a subquery, + # the outer query isn't interested about those. + contains_aggregate = False + contains_over_clause = False + + def __init__(self, alias, columns, targets, query_object): + self.alias = alias + self.columns = columns + self.targets = targets + query_object.clear_ordering(clear_default=True) + self.query_object = query_object + + def as_sql(self, compiler, connection): + query = self.query_object + query.set_values(self.targets) + query_compiler = query.get_compiler(connection=connection) + return query_compiler.as_subquery_condition(self.alias, self.columns, compiler) diff --git a/testbed/django__django/django/db/models/utils.py b/testbed/django__django/django/db/models/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c6cb5ef16593c4fcb4843c5d4a5627d1f64cafac --- /dev/null +++ b/testbed/django__django/django/db/models/utils.py @@ -0,0 +1,69 @@ +import functools +from collections import namedtuple + + +def make_model_tuple(model): + """ + Take a model or a string of the form "app_label.ModelName" and return a + corresponding ("app_label", "modelname") tuple. If a tuple is passed in, + assume it's a valid model tuple already and return it unchanged. + """ + try: + if isinstance(model, tuple): + model_tuple = model + elif isinstance(model, str): + app_label, model_name = model.split(".") + model_tuple = app_label, model_name.lower() + else: + model_tuple = model._meta.app_label, model._meta.model_name + assert len(model_tuple) == 2 + return model_tuple + except (ValueError, AssertionError): + raise ValueError( + "Invalid model reference '%s'. String model references " + "must be of the form 'app_label.ModelName'." % model + ) + + +def resolve_callables(mapping): + """ + Generate key/value pairs for the given mapping where the values are + evaluated if they're callable. 
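resolve_callables() is what lets QuerySet.get_or_create() accept callables in its defaults mapping; a short usage sketch:

from django.db.models.utils import resolve_callables

# Callable values are invoked at resolution time; plain values pass
# through unchanged.
defaults = {"name": "alice", "token": lambda: "generated"}
assert dict(resolve_callables(defaults)) == {"name": "alice", "token": "generated"}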
+ """ + for k, v in mapping.items(): + yield k, v() if callable(v) else v + + +def unpickle_named_row(names, values): + return create_namedtuple_class(*names)(*values) + + +@functools.lru_cache +def create_namedtuple_class(*names): + # Cache type() with @lru_cache since it's too slow to be called for every + # QuerySet evaluation. + def __reduce__(self): + return unpickle_named_row, (names, tuple(self)) + + return type( + "Row", + (namedtuple("Row", names),), + {"__reduce__": __reduce__, "__slots__": ()}, + ) + + +class AltersData: + """ + Make subclasses preserve the alters_data attribute on overridden methods. + """ + + def __init_subclass__(cls, **kwargs): + for fn_name, fn in vars(cls).items(): + if callable(fn) and not hasattr(fn, "alters_data"): + for base in cls.__bases__: + if base_fn := getattr(base, fn_name, None): + if hasattr(base_fn, "alters_data"): + fn.alters_data = base_fn.alters_data + break + + super().__init_subclass__(**kwargs) diff --git a/testbed/django__django/django/db/transaction.py b/testbed/django__django/django/db/transaction.py new file mode 100644 index 0000000000000000000000000000000000000000..4150cbcbbe29f301f907c8ca888ad00e10eb5797 --- /dev/null +++ b/testbed/django__django/django/db/transaction.py @@ -0,0 +1,340 @@ +from contextlib import ContextDecorator, contextmanager + +from django.db import ( + DEFAULT_DB_ALIAS, + DatabaseError, + Error, + ProgrammingError, + connections, +) + + +class TransactionManagementError(ProgrammingError): + """Transaction management is used improperly.""" + + pass + + +def get_connection(using=None): + """ + Get a database connection by name, or the default database connection + if no name is provided. This is a private API. + """ + if using is None: + using = DEFAULT_DB_ALIAS + return connections[using] + + +def get_autocommit(using=None): + """Get the autocommit status of the connection.""" + return get_connection(using).get_autocommit() + + +def set_autocommit(autocommit, using=None): + """Set the autocommit status of the connection.""" + return get_connection(using).set_autocommit(autocommit) + + +def commit(using=None): + """Commit a transaction.""" + get_connection(using).commit() + + +def rollback(using=None): + """Roll back a transaction.""" + get_connection(using).rollback() + + +def savepoint(using=None): + """ + Create a savepoint (if supported and required by the backend) inside the + current transaction. Return an identifier for the savepoint that will be + used for the subsequent rollback or commit. + """ + return get_connection(using).savepoint() + + +def savepoint_rollback(sid, using=None): + """ + Roll back the most recent savepoint (if one exists). Do nothing if + savepoints are not supported. + """ + get_connection(using).savepoint_rollback(sid) + + +def savepoint_commit(sid, using=None): + """ + Commit the most recent savepoint (if one exists). Do nothing if + savepoints are not supported. + """ + get_connection(using).savepoint_commit(sid) + + +def clean_savepoints(using=None): + """ + Reset the counter used to generate unique savepoint ids in this thread. + """ + get_connection(using).clean_savepoints() + + +def get_rollback(using=None): + """Get the "needs rollback" flag -- for *advanced use* only.""" + return get_connection(using).get_rollback() + + +def set_rollback(rollback, using=None): + """ + Set or unset the "needs rollback" flag -- for *advanced use* only. 
+ + When `rollback` is `True`, trigger a rollback when exiting the innermost + enclosing atomic block that has `savepoint=True` (that's the default). Use + this to force a rollback without raising an exception. + + When `rollback` is `False`, prevent such a rollback. Use this only after + rolling back to a known-good state! Otherwise, you break the atomic block + and data corruption may occur. + """ + return get_connection(using).set_rollback(rollback) + + +@contextmanager +def mark_for_rollback_on_error(using=None): + """ + Internal low-level utility to mark a transaction as "needs rollback" when + an exception is raised while not enforcing the enclosed block to be in a + transaction. This is needed by Model.save() and friends to avoid starting a + transaction when in autocommit mode and a single query is executed. + + It's equivalent to: + + connection = get_connection(using) + if connection.get_autocommit(): + yield + else: + with transaction.atomic(using=using, savepoint=False): + yield + + but it uses low-level utilities to avoid performance overhead. + """ + try: + yield + except Exception as exc: + connection = get_connection(using) + if connection.in_atomic_block: + connection.needs_rollback = True + connection.rollback_exc = exc + raise + + +def on_commit(func, using=None, robust=False): + """ + Register `func` to be called when the current transaction is committed. + If the current transaction is rolled back, `func` will not be called. + """ + get_connection(using).on_commit(func, robust) + + +################################# +# Decorators / context managers # +################################# + + +class Atomic(ContextDecorator): + """ + Guarantee the atomic execution of a given block. + + An instance can be used either as a decorator or as a context manager. + + When it's used as a decorator, __call__ wraps the execution of the + decorated function in the instance itself, used as a context manager. + + When it's used as a context manager, __enter__ creates a transaction or a + savepoint, depending on whether a transaction is already in progress, and + __exit__ commits the transaction or releases the savepoint on normal exit, + and rolls back the transaction or to the savepoint on exceptions. + + It's possible to disable the creation of savepoints if the goal is to + ensure that some code runs within a transaction without creating overhead. + + A stack of savepoints identifiers is maintained as an attribute of the + connection. None denotes the absence of a savepoint. + + This allows reentrancy even if the same AtomicWrapper is reused. For + example, it's possible to define `oa = atomic('other')` and use `@oa` or + `with oa:` multiple times. + + Since database connections are thread-local, this is thread-safe. + + An atomic block can be tagged as durable. In this case, raise a + RuntimeError if it's nested within another atomic block. This guarantees + that database changes in a durable block are committed to the database when + the block exists without error. + + This is a private API. + """ + + def __init__(self, using, savepoint, durable): + self.using = using + self.savepoint = savepoint + self.durable = durable + self._from_testcase = False + + def __enter__(self): + connection = get_connection(self.using) + + if ( + self.durable + and connection.atomic_blocks + and not connection.atomic_blocks[-1]._from_testcase + ): + raise RuntimeError( + "A durable atomic block cannot be nested within another " + "atomic block." 
+ ) + if not connection.in_atomic_block: + # Reset state when entering an outermost atomic block. + connection.commit_on_exit = True + connection.needs_rollback = False + if not connection.get_autocommit(): + # Pretend we're already in an atomic block to bypass the code + # that disables autocommit to enter a transaction, and make a + # note to deal with this case in __exit__. + connection.in_atomic_block = True + connection.commit_on_exit = False + + if connection.in_atomic_block: + # We're already in a transaction; create a savepoint, unless we + # were told not to or we're already waiting for a rollback. The + # second condition avoids creating useless savepoints and prevents + # overwriting needs_rollback until the rollback is performed. + if self.savepoint and not connection.needs_rollback: + sid = connection.savepoint() + connection.savepoint_ids.append(sid) + else: + connection.savepoint_ids.append(None) + else: + connection.set_autocommit( + False, force_begin_transaction_with_broken_autocommit=True + ) + connection.in_atomic_block = True + + if connection.in_atomic_block: + connection.atomic_blocks.append(self) + + def __exit__(self, exc_type, exc_value, traceback): + connection = get_connection(self.using) + + if connection.in_atomic_block: + connection.atomic_blocks.pop() + + if connection.savepoint_ids: + sid = connection.savepoint_ids.pop() + else: + # Prematurely unset this flag to allow using commit or rollback. + connection.in_atomic_block = False + + try: + if connection.closed_in_transaction: + # The database will perform a rollback by itself. + # Wait until we exit the outermost block. + pass + + elif exc_type is None and not connection.needs_rollback: + if connection.in_atomic_block: + # Release savepoint if there is one + if sid is not None: + try: + connection.savepoint_commit(sid) + except DatabaseError: + try: + connection.savepoint_rollback(sid) + # The savepoint won't be reused. Release it to + # minimize overhead for the database server. + connection.savepoint_commit(sid) + except Error: + # If rolling back to a savepoint fails, mark for + # rollback at a higher level and avoid shadowing + # the original exception. + connection.needs_rollback = True + raise + else: + # Commit transaction + try: + connection.commit() + except DatabaseError: + try: + connection.rollback() + except Error: + # An error during rollback means that something + # went wrong with the connection. Drop it. + connection.close() + raise + else: + # This flag will be set to True again if there isn't a savepoint + # allowing to perform the rollback at this level. + connection.needs_rollback = False + if connection.in_atomic_block: + # Roll back to savepoint if there is one, mark for rollback + # otherwise. + if sid is None: + connection.needs_rollback = True + else: + try: + connection.savepoint_rollback(sid) + # The savepoint won't be reused. Release it to + # minimize overhead for the database server. + connection.savepoint_commit(sid) + except Error: + # If rolling back to a savepoint fails, mark for + # rollback at a higher level and avoid shadowing + # the original exception. + connection.needs_rollback = True + else: + # Roll back transaction + try: + connection.rollback() + except Error: + # An error during rollback means that something + # went wrong with the connection. Drop it. + connection.close() + + finally: + # Outermost block exit when autocommit was enabled. 
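Putting the Atomic machinery above together: the public atomic() API nests via savepoints, and durable=True refuses to run inside another atomic block. A sketch of both (withdraw/deposit are hypothetical model methods):

from django.db import transaction

def transfer(source, target, amount):
    with transaction.atomic():          # outermost block: BEGIN ... COMMIT
        source.withdraw(amount)         # hypothetical model method
        with transaction.atomic():      # inner block: SAVEPOINT / RELEASE
            target.deposit(amount)      # hypothetical model method

@transaction.atomic(durable=True)       # RuntimeError if nested in atomic()
def settle_accounts():
    ...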
+ if not connection.in_atomic_block: + if connection.closed_in_transaction: + connection.connection = None + else: + connection.set_autocommit(True) + # Outermost block exit when autocommit was disabled. + elif not connection.savepoint_ids and not connection.commit_on_exit: + if connection.closed_in_transaction: + connection.connection = None + else: + connection.in_atomic_block = False + + +def atomic(using=None, savepoint=True, durable=False): + # Bare decorator: @atomic -- although the first argument is called + # `using`, it's actually the function being decorated. + if callable(using): + return Atomic(DEFAULT_DB_ALIAS, savepoint, durable)(using) + # Decorator: @atomic(...) or context manager: with atomic(...): ... + else: + return Atomic(using, savepoint, durable) + + +def _non_atomic_requests(view, using): + try: + view._non_atomic_requests.add(using) + except AttributeError: + view._non_atomic_requests = {using} + return view + + +def non_atomic_requests(using=None): + if callable(using): + return _non_atomic_requests(using, DEFAULT_DB_ALIAS) + else: + if using is None: + using = DEFAULT_DB_ALIAS + return lambda view: _non_atomic_requests(view, using) diff --git a/testbed/django__django/django/dispatch/__init__.py b/testbed/django__django/django/dispatch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a615f9905afe67cc5ba02447e1f5a269e38da085 --- /dev/null +++ b/testbed/django__django/django/dispatch/__init__.py @@ -0,0 +1,9 @@ +"""Multi-consumer multi-producer dispatching mechanism + +Originally based on pydispatch (BSD) https://pypi.org/project/PyDispatcher/2.0.1/ +See license.txt for original license. + +Heavily modified for Django's purposes. +""" + +from django.dispatch.dispatcher import Signal, receiver # NOQA diff --git a/testbed/django__django/django/dispatch/dispatcher.py b/testbed/django__django/django/dispatch/dispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..26ef09ce49ae531e851a95fb42b33971147978d7 --- /dev/null +++ b/testbed/django__django/django/dispatch/dispatcher.py @@ -0,0 +1,490 @@ +import asyncio +import logging +import threading +import weakref + +from asgiref.sync import async_to_sync, iscoroutinefunction, sync_to_async + +from django.utils.inspect import func_accepts_kwargs + +logger = logging.getLogger("django.dispatch") + + +def _make_id(target): + if hasattr(target, "__func__"): + return (id(target.__self__), id(target.__func__)) + return id(target) + + +NONE_ID = _make_id(None) + +# A marker for caching +NO_RECEIVERS = object() + + +class Signal: + """ + Base class for all signals + + Internal attributes: + + receivers + { receiverkey (id) : weakref(receiver) } + """ + + def __init__(self, use_caching=False): + """ + Create a new signal. + """ + self.receivers = [] + self.lock = threading.Lock() + self.use_caching = use_caching + # For convenience we create empty caches even if they are not used. + # A note about caching: if use_caching is defined, then for each + # distinct sender we cache the receivers that sender has in + # 'sender_receivers_cache'. The cache is cleaned when .connect() or + # .disconnect() is called and populated on send(). + self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {} + self._dead_receivers = False + + def connect(self, receiver, sender=None, weak=True, dispatch_uid=None): + """ + Connect receiver to sender for signal. + + Arguments: + + receiver + A function or an instance method which is to receive signals. 
+ Receivers must be hashable objects. Receivers can be + asynchronous. + + If weak is True, then receiver must be weak referenceable. + + Receivers must be able to accept keyword arguments. + + If a receiver is connected with a dispatch_uid argument, it + will not be added if another receiver was already connected + with that dispatch_uid. + + sender + The sender to which the receiver should respond. Must either be + a Python object, or None to receive events from any sender. + + weak + Whether to use weak references to the receiver. By default, the + module will attempt to use weak references to the receiver + objects. If this parameter is false, then strong references will + be used. + + dispatch_uid + An identifier used to uniquely identify a particular instance of + a receiver. This will usually be a string, though it may be + anything hashable. + """ + from django.conf import settings + + # If DEBUG is on, check that we got a good receiver + if settings.configured and settings.DEBUG: + if not callable(receiver): + raise TypeError("Signal receivers must be callable.") + # Check for **kwargs + if not func_accepts_kwargs(receiver): + raise ValueError( + "Signal receivers must accept keyword arguments (**kwargs)." + ) + + if dispatch_uid: + lookup_key = (dispatch_uid, _make_id(sender)) + else: + lookup_key = (_make_id(receiver), _make_id(sender)) + + is_async = iscoroutinefunction(receiver) + + if weak: + ref = weakref.ref + receiver_object = receiver + # Check for bound methods + if hasattr(receiver, "__self__") and hasattr(receiver, "__func__"): + ref = weakref.WeakMethod + receiver_object = receiver.__self__ + receiver = ref(receiver) + weakref.finalize(receiver_object, self._remove_receiver) + + with self.lock: + self._clear_dead_receivers() + if not any(r_key == lookup_key for r_key, _, _ in self.receivers): + self.receivers.append((lookup_key, receiver, is_async)) + self.sender_receivers_cache.clear() + + def disconnect(self, receiver=None, sender=None, dispatch_uid=None): + """ + Disconnect receiver from sender for signal. + + If weak references are used, disconnect need not be called. The receiver + will be removed from dispatch automatically. + + Arguments: + + receiver + The registered receiver to disconnect. May be none if + dispatch_uid is specified. + + sender + The registered sender to disconnect + + dispatch_uid + the unique identifier of the receiver to disconnect + """ + if dispatch_uid: + lookup_key = (dispatch_uid, _make_id(sender)) + else: + lookup_key = (_make_id(receiver), _make_id(sender)) + + disconnected = False + with self.lock: + self._clear_dead_receivers() + for index in range(len(self.receivers)): + r_key, *_ = self.receivers[index] + if r_key == lookup_key: + disconnected = True + del self.receivers[index] + break + self.sender_receivers_cache.clear() + return disconnected + + def has_listeners(self, sender=None): + sync_receivers, async_receivers = self._live_receivers(sender) + return bool(sync_receivers) or bool(async_receivers) + + def send(self, sender, **named): + """ + Send signal from sender to all connected receivers. + + If any receiver raises an error, the error propagates back through send, + terminating the dispatch loop. So it's possible that all receivers + won't be called if an error is raised. + + If any receivers are asynchronous, they are called after all the + synchronous receivers via a single call to async_to_sync(). They are + also executed concurrently with asyncio.gather(). + + Arguments: + + sender + The sender of the signal. 
Either a specific object or None. + + named + Named arguments which will be passed to receivers. + + Return a list of tuple pairs [(receiver, response), ... ]. + """ + if ( + not self.receivers + or self.sender_receivers_cache.get(sender) is NO_RECEIVERS + ): + return [] + responses = [] + sync_receivers, async_receivers = self._live_receivers(sender) + for receiver in sync_receivers: + response = receiver(signal=self, sender=sender, **named) + responses.append((receiver, response)) + if async_receivers: + + async def asend(): + async_responses = await asyncio.gather( + *( + receiver(signal=self, sender=sender, **named) + for receiver in async_receivers + ) + ) + return zip(async_receivers, async_responses) + + responses.extend(async_to_sync(asend)()) + return responses + + async def asend(self, sender, **named): + """ + Send signal from sender to all connected receivers in async mode. + + All sync receivers will be wrapped by sync_to_async() + If any receiver raises an error, the error propagates back through + send, terminating the dispatch loop. So it's possible that all + receivers won't be called if an error is raised. + + If any receivers are synchronous, they are grouped and called behind a + sync_to_async() adaption before executing any asynchronous receivers. + + If any receivers are asynchronous, they are grouped and executed + concurrently with asyncio.gather(). + + Arguments: + + sender + The sender of the signal. Either a specific object or None. + + named + Named arguments which will be passed to receivers. + + Return a list of tuple pairs [(receiver, response), ...]. + """ + if ( + not self.receivers + or self.sender_receivers_cache.get(sender) is NO_RECEIVERS + ): + return [] + sync_receivers, async_receivers = self._live_receivers(sender) + if sync_receivers: + + @sync_to_async + def sync_send(): + responses = [] + for receiver in sync_receivers: + response = receiver(signal=self, sender=sender, **named) + responses.append((receiver, response)) + return responses + + else: + sync_send = list + + responses, async_responses = await asyncio.gather( + sync_send(), + asyncio.gather( + *( + receiver(signal=self, sender=sender, **named) + for receiver in async_receivers + ) + ), + ) + responses.extend(zip(async_receivers, async_responses)) + return responses + + def _log_robust_failure(self, receiver, err): + logger.error( + "Error calling %s in Signal.send_robust() (%s)", + receiver.__qualname__, + err, + exc_info=err, + ) + + def send_robust(self, sender, **named): + """ + Send signal from sender to all connected receivers catching errors. + + If any receivers are asynchronous, they are called after all the + synchronous receivers via a single call to async_to_sync(). They are + also executed concurrently with asyncio.gather(). + + Arguments: + + sender + The sender of the signal. Can be any Python object (normally one + registered with a connect if you actually want something to + occur). + + named + Named arguments which will be passed to receivers. + + Return a list of tuple pairs [(receiver, response), ... ]. + + If any receiver raises an error (specifically any subclass of + Exception), return the error instance as the result for that receiver. + """ + if ( + not self.receivers + or self.sender_receivers_cache.get(sender) is NO_RECEIVERS + ): + return [] + + # Call each receiver with whatever arguments it can accept. + # Return a list of tuple pairs [(receiver, response), ... ]. 
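A minimal demonstration of the send_robust() contract described above: a raising receiver does not abort dispatch, and the exception instance is returned as that receiver's response (pizza_done is a hypothetical signal):

from django.dispatch import Signal

pizza_done = Signal()  # hypothetical signal

def good_receiver(sender, **kwargs):
    return "ok"

def bad_receiver(sender, **kwargs):
    raise ValueError("boom")

pizza_done.connect(good_receiver)
pizza_done.connect(bad_receiver)

results = pizza_done.send_robust(sender=None)
assert results[0] == (good_receiver, "ok")
assert isinstance(results[1][1], ValueError)  # error returned, not raised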
+ responses = [] + sync_receivers, async_receivers = self._live_receivers(sender) + for receiver in sync_receivers: + try: + response = receiver(signal=self, sender=sender, **named) + except Exception as err: + self._log_robust_failure(receiver, err) + responses.append((receiver, err)) + else: + responses.append((receiver, response)) + if async_receivers: + + async def asend_and_wrap_exception(receiver): + try: + response = await receiver(signal=self, sender=sender, **named) + except Exception as err: + self._log_robust_failure(receiver, err) + return err + return response + + async def asend(): + async_responses = await asyncio.gather( + *( + asend_and_wrap_exception(receiver) + for receiver in async_receivers + ) + ) + return zip(async_receivers, async_responses) + + responses.extend(async_to_sync(asend)()) + return responses + + async def asend_robust(self, sender, **named): + """ + Send signal from sender to all connected receivers catching errors. + + If any receivers are synchronous, they are grouped and called behind a + sync_to_async() adaption before executing any asynchronous receivers. + + If any receivers are asynchronous, they are grouped and executed + concurrently with asyncio.gather. + + Arguments: + + sender + The sender of the signal. Can be any Python object (normally one + registered with a connect if you actually want something to + occur). + + named + Named arguments which will be passed to receivers. + + Return a list of tuple pairs [(receiver, response), ... ]. + + If any receiver raises an error (specifically any subclass of + Exception), return the error instance as the result for that receiver. + """ + if ( + not self.receivers + or self.sender_receivers_cache.get(sender) is NO_RECEIVERS + ): + return [] + + # Call each receiver with whatever arguments it can accept. + # Return a list of tuple pairs [(receiver, response), ... ]. + sync_receivers, async_receivers = self._live_receivers(sender) + + if sync_receivers: + + @sync_to_async + def sync_send(): + responses = [] + for receiver in sync_receivers: + try: + response = receiver(signal=self, sender=sender, **named) + except Exception as err: + self._log_robust_failure(receiver, err) + responses.append((receiver, err)) + else: + responses.append((receiver, response)) + return responses + + else: + sync_send = list + + async def asend_and_wrap_exception(receiver): + try: + response = await receiver(signal=self, sender=sender, **named) + except Exception as err: + self._log_robust_failure(receiver, err) + return err + return response + + responses, async_responses = await asyncio.gather( + sync_send(), + asyncio.gather( + *(asend_and_wrap_exception(receiver) for receiver in async_receivers), + ), + ) + responses.extend(zip(async_receivers, async_responses)) + return responses + + def _clear_dead_receivers(self): + # Note: caller is assumed to hold self.lock. + if self._dead_receivers: + self._dead_receivers = False + self.receivers = [ + r + for r in self.receivers + if not (isinstance(r[1], weakref.ReferenceType) and r[1]() is None) + ] + + def _live_receivers(self, sender): + """ + Filter sequence of receivers to get resolved, live receivers. + + This checks for weak references and resolves them, then returning only + live receivers. 
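The weakref handling in _live_receivers() is why receivers connected with the default weak=True silently disappear once the last strong reference is dropped; a sketch of that gotcha (CPython's immediate refcounting is assumed):

import gc
from django.dispatch import Signal

sig = Signal()

def handler(sender, **kwargs):
    return "alive"

sig.connect(handler)                     # weak=True by default
assert sig.send(sender=None) == [(handler, "alive")]

del handler                              # last strong reference gone
gc.collect()
assert sig.send(sender=None) == []       # dead weakref filtered out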
+ """ + receivers = None + if self.use_caching and not self._dead_receivers: + receivers = self.sender_receivers_cache.get(sender) + # We could end up here with NO_RECEIVERS even if we do check this case in + # .send() prior to calling _live_receivers() due to concurrent .send() call. + if receivers is NO_RECEIVERS: + return [], [] + if receivers is None: + with self.lock: + self._clear_dead_receivers() + senderkey = _make_id(sender) + receivers = [] + for (_receiverkey, r_senderkey), receiver, is_async in self.receivers: + if r_senderkey == NONE_ID or r_senderkey == senderkey: + receivers.append((receiver, is_async)) + if self.use_caching: + if not receivers: + self.sender_receivers_cache[sender] = NO_RECEIVERS + else: + # Note, we must cache the weakref versions. + self.sender_receivers_cache[sender] = receivers + non_weak_sync_receivers = [] + non_weak_async_receivers = [] + for receiver, is_async in receivers: + if isinstance(receiver, weakref.ReferenceType): + # Dereference the weak reference. + receiver = receiver() + if receiver is not None: + if is_async: + non_weak_async_receivers.append(receiver) + else: + non_weak_sync_receivers.append(receiver) + else: + if is_async: + non_weak_async_receivers.append(receiver) + else: + non_weak_sync_receivers.append(receiver) + return non_weak_sync_receivers, non_weak_async_receivers + + def _remove_receiver(self, receiver=None): + # Mark that the self.receivers list has dead weakrefs. If so, we will + # clean those up in connect, disconnect and _live_receivers while + # holding self.lock. Note that doing the cleanup here isn't a good + # idea, _remove_receiver() will be called as side effect of garbage + # collection, and so the call can happen while we are already holding + # self.lock. + self._dead_receivers = True + + +def receiver(signal, **kwargs): + """ + A decorator for connecting receivers to signals. Used by passing in the + signal (or list of signals) and keyword arguments to connect:: + + @receiver(post_save, sender=MyModel) + def signal_receiver(sender, **kwargs): + ... + + @receiver([post_save, post_delete], sender=MyModel) + def signals_receiver(sender, **kwargs): + ... + """ + + def _decorator(func): + if isinstance(signal, (list, tuple)): + for s in signal: + s.connect(func, **kwargs) + else: + signal.connect(func, **kwargs) + return func + + return _decorator diff --git a/testbed/django__django/django/dispatch/license.txt b/testbed/django__django/django/dispatch/license.txt new file mode 100644 index 0000000000000000000000000000000000000000..505090dff94ed39b6d6689cb6af1f9b8b48e53c1 --- /dev/null +++ b/testbed/django__django/django/dispatch/license.txt @@ -0,0 +1,36 @@ +django.dispatch was originally forked from PyDispatcher. + +PyDispatcher License: + + Copyright (c) 2001-2003, Patrick K. O'Brien and Contributors + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. + + The name of Patrick K. O'Brien, or the name of any Contributor, + may not be used to endorse or promote products derived from this + software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/testbed/django__django/django/forms/jinja2/django/forms/errors/dict/text.txt b/testbed/django__django/django/forms/jinja2/django/forms/errors/dict/text.txt new file mode 100644 index 0000000000000000000000000000000000000000..dc9fd80c99ce0182ad391c4455d873f7ecc98f72 --- /dev/null +++ b/testbed/django__django/django/forms/jinja2/django/forms/errors/dict/text.txt @@ -0,0 +1,3 @@ +{% for field, errors in errors %}* {{ field }} +{% for error in errors %} * {{ error }} +{% endfor %}{% endfor %} diff --git a/testbed/django__django/django/forms/jinja2/django/forms/errors/dict/ul.html b/testbed/django__django/django/forms/jinja2/django/forms/errors/dict/ul.html new file mode 100644 index 0000000000000000000000000000000000000000..c16fd6591450db3d6ae4b4d34ed82250a97c53b5 --- /dev/null +++ b/testbed/django__django/django/forms/jinja2/django/forms/errors/dict/ul.html @@ -0,0 +1 @@ +{% if errors %}
<ul class="{{ error_class }}">{% for field, error in errors %}<li>{{ field }}{{ error }}</li>{% endfor %}</ul>
{% endif %} diff --git a/testbed/django__django/django/forms/jinja2/django/forms/errors/list/default.html b/testbed/django__django/django/forms/jinja2/django/forms/errors/list/default.html new file mode 100644 index 0000000000000000000000000000000000000000..fccc328188da35d24a130fce380c95982ed051a8 --- /dev/null +++ b/testbed/django__django/django/forms/jinja2/django/forms/errors/list/default.html @@ -0,0 +1 @@ +{% include "django/forms/errors/list/ul.html" %} diff --git a/testbed/django__django/django/forms/jinja2/django/forms/formsets/p.html b/testbed/django__django/django/forms/jinja2/django/forms/formsets/p.html new file mode 100644 index 0000000000000000000000000000000000000000..3ed889e6df017cc00cc91c4ca5777b6cac1c9a9a --- /dev/null +++ b/testbed/django__django/django/forms/jinja2/django/forms/formsets/p.html @@ -0,0 +1 @@ +{{ formset.management_form }}{% for form in formset %}{{ form.as_p() }}{% endfor %} diff --git a/testbed/django__django/django/forms/jinja2/django/forms/formsets/table.html b/testbed/django__django/django/forms/jinja2/django/forms/formsets/table.html new file mode 100644 index 0000000000000000000000000000000000000000..25033775b0c2ab2f149f9c2896d94237dab85ad9 --- /dev/null +++ b/testbed/django__django/django/forms/jinja2/django/forms/formsets/table.html @@ -0,0 +1 @@ +{{ formset.management_form }}{% for form in formset %}{{ form.as_table() }}{% endfor %} diff --git a/testbed/django__django/django/forms/jinja2/django/forms/formsets/ul.html b/testbed/django__django/django/forms/jinja2/django/forms/formsets/ul.html new file mode 100644 index 0000000000000000000000000000000000000000..335e91e0e6b37fecfeaefbf30bbf0a1a0aec0cc1 --- /dev/null +++ b/testbed/django__django/django/forms/jinja2/django/forms/formsets/ul.html @@ -0,0 +1 @@ +{{ formset.management_form }}{% for form in formset %}{{ form.as_ul() }}{% endfor %} diff --git a/testbed/django__django/django/forms/jinja2/django/forms/widgets/attrs.html b/testbed/django__django/django/forms/jinja2/django/forms/widgets/attrs.html new file mode 100644 index 0000000000000000000000000000000000000000..76926d79fe7e2164a8b879b71377f9b0b0a82a2f --- /dev/null +++ b/testbed/django__django/django/forms/jinja2/django/forms/widgets/attrs.html @@ -0,0 +1 @@ +{% for name, value in widget.attrs.items() %}{% if value is not sameas False %} {{ name }}{% if value is not sameas True %}="{{ value }}"{% endif %}{% endif %}{% endfor %} diff --git a/testbed/django__django/django/forms/jinja2/django/forms/widgets/multiwidget.html b/testbed/django__django/django/forms/jinja2/django/forms/widgets/multiwidget.html new file mode 100644 index 0000000000000000000000000000000000000000..ae120e91f558e4a7c42b77f796b4cdcb72d6175e --- /dev/null +++ b/testbed/django__django/django/forms/jinja2/django/forms/widgets/multiwidget.html @@ -0,0 +1 @@ +{% for widget in widget.subwidgets -%}{% include widget.template_name %}{%- endfor %}
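These Jinja2 form and widget templates are only consulted when a Jinja2-backed form renderer is active; a minimal settings sketch selecting the built-in one project-wide:

# settings.py -- the built-in Jinja2 renderer ships its own standalone
# template engine, so no TEMPLATES changes are required for it.
FORM_RENDERER = "django.forms.renderers.Jinja2"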