diff --git a/.gitattributes b/.gitattributes index a1f18080c8ec36cd57573f5572753d79ad2f7fa7..b1fc534619dfe93b44491f477e0492896557ddb4 100644 --- a/.gitattributes +++ b/.gitattributes @@ -152,3 +152,4 @@ A-news-Agrregation-system-master/myvenv/share/python-wheels/pip-9.0.1-py2.py3-no A-news-Agrregation-system-master/myvenv/share/python-wheels/pkg_resources-0.0.0-py2.py3-none-any.whl filter=lfs diff=lfs merge=lfs -text A-news-Agrregation-system-master/myvenv/share/python-wheels/setuptools-39.0.1-py2.py3-none-any.whl filter=lfs diff=lfs merge=lfs -text A-news-Agrregation-system-master/myvenv/share/python-wheels/urllib3-1.22-py2.py3-none-any.whl filter=lfs diff=lfs merge=lfs -text +A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/pip/_vendor/distlib/t64.exe filter=lfs diff=lfs merge=lfs -text diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/sites/locale/zh_Hant/LC_MESSAGES/django.po b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/sites/locale/zh_Hant/LC_MESSAGES/django.po new file mode 100644 index 0000000000000000000000000000000000000000..d1a5583f8cde78f327cad686afbed490a6d4279e --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/sites/locale/zh_Hant/LC_MESSAGES/django.po @@ -0,0 +1,39 @@ +# This file is distributed under the same license as the Django package. 
+# +# Translators: +# Chen Chun-Chia , 2015 +# Jannis Leidel , 2011 +# mail6543210 , 2013 +# Tzu-ping Chung , 2016 +msgid "" +msgstr "" +"Project-Id-Version: django\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2015-01-17 11:07+0100\n" +"PO-Revision-Date: 2017-09-19 16:40+0000\n" +"Last-Translator: Tzu-ping Chung \n" +"Language-Team: Chinese (Taiwan) (http://www.transifex.com/django/django/" +"language/zh_TW/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: zh_TW\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +msgid "Sites" +msgstr "網站" + +msgid "The domain name cannot contain any spaces or tabs." +msgstr "網域名稱不能包含空格或定位字元。" + +msgid "domain name" +msgstr "網域名稱" + +msgid "display name" +msgstr "顯示名稱" + +msgid "site" +msgstr "網站" + +msgid "sites" +msgstr "網站" diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/apps.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/apps.py new file mode 100644 index 0000000000000000000000000000000000000000..adf9f7e57d938c2ddeb7fc30e97d398a25154fa2 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/apps.py @@ -0,0 +1,13 @@ +from django.apps import AppConfig +from django.contrib.staticfiles.checks import check_finders +from django.core import checks +from django.utils.translation import gettext_lazy as _ + + +class StaticFilesConfig(AppConfig): + name = 'django.contrib.staticfiles' + verbose_name = _("Static Files") + ignore_patterns = ['CVS', '.*', '*~'] + + def ready(self): + checks.register(check_finders, 'staticfiles') diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/checks.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/checks.py new file mode 100644 index 
0000000000000000000000000000000000000000..fb57bf726db357d2d5cfeb1444248b6f1e66d15c --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/checks.py @@ -0,0 +1,14 @@ +from django.contrib.staticfiles.finders import get_finders + + +def check_finders(app_configs=None, **kwargs): + """Check all registered staticfiles finders.""" + errors = [] + for finder in get_finders(): + try: + finder_errors = finder.check() + except NotImplementedError: + pass + else: + errors.extend(finder_errors) + return errors diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/finders.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/finders.py new file mode 100644 index 0000000000000000000000000000000000000000..afce4ca637a2a0101cb2fe1bd2fab18a90da5564 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/finders.py @@ -0,0 +1,290 @@ +import functools +import os +from collections import OrderedDict + +from django.apps import apps +from django.conf import settings +from django.contrib.staticfiles import utils +from django.core.checks import Error +from django.core.exceptions import ImproperlyConfigured +from django.core.files.storage import ( + FileSystemStorage, Storage, default_storage, +) +from django.utils._os import safe_join +from django.utils.functional import LazyObject, empty +from django.utils.module_loading import import_string + +# To keep track on which directories the finder has searched the static files. +searched_locations = [] + + +class BaseFinder: + """ + A base file finder to be used for custom staticfiles finder classes. + """ + def check(self, **kwargs): + raise NotImplementedError( + 'subclasses may provide a check() method to verify the finder is ' + 'configured correctly.' 
+ ) + + def find(self, path, all=False): + """ + Given a relative file path, find an absolute file path. + + If the ``all`` parameter is False (default) return only the first found + file path; if True, return a list of all found files paths. + """ + raise NotImplementedError('subclasses of BaseFinder must provide a find() method') + + def list(self, ignore_patterns): + """ + Given an optional list of paths to ignore, return a two item iterable + consisting of the relative path and storage instance. + """ + raise NotImplementedError('subclasses of BaseFinder must provide a list() method') + + +class FileSystemFinder(BaseFinder): + """ + A static files finder that uses the ``STATICFILES_DIRS`` setting + to locate files. + """ + def __init__(self, app_names=None, *args, **kwargs): + # List of locations with static files + self.locations = [] + # Maps dir paths to an appropriate storage instance + self.storages = OrderedDict() + for root in settings.STATICFILES_DIRS: + if isinstance(root, (list, tuple)): + prefix, root = root + else: + prefix = '' + if (prefix, root) not in self.locations: + self.locations.append((prefix, root)) + for prefix, root in self.locations: + filesystem_storage = FileSystemStorage(location=root) + filesystem_storage.prefix = prefix + self.storages[root] = filesystem_storage + super().__init__(*args, **kwargs) + + def check(self, **kwargs): + errors = [] + if not isinstance(settings.STATICFILES_DIRS, (list, tuple)): + errors.append(Error( + 'The STATICFILES_DIRS setting is not a tuple or list.', + hint='Perhaps you forgot a trailing comma?', + id='staticfiles.E001', + )) + for root in settings.STATICFILES_DIRS: + if isinstance(root, (list, tuple)): + _, root = root + if settings.STATIC_ROOT and os.path.abspath(settings.STATIC_ROOT) == os.path.abspath(root): + errors.append(Error( + 'The STATICFILES_DIRS setting should not contain the ' + 'STATIC_ROOT setting.', + id='staticfiles.E002', + )) + return errors + + def find(self, path, all=False): 
+ """ + Look for files in the extra locations as defined in STATICFILES_DIRS. + """ + matches = [] + for prefix, root in self.locations: + if root not in searched_locations: + searched_locations.append(root) + matched_path = self.find_location(root, path, prefix) + if matched_path: + if not all: + return matched_path + matches.append(matched_path) + return matches + + def find_location(self, root, path, prefix=None): + """ + Find a requested static file in a location and return the found + absolute path (or ``None`` if no match). + """ + if prefix: + prefix = '%s%s' % (prefix, os.sep) + if not path.startswith(prefix): + return None + path = path[len(prefix):] + path = safe_join(root, path) + if os.path.exists(path): + return path + + def list(self, ignore_patterns): + """ + List all files in all locations. + """ + for prefix, root in self.locations: + storage = self.storages[root] + for path in utils.get_files(storage, ignore_patterns): + yield path, storage + + +class AppDirectoriesFinder(BaseFinder): + """ + A static files finder that looks in the directory of each app as + specified in the source_dir attribute. + """ + storage_class = FileSystemStorage + source_dir = 'static' + + def __init__(self, app_names=None, *args, **kwargs): + # The list of apps that are handled + self.apps = [] + # Mapping of app names to storage instances + self.storages = OrderedDict() + app_configs = apps.get_app_configs() + if app_names: + app_names = set(app_names) + app_configs = [ac for ac in app_configs if ac.name in app_names] + for app_config in app_configs: + app_storage = self.storage_class( + os.path.join(app_config.path, self.source_dir)) + if os.path.isdir(app_storage.location): + self.storages[app_config.name] = app_storage + if app_config.name not in self.apps: + self.apps.append(app_config.name) + super().__init__(*args, **kwargs) + + def list(self, ignore_patterns): + """ + List all files in all app storages. 
+ """ + for storage in self.storages.values(): + if storage.exists(''): # check if storage location exists + for path in utils.get_files(storage, ignore_patterns): + yield path, storage + + def find(self, path, all=False): + """ + Look for files in the app directories. + """ + matches = [] + for app in self.apps: + app_location = self.storages[app].location + if app_location not in searched_locations: + searched_locations.append(app_location) + match = self.find_in_app(app, path) + if match: + if not all: + return match + matches.append(match) + return matches + + def find_in_app(self, app, path): + """ + Find a requested static file in an app's static locations. + """ + storage = self.storages.get(app) + if storage: + # only try to find a file if the source dir actually exists + if storage.exists(path): + matched_path = storage.path(path) + if matched_path: + return matched_path + + +class BaseStorageFinder(BaseFinder): + """ + A base static files finder to be used to extended + with an own storage class. + """ + storage = None + + def __init__(self, storage=None, *args, **kwargs): + if storage is not None: + self.storage = storage + if self.storage is None: + raise ImproperlyConfigured("The staticfiles storage finder %r " + "doesn't have a storage class " + "assigned." % self.__class__) + # Make sure we have a storage instance here. + if not isinstance(self.storage, (Storage, LazyObject)): + self.storage = self.storage() + super().__init__(*args, **kwargs) + + def find(self, path, all=False): + """ + Look for files in the default file storage, if it's local. + """ + try: + self.storage.path('') + except NotImplementedError: + pass + else: + if self.storage.location not in searched_locations: + searched_locations.append(self.storage.location) + if self.storage.exists(path): + match = self.storage.path(path) + if all: + match = [match] + return match + return [] + + def list(self, ignore_patterns): + """ + List all files of the storage. 
+ """ + for path in utils.get_files(self.storage, ignore_patterns): + yield path, self.storage + + +class DefaultStorageFinder(BaseStorageFinder): + """ + A static files finder that uses the default storage backend. + """ + storage = default_storage + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + base_location = getattr(self.storage, 'base_location', empty) + if not base_location: + raise ImproperlyConfigured("The storage backend of the " + "staticfiles finder %r doesn't have " + "a valid location." % self.__class__) + + +def find(path, all=False): + """ + Find a static file with the given path using all enabled finders. + + If ``all`` is ``False`` (default), return the first matching + absolute path (or ``None`` if no match). Otherwise return a list. + """ + searched_locations[:] = [] + matches = [] + for finder in get_finders(): + result = finder.find(path, all=all) + if not all and result: + return result + if not isinstance(result, (list, tuple)): + result = [result] + matches.extend(result) + if matches: + return matches + # No match. + return [] if all else None + + +def get_finders(): + for finder_path in settings.STATICFILES_FINDERS: + yield get_finder(finder_path) + + +@functools.lru_cache(maxsize=None) +def get_finder(import_path): + """ + Import the staticfiles finder class described by import_path, where + import_path is the full Python path to the class. 
+ """ + Finder = import_string(import_path) + if not issubclass(Finder, BaseFinder): + raise ImproperlyConfigured('Finder "%s" is not a subclass of "%s"' % + (Finder, BaseFinder)) + return Finder() diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/handlers.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..2ec6d07d7122c71dc0b47fd7fedd208b0e18af03 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/handlers.py @@ -0,0 +1,67 @@ +from urllib.parse import urlparse +from urllib.request import url2pathname + +from django.conf import settings +from django.contrib.staticfiles import utils +from django.contrib.staticfiles.views import serve +from django.core.handlers.wsgi import WSGIHandler, get_path_info + + +class StaticFilesHandler(WSGIHandler): + """ + WSGI middleware that intercepts calls to the static files directory, as + defined by the STATIC_URL setting, and serves those files. + """ + # May be used to differentiate between handler types (e.g. in a + # request_finished signal) + handles_files = True + + def __init__(self, application): + self.application = application + self.base_url = urlparse(self.get_base_url()) + super().__init__() + + def load_middleware(self): + # Middleware are already loaded for self.application; no need to reload + # them for self. + pass + + def get_base_url(self): + utils.check_settings() + return settings.STATIC_URL + + def _should_handle(self, path): + """ + Check if the path should be handled. 
Ignore the path if: + * the host is provided as part of the base_url + * the request's path isn't under the media path (or equal) + """ + return path.startswith(self.base_url[2]) and not self.base_url[1] + + def file_path(self, url): + """ + Return the relative path to the media file on disk for the given URL. + """ + relative_url = url[len(self.base_url[2]):] + return url2pathname(relative_url) + + def serve(self, request): + """Serve the request path.""" + return serve(request, self.file_path(request.path), insecure=True) + + def get_response(self, request): + from django.http import Http404 + + if self._should_handle(request.path): + try: + return self.serve(request) + except Http404 as e: + if settings.DEBUG: + from django.views import debug + return debug.technical_404_response(request, e) + return super().get_response(request) + + def __call__(self, environ, start_response): + if not self._should_handle(get_path_info(environ)): + return self.application(environ, start_response) + return super().__call__(environ, start_response) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/management/commands/collectstatic.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/management/commands/collectstatic.py new file mode 100644 index 0000000000000000000000000000000000000000..e5ae48f9fe1edeffabfa5a8216b6ddbd9f1ed602 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/management/commands/collectstatic.py @@ -0,0 +1,355 @@ +import os +from collections import OrderedDict + +from django.apps import apps +from django.contrib.staticfiles.finders import get_finders +from django.contrib.staticfiles.storage import staticfiles_storage +from django.core.files.storage import FileSystemStorage +from django.core.management.base import BaseCommand, CommandError +from django.core.management.color import no_style 
+from django.utils.functional import cached_property + + +class Command(BaseCommand): + """ + Copies or symlinks static files from different locations to the + settings.STATIC_ROOT. + """ + help = "Collect static files in a single location." + requires_system_checks = False + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.copied_files = [] + self.symlinked_files = [] + self.unmodified_files = [] + self.post_processed_files = [] + self.storage = staticfiles_storage + self.style = no_style() + + @cached_property + def local(self): + try: + self.storage.path('') + except NotImplementedError: + return False + return True + + def add_arguments(self, parser): + parser.add_argument( + '--noinput', '--no-input', action='store_false', dest='interactive', + help="Do NOT prompt the user for input of any kind.", + ) + parser.add_argument( + '--no-post-process', action='store_false', dest='post_process', + help="Do NOT post process collected files.", + ) + parser.add_argument( + '-i', '--ignore', action='append', default=[], + dest='ignore_patterns', metavar='PATTERN', + help="Ignore files or directories matching this glob-style " + "pattern. 
Use multiple times to ignore more.", + ) + parser.add_argument( + '-n', '--dry-run', action='store_true', dest='dry_run', + help="Do everything except modify the filesystem.", + ) + parser.add_argument( + '-c', '--clear', action='store_true', dest='clear', + help="Clear the existing files using the storage " + "before trying to copy or link the original file.", + ) + parser.add_argument( + '-l', '--link', action='store_true', dest='link', + help="Create a symbolic link to each file instead of copying.", + ) + parser.add_argument( + '--no-default-ignore', action='store_false', dest='use_default_ignore_patterns', + help="Don't ignore the common private glob-style patterns (defaults to 'CVS', '.*' and '*~').", + ) + + def set_options(self, **options): + """ + Set instance variables based on an options dict + """ + self.interactive = options['interactive'] + self.verbosity = options['verbosity'] + self.symlink = options['link'] + self.clear = options['clear'] + self.dry_run = options['dry_run'] + ignore_patterns = options['ignore_patterns'] + if options['use_default_ignore_patterns']: + ignore_patterns += apps.get_app_config('staticfiles').ignore_patterns + self.ignore_patterns = list(set(ignore_patterns)) + self.post_process = options['post_process'] + + def collect(self): + """ + Perform the bulk of the work of collectstatic. + + Split off from handle() to facilitate testing. 
+ """ + if self.symlink and not self.local: + raise CommandError("Can't symlink to a remote destination.") + + if self.clear: + self.clear_dir('') + + if self.symlink: + handler = self.link_file + else: + handler = self.copy_file + + found_files = OrderedDict() + for finder in get_finders(): + for path, storage in finder.list(self.ignore_patterns): + # Prefix the relative path if the source storage contains it + if getattr(storage, 'prefix', None): + prefixed_path = os.path.join(storage.prefix, path) + else: + prefixed_path = path + + if prefixed_path not in found_files: + found_files[prefixed_path] = (storage, path) + handler(path, prefixed_path, storage) + else: + self.log( + "Found another file with the destination path '%s'. It " + "will be ignored since only the first encountered file " + "is collected. If this is not what you want, make sure " + "every static file has a unique path." % prefixed_path, + level=1, + ) + + # Here we check if the storage backend has a post_process + # method and pass it the list of modified files. + if self.post_process and hasattr(self.storage, 'post_process'): + processor = self.storage.post_process(found_files, + dry_run=self.dry_run) + for original_path, processed_path, processed in processor: + if isinstance(processed, Exception): + self.stderr.write("Post-processing '%s' failed!" % original_path) + # Add a blank line before the traceback, otherwise it's + # too easy to miss the relevant part of the error message. 
+ self.stderr.write("") + raise processed + if processed: + self.log("Post-processed '%s' as '%s'" % + (original_path, processed_path), level=1) + self.post_processed_files.append(original_path) + else: + self.log("Skipped post-processing '%s'" % original_path) + + return { + 'modified': self.copied_files + self.symlinked_files, + 'unmodified': self.unmodified_files, + 'post_processed': self.post_processed_files, + } + + def handle(self, **options): + self.set_options(**options) + + message = ['\n'] + if self.dry_run: + message.append( + 'You have activated the --dry-run option so no files will be modified.\n\n' + ) + + message.append( + 'You have requested to collect static files at the destination\n' + 'location as specified in your settings' + ) + + if self.is_local_storage() and self.storage.location: + destination_path = self.storage.location + message.append(':\n\n %s\n\n' % destination_path) + should_warn_user = ( + self.storage.exists(destination_path) and + any(self.storage.listdir(destination_path)) + ) + else: + destination_path = None + message.append('.\n\n') + # Destination files existence not checked; play it safe and warn. 
+ should_warn_user = True + + if self.interactive and should_warn_user: + if self.clear: + message.append('This will DELETE ALL FILES in this location!\n') + else: + message.append('This will overwrite existing files!\n') + + message.append( + 'Are you sure you want to do this?\n\n' + "Type 'yes' to continue, or 'no' to cancel: " + ) + if input(''.join(message)) != 'yes': + raise CommandError("Collecting static files cancelled.") + + collected = self.collect() + modified_count = len(collected['modified']) + unmodified_count = len(collected['unmodified']) + post_processed_count = len(collected['post_processed']) + + if self.verbosity >= 1: + template = ("\n%(modified_count)s %(identifier)s %(action)s" + "%(destination)s%(unmodified)s%(post_processed)s.\n") + summary = template % { + 'modified_count': modified_count, + 'identifier': 'static file' + ('' if modified_count == 1 else 's'), + 'action': 'symlinked' if self.symlink else 'copied', + 'destination': (" to '%s'" % destination_path if destination_path else ''), + 'unmodified': (', %s unmodified' % unmodified_count if collected['unmodified'] else ''), + 'post_processed': (collected['post_processed'] and + ', %s post-processed' + % post_processed_count or ''), + } + return summary + + def log(self, msg, level=2): + """ + Small log helper + """ + if self.verbosity >= level: + self.stdout.write(msg) + + def is_local_storage(self): + return isinstance(self.storage, FileSystemStorage) + + def clear_dir(self, path): + """ + Delete the given relative path using the destination storage backend. 
+ """ + if not self.storage.exists(path): + return + + dirs, files = self.storage.listdir(path) + for f in files: + fpath = os.path.join(path, f) + if self.dry_run: + self.log("Pretending to delete '%s'" % fpath, level=1) + else: + self.log("Deleting '%s'" % fpath, level=1) + try: + full_path = self.storage.path(fpath) + except NotImplementedError: + self.storage.delete(fpath) + else: + if not os.path.exists(full_path) and os.path.lexists(full_path): + # Delete broken symlinks + os.unlink(full_path) + else: + self.storage.delete(fpath) + for d in dirs: + self.clear_dir(os.path.join(path, d)) + + def delete_file(self, path, prefixed_path, source_storage): + """ + Check if the target file should be deleted if it already exists. + """ + if self.storage.exists(prefixed_path): + try: + # When was the target file modified last time? + target_last_modified = self.storage.get_modified_time(prefixed_path) + except (OSError, NotImplementedError, AttributeError): + # The storage doesn't support get_modified_time() or failed + pass + else: + try: + # When was the source file modified last time? + source_last_modified = source_storage.get_modified_time(path) + except (OSError, NotImplementedError, AttributeError): + pass + else: + # The full path of the target file + if self.local: + full_path = self.storage.path(prefixed_path) + # If it's --link mode and the path isn't a link (i.e. + # the previous collectstatic wasn't with --link) or if + # it's non-link mode and the path is a link (i.e. the + # previous collectstatic was with --link), the old + # links/files must be deleted so it's not safe to skip + # unmodified files. + can_skip_unmodified_files = not (self.symlink ^ os.path.islink(full_path)) + else: + full_path = None + # In remote storages, skipping is only based on the + # modified times since symlinks aren't relevant. 
+ can_skip_unmodified_files = True + # Avoid sub-second precision (see #14665, #19540) + file_is_unmodified = ( + target_last_modified.replace(microsecond=0) >= + source_last_modified.replace(microsecond=0) + ) + if file_is_unmodified and can_skip_unmodified_files: + if prefixed_path not in self.unmodified_files: + self.unmodified_files.append(prefixed_path) + self.log("Skipping '%s' (not modified)" % path) + return False + # Then delete the existing file if really needed + if self.dry_run: + self.log("Pretending to delete '%s'" % path) + else: + self.log("Deleting '%s'" % path) + self.storage.delete(prefixed_path) + return True + + def link_file(self, path, prefixed_path, source_storage): + """ + Attempt to link ``path`` + """ + # Skip this file if it was already copied earlier + if prefixed_path in self.symlinked_files: + return self.log("Skipping '%s' (already linked earlier)" % path) + # Delete the target file if needed or break + if not self.delete_file(path, prefixed_path, source_storage): + return + # The full path of the source file + source_path = source_storage.path(path) + # Finally link the file + if self.dry_run: + self.log("Pretending to link '%s'" % source_path, level=1) + else: + self.log("Linking '%s'" % source_path, level=1) + full_path = self.storage.path(prefixed_path) + try: + os.makedirs(os.path.dirname(full_path)) + except OSError: + pass + try: + if os.path.lexists(full_path): + os.unlink(full_path) + os.symlink(source_path, full_path) + except AttributeError: + import platform + raise CommandError("Symlinking is not supported by Python %s." % + platform.python_version()) + except NotImplementedError: + import platform + raise CommandError("Symlinking is not supported in this " + "platform (%s)." 
% platform.platform()) + except OSError as e: + raise CommandError(e) + if prefixed_path not in self.symlinked_files: + self.symlinked_files.append(prefixed_path) + + def copy_file(self, path, prefixed_path, source_storage): + """ + Attempt to copy ``path`` with storage + """ + # Skip this file if it was already copied earlier + if prefixed_path in self.copied_files: + return self.log("Skipping '%s' (already copied earlier)" % path) + # Delete the target file if needed or break + if not self.delete_file(path, prefixed_path, source_storage): + return + # The full path of the source file + source_path = source_storage.path(path) + # Finally start copying + if self.dry_run: + self.log("Pretending to copy '%s'" % source_path, level=1) + else: + self.log("Copying '%s'" % source_path, level=1) + with source_storage.open(path) as source_file: + self.storage.save(prefixed_path, source_file) + self.copied_files.append(prefixed_path) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/management/commands/findstatic.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/management/commands/findstatic.py new file mode 100644 index 0000000000000000000000000000000000000000..cd58015788d613873ce0fd148ea4c218df162395 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/management/commands/findstatic.py @@ -0,0 +1,43 @@ +import os + +from django.contrib.staticfiles import finders +from django.core.management.base import LabelCommand + + +class Command(LabelCommand): + help = "Finds the absolute paths for the given static file(s)." 
+ label = 'staticfile' + + def add_arguments(self, parser): + super().add_arguments(parser) + parser.add_argument( + '--first', action='store_false', dest='all', + help="Only return the first match for each static file.", + ) + + def handle_label(self, path, **options): + verbosity = options['verbosity'] + result = finders.find(path, all=options['all']) + if verbosity >= 2: + searched_locations = ( + "\nLooking in the following locations:\n %s" % + "\n ".join(finders.searched_locations) + ) + else: + searched_locations = '' + if result: + if not isinstance(result, (list, tuple)): + result = [result] + result = (os.path.realpath(path) for path in result) + if verbosity >= 1: + file_list = '\n '.join(result) + return ("Found '%s' here:\n %s%s" % + (path, file_list, searched_locations)) + else: + return '\n'.join(result) + else: + message = ["No matching file found for '%s'." % path] + if verbosity >= 2: + message.append(searched_locations) + if verbosity >= 1: + self.stderr.write('\n'.join(message)) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/management/commands/runserver.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/management/commands/runserver.py new file mode 100644 index 0000000000000000000000000000000000000000..fe050f4dfa0e3c5a25545fa183092ebb21218dca --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/management/commands/runserver.py @@ -0,0 +1,32 @@ +from django.conf import settings +from django.contrib.staticfiles.handlers import StaticFilesHandler +from django.core.management.commands.runserver import ( + Command as RunserverCommand, +) + + +class Command(RunserverCommand): + help = "Starts a lightweight Web server for development and also serves static files." 
+ + def add_arguments(self, parser): + super().add_arguments(parser) + parser.add_argument( + '--nostatic', action="store_false", dest='use_static_handler', + help='Tells Django to NOT automatically serve static files at STATIC_URL.', + ) + parser.add_argument( + '--insecure', action="store_true", dest='insecure_serving', + help='Allows serving static files even if DEBUG is False.', + ) + + def get_handler(self, *args, **options): + """ + Return the static files serving handler wrapping the default handler, + if static files should be served. Otherwise return the default handler. + """ + handler = super().get_handler(*args, **options) + use_static_handler = options['use_static_handler'] + insecure_serving = options['insecure_serving'] + if use_static_handler and (settings.DEBUG or insecure_serving): + return StaticFilesHandler(handler) + return handler diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/storage.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/storage.py new file mode 100644 index 0000000000000000000000000000000000000000..ad9b4b0124b49fbe1f5a6bb470bb3888774a7fd0 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/storage.py @@ -0,0 +1,497 @@ +import hashlib +import json +import os +import posixpath +import re +from collections import OrderedDict +from urllib.parse import unquote, urldefrag, urlsplit, urlunsplit + +from django.conf import settings +from django.contrib.staticfiles.utils import check_settings, matches_patterns +from django.core.cache import ( + InvalidCacheBackendError, cache as default_cache, caches, +) +from django.core.exceptions import ImproperlyConfigured +from django.core.files.base import ContentFile +from django.core.files.storage import FileSystemStorage, get_storage_class +from django.utils.encoding import force_bytes +from django.utils.functional 
class StaticFilesStorage(FileSystemStorage):
    """
    Standard file system storage for static files.

    The defaults for ``location`` and ``base_url`` are
    ``STATIC_ROOT`` and ``STATIC_URL``.
    """
    def __init__(self, location=None, base_url=None, *args, **kwargs):
        # Fall back to the project-wide settings when no explicit values given.
        if location is None:
            location = settings.STATIC_ROOT
        if base_url is None:
            base_url = settings.STATIC_URL
        check_settings(base_url)
        super().__init__(location, base_url, *args, **kwargs)
        # FileSystemStorage fallbacks to MEDIA_ROOT when location
        # is empty, so we restore the empty value.
        if not location:
            self.base_location = None
            self.location = None

    def path(self, name):
        # Raise early with a helpful message instead of joining onto None.
        if not self.location:
            raise ImproperlyConfigured("You're using the staticfiles app "
                                       "without having set the STATIC_ROOT "
                                       "setting to a filesystem path.")
        return super().path(name)


class HashedFilesMixin:
    # Template used when rewriting matched url(...) references in CSS.
    default_template = """url("%s")"""
    # Upper bound on substitution passes in post_process() to avoid loops.
    max_post_process_passes = 5
    # (extension glob, (pattern | (pattern, template), ...)) pairs; a bare
    # pattern uses default_template.
    patterns = (
        ("*.css", (
            r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
            (r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
        )),
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Map extension glob -> list of (compiled regex, template) pairs.
        self._patterns = OrderedDict()
        self.hashed_files = {}
        for extension, patterns in self.patterns:
            for pattern in patterns:
                if isinstance(pattern, (tuple, list)):
                    pattern, template = pattern
                else:
                    template = self.default_template
                compiled = re.compile(pattern, re.IGNORECASE)
                self._patterns.setdefault(extension, []).append((compiled, template))

    def file_hash(self, name, content=None):
        """
        Return a hash of the file with the given name and optional content.
        """
        # No content means nothing to hash; callers treat None as
        # "leave the filename unchanged".
        if content is None:
            return None
        md5 = hashlib.md5()
        for chunk in content.chunks():
            md5.update(chunk)
        # Only the first 12 hex digits are embedded in the hashed filename.
        return md5.hexdigest()[:12]

    def hashed_name(self, name, content=None, filename=None):
        # `filename` is the name of file to hash if `content` isn't given.
+ # `name` is the base name to construct the new hashed filename from. + parsed_name = urlsplit(unquote(name)) + clean_name = parsed_name.path.strip() + if filename: + filename = urlsplit(unquote(filename)).path.strip() + filename = filename or clean_name + opened = False + if content is None: + if not self.exists(filename): + raise ValueError("The file '%s' could not be found with %r." % (filename, self)) + try: + content = self.open(filename) + except IOError: + # Handle directory paths and fragments + return name + opened = True + try: + file_hash = self.file_hash(clean_name, content) + finally: + if opened: + content.close() + path, filename = os.path.split(clean_name) + root, ext = os.path.splitext(filename) + if file_hash is not None: + file_hash = ".%s" % file_hash + hashed_name = os.path.join(path, "%s%s%s" % + (root, file_hash, ext)) + unparsed_name = list(parsed_name) + unparsed_name[2] = hashed_name + # Special casing for a @font-face hack, like url(myfont.eot?#iefix") + # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax + if '?#' in name and not unparsed_name[3]: + unparsed_name[2] += '?' + return urlunsplit(unparsed_name) + + def _url(self, hashed_name_func, name, force=False, hashed_files=None): + """ + Return the non-hashed URL in DEBUG mode. + """ + if settings.DEBUG and not force: + hashed_name, fragment = name, '' + else: + clean_name, fragment = urldefrag(name) + if urlsplit(clean_name).path.endswith('/'): # don't hash paths + hashed_name = name + else: + args = (clean_name,) + if hashed_files is not None: + args += (hashed_files,) + hashed_name = hashed_name_func(*args) + + final_url = super().url(hashed_name) + + # Special casing for a @font-face hack, like url(myfont.eot?#iefix") + # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax + query_fragment = '?#' in name # [sic!] 
+ if fragment or query_fragment: + urlparts = list(urlsplit(final_url)) + if fragment and not urlparts[4]: + urlparts[4] = fragment + if query_fragment and not urlparts[3]: + urlparts[2] += '?' + final_url = urlunsplit(urlparts) + + return unquote(final_url) + + def url(self, name, force=False): + """ + Return the non-hashed URL in DEBUG mode. + """ + return self._url(self.stored_name, name, force) + + def url_converter(self, name, hashed_files, template=None): + """ + Return the custom URL converter for the given file name. + """ + if template is None: + template = self.default_template + + def converter(matchobj): + """ + Convert the matched URL to a normalized and hashed URL. + + This requires figuring out which files the matched URL resolves + to and calling the url() method of the storage. + """ + matched, url = matchobj.groups() + + # Ignore absolute/protocol-relative and data-uri URLs. + if re.match(r'^[a-z]+:', url): + return matched + + # Ignore absolute URLs that don't point to a static file (dynamic + # CSS / JS?). Note that STATIC_URL cannot be empty. + if url.startswith('/') and not url.startswith(settings.STATIC_URL): + return matched + + # Strip off the fragment so a path-like fragment won't interfere. + url_path, fragment = urldefrag(url) + + if url_path.startswith('/'): + # Otherwise the condition above would have returned prematurely. + assert url_path.startswith(settings.STATIC_URL) + target_name = url_path[len(settings.STATIC_URL):] + else: + # We're using the posixpath module to mix paths and URLs conveniently. + source_name = name if os.sep == '/' else name.replace(os.sep, '/') + target_name = posixpath.join(posixpath.dirname(source_name), url_path) + + # Determine the hashed name of the target file with the storage backend. 
+ hashed_url = self._url( + self._stored_name, unquote(target_name), + force=True, hashed_files=hashed_files, + ) + + transformed_url = '/'.join(url_path.split('/')[:-1] + hashed_url.split('/')[-1:]) + + # Restore the fragment that was stripped off earlier. + if fragment: + transformed_url += ('?#' if '?#' in url else '#') + fragment + + # Return the hashed version to the file + return template % unquote(transformed_url) + + return converter + + def post_process(self, paths, dry_run=False, **options): + """ + Post process the given OrderedDict of files (called from collectstatic). + + Processing is actually two separate operations: + + 1. renaming files to include a hash of their content for cache-busting, + and copying those files to the target storage. + 2. adjusting files which contain references to other files so they + refer to the cache-busting filenames. + + If either of these are performed on a file, then that file is considered + post-processed. + """ + # don't even dare to process the files if we're in dry run mode + if dry_run: + return + + # where to store the new paths + hashed_files = OrderedDict() + + # build a list of adjustable files + adjustable_paths = [ + path for path in paths + if matches_patterns(path, self._patterns) + ] + # Do a single pass first. Post-process all files once, then repeat for + # adjustable files. 
+ for name, hashed_name, processed, _ in self._post_process(paths, adjustable_paths, hashed_files): + yield name, hashed_name, processed + + paths = {path: paths[path] for path in adjustable_paths} + + for i in range(self.max_post_process_passes): + substitutions = False + for name, hashed_name, processed, subst in self._post_process(paths, adjustable_paths, hashed_files): + yield name, hashed_name, processed + substitutions = substitutions or subst + + if not substitutions: + break + + if substitutions: + yield 'All', None, RuntimeError('Max post-process passes exceeded.') + + # Store the processed paths + self.hashed_files.update(hashed_files) + + def _post_process(self, paths, adjustable_paths, hashed_files): + # Sort the files by directory level + def path_level(name): + return len(name.split(os.sep)) + + for name in sorted(paths, key=path_level, reverse=True): + substitutions = True + # use the original, local file, not the copied-but-unprocessed + # file, which might be somewhere far away, like S3 + storage, path = paths[name] + with storage.open(path) as original_file: + cleaned_name = self.clean_name(name) + hash_key = self.hash_key(cleaned_name) + + # generate the hash with the original content, even for + # adjustable files. + if hash_key not in hashed_files: + hashed_name = self.hashed_name(name, original_file) + else: + hashed_name = hashed_files[hash_key] + + # then get the original's file content.. 
+ if hasattr(original_file, 'seek'): + original_file.seek(0) + + hashed_file_exists = self.exists(hashed_name) + processed = False + + # ..to apply each replacement pattern to the content + if name in adjustable_paths: + old_hashed_name = hashed_name + content = original_file.read().decode(settings.FILE_CHARSET) + for extension, patterns in self._patterns.items(): + if matches_patterns(path, (extension,)): + for pattern, template in patterns: + converter = self.url_converter(name, hashed_files, template) + try: + content = pattern.sub(converter, content) + except ValueError as exc: + yield name, None, exc, False + if hashed_file_exists: + self.delete(hashed_name) + # then save the processed result + content_file = ContentFile(force_bytes(content)) + # Save intermediate file for reference + saved_name = self._save(hashed_name, content_file) + hashed_name = self.hashed_name(name, content_file) + + if self.exists(hashed_name): + self.delete(hashed_name) + + saved_name = self._save(hashed_name, content_file) + hashed_name = self.clean_name(saved_name) + # If the file hash stayed the same, this file didn't change + if old_hashed_name == hashed_name: + substitutions = False + processed = True + + if not processed: + # or handle the case in which neither processing nor + # a change to the original file happened + if not hashed_file_exists: + processed = True + saved_name = self._save(hashed_name, original_file) + hashed_name = self.clean_name(saved_name) + + # and then set the cache accordingly + hashed_files[hash_key] = hashed_name + + yield name, hashed_name, processed, substitutions + + def clean_name(self, name): + return name.replace('\\', '/') + + def hash_key(self, name): + return name + + def _stored_name(self, name, hashed_files): + # Normalize the path to avoid multiple names for the same file like + # ../foo/bar.css and ../foo/../foo/bar.css which normalize to the same + # path. 
+ name = posixpath.normpath(name) + cleaned_name = self.clean_name(name) + hash_key = self.hash_key(cleaned_name) + cache_name = hashed_files.get(hash_key) + if cache_name is None: + cache_name = self.clean_name(self.hashed_name(name)) + return cache_name + + def stored_name(self, name): + cleaned_name = self.clean_name(name) + hash_key = self.hash_key(cleaned_name) + cache_name = self.hashed_files.get(hash_key) + if cache_name: + return cache_name + # No cached name found, recalculate it from the files. + intermediate_name = name + for i in range(self.max_post_process_passes + 1): + cache_name = self.clean_name( + self.hashed_name(name, content=None, filename=intermediate_name) + ) + if intermediate_name == cache_name: + # Store the hashed name if there was a miss. + self.hashed_files[hash_key] = cache_name + return cache_name + else: + # Move on to the next intermediate file. + intermediate_name = cache_name + # If the cache name can't be determined after the max number of passes, + # the intermediate files on disk may be corrupt; avoid an infinite loop. + raise ValueError("The name '%s' could not be hashed with %r." 
% (name, self)) + + +class ManifestFilesMixin(HashedFilesMixin): + manifest_version = '1.0' # the manifest format standard + manifest_name = 'staticfiles.json' + manifest_strict = True + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.hashed_files = self.load_manifest() + + def read_manifest(self): + try: + with self.open(self.manifest_name) as manifest: + return manifest.read().decode() + except IOError: + return None + + def load_manifest(self): + content = self.read_manifest() + if content is None: + return OrderedDict() + try: + stored = json.loads(content, object_pairs_hook=OrderedDict) + except ValueError: + pass + else: + version = stored.get('version') + if version == '1.0': + return stored.get('paths', OrderedDict()) + raise ValueError("Couldn't load manifest '%s' (version %s)" % + (self.manifest_name, self.manifest_version)) + + def post_process(self, *args, **kwargs): + self.hashed_files = OrderedDict() + yield from super().post_process(*args, **kwargs) + self.save_manifest() + + def save_manifest(self): + payload = {'paths': self.hashed_files, 'version': self.manifest_version} + if self.exists(self.manifest_name): + self.delete(self.manifest_name) + contents = json.dumps(payload).encode() + self._save(self.manifest_name, ContentFile(contents)) + + def stored_name(self, name): + parsed_name = urlsplit(unquote(name)) + clean_name = parsed_name.path.strip() + hash_key = self.hash_key(clean_name) + cache_name = self.hashed_files.get(hash_key) + if cache_name is None: + if self.manifest_strict: + raise ValueError("Missing staticfiles manifest entry for '%s'" % clean_name) + cache_name = self.clean_name(self.hashed_name(name)) + unparsed_name = list(parsed_name) + unparsed_name[2] = cache_name + # Special casing for a @font-face hack, like url(myfont.eot?#iefix") + # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax + if '?#' in name and not unparsed_name[3]: + unparsed_name[2] += '?' 
+ return urlunsplit(unparsed_name) + + +class _MappingCache: + """ + A small dict-like wrapper for a given cache backend instance. + """ + def __init__(self, cache): + self.cache = cache + + def __setitem__(self, key, value): + self.cache.set(key, value) + + def __getitem__(self, key): + value = self.cache.get(key) + if value is None: + raise KeyError("Couldn't find a file name '%s'" % key) + return value + + def clear(self): + self.cache.clear() + + def update(self, data): + self.cache.set_many(data) + + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + + +class CachedFilesMixin(HashedFilesMixin): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + try: + self.hashed_files = _MappingCache(caches['staticfiles']) + except InvalidCacheBackendError: + # Use the default backend + self.hashed_files = _MappingCache(default_cache) + + def hash_key(self, name): + key = hashlib.md5(force_bytes(self.clean_name(name))).hexdigest() + return 'staticfiles:%s' % key + + +class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage): + """ + A static file system storage backend which also saves + hashed copies of the files it saves. + """ + pass + + +class ManifestStaticFilesStorage(ManifestFilesMixin, StaticFilesStorage): + """ + A static file system storage backend which also saves + hashed copies of the files it saves. 
+ """ + pass + + +class ConfiguredStorage(LazyObject): + def _setup(self): + self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)() + + +staticfiles_storage = ConfiguredStorage() diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/templatetags/staticfiles.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/templatetags/staticfiles.py new file mode 100644 index 0000000000000000000000000000000000000000..eeb7452dbfaf715748b246a624a072851e36051b --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/templatetags/staticfiles.py @@ -0,0 +1,19 @@ +from django import template +from django.templatetags.static import ( + do_static as _do_static, static as _static, +) + +register = template.Library() + + +def static(path): + # Backwards compatibility alias for django.templatetags.static.static(). + # Deprecation should start in Django 2.0. + return _static(path) + + +@register.tag('static') +def do_static(parser, token): + # Backwards compatibility alias for django.templatetags.static.do_static(). + # Deprecation should start in Django 2.0. 
+ return _do_static(parser, token) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/testing.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..546a24ae177c397f92d90cdf63da550ad1729ab4 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/testing.py @@ -0,0 +1,13 @@ +from django.contrib.staticfiles.handlers import StaticFilesHandler +from django.test import LiveServerTestCase + + +class StaticLiveServerTestCase(LiveServerTestCase): + """ + Extend django.test.LiveServerTestCase to transparently overlay at test + execution-time the assets provided by the staticfiles app finders. This + means you don't need to run collectstatic before or as a part of your tests + setup. + """ + + static_handler = StaticFilesHandler diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/urls.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/urls.py new file mode 100644 index 0000000000000000000000000000000000000000..6278f35b3573921bb4df0614bfa50e6787dc461e --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/urls.py @@ -0,0 +1,19 @@ +from django.conf import settings +from django.conf.urls.static import static +from django.contrib.staticfiles.views import serve + +urlpatterns = [] + + +def staticfiles_urlpatterns(prefix=None): + """ + Helper function to return a URL pattern for serving static files. 
+ """ + if prefix is None: + prefix = settings.STATIC_URL + return static(prefix, view=serve) + + +# Only append if urlpatterns are empty +if settings.DEBUG and not urlpatterns: + urlpatterns += staticfiles_urlpatterns() diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/utils.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..67a7cb6d9edadbcb504572ce42094296e44beaec --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/utils.py @@ -0,0 +1,59 @@ +import fnmatch +import os + +from django.conf import settings +from django.core.exceptions import ImproperlyConfigured + + +def matches_patterns(path, patterns=None): + """ + Return True or False depending on whether the ``path`` should be + ignored (if it matches any pattern in ``ignore_patterns``). + """ + if patterns is None: + patterns = [] + for pattern in patterns: + if fnmatch.fnmatchcase(path, pattern): + return True + return False + + +def get_files(storage, ignore_patterns=None, location=''): + """ + Recursively walk the storage directories yielding the paths + of all files that should be copied. + """ + if ignore_patterns is None: + ignore_patterns = [] + directories, files = storage.listdir(location) + for fn in files: + if matches_patterns(fn, ignore_patterns): + continue + if location: + fn = os.path.join(location, fn) + yield fn + for dir in directories: + if matches_patterns(dir, ignore_patterns): + continue + if location: + dir = os.path.join(location, dir) + yield from get_files(storage, ignore_patterns, dir) + + +def check_settings(base_url=None): + """ + Check if the staticfiles settings have sane values. 
+ """ + if base_url is None: + base_url = settings.STATIC_URL + if not base_url: + raise ImproperlyConfigured( + "You're using the staticfiles app " + "without having set the required STATIC_URL setting.") + if settings.MEDIA_URL == base_url: + raise ImproperlyConfigured("The MEDIA_URL and STATIC_URL " + "settings must have different values") + if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and + (settings.MEDIA_ROOT == settings.STATIC_ROOT)): + raise ImproperlyConfigured("The MEDIA_ROOT and STATIC_ROOT " + "settings must have different values") diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/views.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/views.py new file mode 100644 index 0000000000000000000000000000000000000000..9987f49f73de43acb97692d24de728f524ed1d56 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/staticfiles/views.py @@ -0,0 +1,39 @@ +""" +Views and functions for serving static files. These are only to be used during +development, and SHOULD NOT be used in a production setting. + +""" +import os +import posixpath + +from django.conf import settings +from django.contrib.staticfiles import finders +from django.http import Http404 +from django.views import static + + +def serve(request, path, insecure=False, **kwargs): + """ + Serve static files below a given point in the directory structure or + from locations inferred from the staticfiles finders. + + To use, put a URL pattern such as:: + + from django.contrib.staticfiles import views + + url(r'^(?P.*)$', views.serve) + + in your URLconf. + + It uses the django.views.static.serve() view to serve the found files. 
+ """ + if not settings.DEBUG and not insecure: + raise Http404 + normalized_path = posixpath.normpath(path).lstrip('/') + absolute_path = finders.find(normalized_path) + if not absolute_path: + if path.endswith('/') or path == '': + raise Http404("Directory indexes are not allowed here.") + raise Http404("'%s' could not be found" % path) + document_root, path = os.path.split(absolute_path) + return static.serve(request, path, document_root=document_root, **kwargs) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/syndication/__init__.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/syndication/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2c3f52578daa0c6a58f6df47529306b6a0396c80 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/syndication/__init__.py @@ -0,0 +1 @@ +default_app_config = 'django.contrib.syndication.apps.SyndicationConfig' diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/syndication/apps.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/syndication/apps.py new file mode 100644 index 0000000000000000000000000000000000000000..b3f7c6cd61211bd8a161ef432f6f08d95c6a141f --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/syndication/apps.py @@ -0,0 +1,7 @@ +from django.apps import AppConfig +from django.utils.translation import gettext_lazy as _ + + +class SyndicationConfig(AppConfig): + name = 'django.contrib.syndication' + verbose_name = _("Syndication") diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/syndication/views.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/contrib/syndication/views.py new file mode 100644 index 
def add_domain(domain, url, secure=False):
    """Absolutize ``url`` against ``domain``, choosing http/https per ``secure``."""
    scheme = 'https' if secure else 'http'
    if url.startswith('//'):
        # Support network-path reference (see #16753) - RSS requires a protocol
        return '%s:%s' % (scheme, url)
    if url.startswith(('http://', 'https://', 'mailto:')):
        # Already absolute; leave untouched.
        return url
    return iri_to_uri('%s://%s%s' % (scheme, domain, url))
should be double escaped by default (see #6533) + return escape(str(item)) + + def item_description(self, item): + return str(item) + + def item_link(self, item): + try: + return item.get_absolute_url() + except AttributeError: + raise ImproperlyConfigured( + 'Give your %s class a get_absolute_url() method, or define an ' + 'item_link() method in your Feed class.' % item.__class__.__name__ + ) + + def item_enclosures(self, item): + enc_url = self._get_dynamic_attr('item_enclosure_url', item) + if enc_url: + enc = feedgenerator.Enclosure( + url=str(enc_url), + length=str(self._get_dynamic_attr('item_enclosure_length', item)), + mime_type=str(self._get_dynamic_attr('item_enclosure_mime_type', item)), + ) + return [enc] + return [] + + def _get_dynamic_attr(self, attname, obj, default=None): + try: + attr = getattr(self, attname) + except AttributeError: + return default + if callable(attr): + # Check co_argcount rather than try/excepting the function and + # catching the TypeError, because something inside the function + # may raise the TypeError. This technique is more accurate. + try: + code = attr.__code__ + except AttributeError: + code = attr.__call__.__code__ + if code.co_argcount == 2: # one argument is 'self' + return attr(obj) + else: + return attr() + return attr + + def feed_extra_kwargs(self, obj): + """ + Return an extra keyword arguments dictionary that is used when + initializing the feed generator. + """ + return {} + + def item_extra_kwargs(self, item): + """ + Return an extra keyword arguments dictionary that is used with + the `add_item` call of the feed generator. + """ + return {} + + def get_object(self, request, *args, **kwargs): + return None + + def get_context_data(self, **kwargs): + """ + Return a dictionary to use as extra context if either + ``self.description_template`` or ``self.item_template`` are used. + + Default implementation preserves the old behavior + of using {'obj': item, 'site': current_site} as the context. 
+ """ + return {'obj': kwargs.get('item'), 'site': kwargs.get('site')} + + def get_feed(self, obj, request): + """ + Return a feedgenerator.DefaultFeed object, fully populated, for + this feed. Raise FeedDoesNotExist for invalid parameters. + """ + current_site = get_current_site(request) + + link = self._get_dynamic_attr('link', obj) + link = add_domain(current_site.domain, link, request.is_secure()) + + feed = self.feed_type( + title=self._get_dynamic_attr('title', obj), + subtitle=self._get_dynamic_attr('subtitle', obj), + link=link, + description=self._get_dynamic_attr('description', obj), + language=settings.LANGUAGE_CODE, + feed_url=add_domain( + current_site.domain, + self._get_dynamic_attr('feed_url', obj) or request.path, + request.is_secure(), + ), + author_name=self._get_dynamic_attr('author_name', obj), + author_link=self._get_dynamic_attr('author_link', obj), + author_email=self._get_dynamic_attr('author_email', obj), + categories=self._get_dynamic_attr('categories', obj), + feed_copyright=self._get_dynamic_attr('feed_copyright', obj), + feed_guid=self._get_dynamic_attr('feed_guid', obj), + ttl=self._get_dynamic_attr('ttl', obj), + **self.feed_extra_kwargs(obj) + ) + + title_tmp = None + if self.title_template is not None: + try: + title_tmp = loader.get_template(self.title_template) + except TemplateDoesNotExist: + pass + + description_tmp = None + if self.description_template is not None: + try: + description_tmp = loader.get_template(self.description_template) + except TemplateDoesNotExist: + pass + + for item in self._get_dynamic_attr('items', obj): + context = self.get_context_data(item=item, site=current_site, + obj=obj, request=request) + if title_tmp is not None: + title = title_tmp.render(context, request) + else: + title = self._get_dynamic_attr('item_title', item) + if description_tmp is not None: + description = description_tmp.render(context, request) + else: + description = self._get_dynamic_attr('item_description', item) + link = 
add_domain( + current_site.domain, + self._get_dynamic_attr('item_link', item), + request.is_secure(), + ) + enclosures = self._get_dynamic_attr('item_enclosures', item) + author_name = self._get_dynamic_attr('item_author_name', item) + if author_name is not None: + author_email = self._get_dynamic_attr('item_author_email', item) + author_link = self._get_dynamic_attr('item_author_link', item) + else: + author_email = author_link = None + + tz = get_default_timezone() + + pubdate = self._get_dynamic_attr('item_pubdate', item) + if pubdate and is_naive(pubdate): + pubdate = make_aware(pubdate, tz) + + updateddate = self._get_dynamic_attr('item_updateddate', item) + if updateddate and is_naive(updateddate): + updateddate = make_aware(updateddate, tz) + + feed.add_item( + title=title, + link=link, + description=description, + unique_id=self._get_dynamic_attr('item_guid', item, link), + unique_id_is_permalink=self._get_dynamic_attr( + 'item_guid_is_permalink', item), + enclosures=enclosures, + pubdate=pubdate, + updateddate=updateddate, + author_name=author_name, + author_email=author_email, + author_link=author_link, + categories=self._get_dynamic_attr('item_categories', item), + item_copyright=self._get_dynamic_attr('item_copyright', item), + **self.item_extra_kwargs(item) + ) + return feed diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/__init__.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7aaa057229486c478aeb783d818bec02c16fcdb8 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/__init__.py @@ -0,0 +1,125 @@ +""" +Caching framework. + +This package defines set of cache backends that all conform to a simple API. +In a nutshell, a cache is a set of values -- which can be any object that +may be pickled -- identified by string keys. 
def _create_cache(backend, **kwargs):
    # Resolve `backend` -- either an alias into settings.CACHES or a dotted
    # class path -- into an instantiated cache backend.
    try:
        # Try to get the CACHES entry for the given backend name first
        try:
            conf = settings.CACHES[backend]
        except KeyError:
            try:
                # Trying to import the given backend, in case it's a dotted path
                import_string(backend)
            except ImportError as e:
                raise InvalidCacheBackendError("Could not find backend '%s': %s" % (
                    backend, e))
            # Dotted-path case: remaining kwargs become the backend params.
            location = kwargs.pop('LOCATION', '')
            params = kwargs
        else:
            # Alias case: copy the settings dict so callers' overrides in
            # kwargs don't mutate settings.CACHES.
            params = conf.copy()
            params.update(kwargs)
            backend = params.pop('BACKEND')
            location = params.pop('LOCATION', '')
        backend_cls = import_string(backend)
    except ImportError as e:
        raise InvalidCacheBackendError(
            "Could not find backend '%s': %s" % (backend, e))
    return backend_cls(location, params)
+ """ + def __init__(self): + self._caches = local() + + def __getitem__(self, alias): + try: + return self._caches.caches[alias] + except AttributeError: + self._caches.caches = {} + except KeyError: + pass + + if alias not in settings.CACHES: + raise InvalidCacheBackendError( + "Could not find config for '%s' in settings.CACHES" % alias + ) + + cache = _create_cache(alias) + self._caches.caches[alias] = cache + return cache + + def all(self): + return getattr(self._caches, 'caches', {}).values() + + +caches = CacheHandler() + + +class DefaultCacheProxy: + """ + Proxy access to the default Cache object's attributes. + + This allows the legacy `cache` object to be thread-safe using the new + ``caches`` API. + """ + def __getattr__(self, name): + return getattr(caches[DEFAULT_CACHE_ALIAS], name) + + def __setattr__(self, name, value): + return setattr(caches[DEFAULT_CACHE_ALIAS], name, value) + + def __delattr__(self, name): + return delattr(caches[DEFAULT_CACHE_ALIAS], name) + + def __contains__(self, key): + return key in caches[DEFAULT_CACHE_ALIAS] + + def __eq__(self, other): + return caches[DEFAULT_CACHE_ALIAS] == other + + +cache = DefaultCacheProxy() + + +def close_caches(**kwargs): + # Some caches -- python-memcached in particular -- need to do a cleanup at the + # end of a request cycle. If not implemented in a particular backend + # cache.close is a no-op + for cache in caches.all(): + cache.close() + + +signals.request_finished.connect(close_caches) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/base.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/base.py new file mode 100644 index 0000000000000000000000000000000000000000..cf0df7cc68ae1d64d31c1683e4e2a6edd6ff4a41 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/base.py @@ -0,0 +1,276 @@ +"Base Cache class." 
+import time +import warnings + +from django.core.exceptions import ImproperlyConfigured +from django.utils.module_loading import import_string + + +class InvalidCacheBackendError(ImproperlyConfigured): + pass + + +class CacheKeyWarning(RuntimeWarning): + pass + + +# Stub class to ensure not passing in a `timeout` argument results in +# the default timeout +DEFAULT_TIMEOUT = object() + +# Memcached does not accept keys longer than this. +MEMCACHE_MAX_KEY_LENGTH = 250 + + +def default_key_func(key, key_prefix, version): + """ + Default function to generate keys. + + Construct the key used by all other methods. By default, prepend + the `key_prefix'. KEY_FUNCTION can be used to specify an alternate + function with custom key making behavior. + """ + return '%s:%s:%s' % (key_prefix, version, key) + + +def get_key_func(key_func): + """ + Function to decide which key function to use. + + Default to ``default_key_func``. + """ + if key_func is not None: + if callable(key_func): + return key_func + else: + return import_string(key_func) + return default_key_func + + +class BaseCache: + def __init__(self, params): + timeout = params.get('timeout', params.get('TIMEOUT', 300)) + if timeout is not None: + try: + timeout = int(timeout) + except (ValueError, TypeError): + timeout = 300 + self.default_timeout = timeout + + options = params.get('OPTIONS', {}) + max_entries = params.get('max_entries', options.get('MAX_ENTRIES', 300)) + try: + self._max_entries = int(max_entries) + except (ValueError, TypeError): + self._max_entries = 300 + + cull_frequency = params.get('cull_frequency', options.get('CULL_FREQUENCY', 3)) + try: + self._cull_frequency = int(cull_frequency) + except (ValueError, TypeError): + self._cull_frequency = 3 + + self.key_prefix = params.get('KEY_PREFIX', '') + self.version = params.get('VERSION', 1) + self.key_func = get_key_func(params.get('KEY_FUNCTION')) + + def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT): + """ + Return the timeout value usable by 
this backend based upon the provided + timeout. + """ + if timeout == DEFAULT_TIMEOUT: + timeout = self.default_timeout + elif timeout == 0: + # ticket 21147 - avoid time.time() related precision issues + timeout = -1 + return None if timeout is None else time.time() + timeout + + def make_key(self, key, version=None): + """ + Construct the key used by all other methods. By default, use the + key_func to generate a key (which, by default, prepends the + `key_prefix' and 'version'). A different key function can be provided + at the time of cache construction; alternatively, you can subclass the + cache backend to provide custom key making behavior. + """ + if version is None: + version = self.version + + new_key = self.key_func(key, self.key_prefix, version) + return new_key + + def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): + """ + Set a value in the cache if the key does not already exist. If + timeout is given, use that timeout for the key; otherwise use the + default cache timeout. + + Return True if the value was stored, False otherwise. + """ + raise NotImplementedError('subclasses of BaseCache must provide an add() method') + + def get(self, key, default=None, version=None): + """ + Fetch a given key from the cache. If the key does not exist, return + default, which itself defaults to None. + """ + raise NotImplementedError('subclasses of BaseCache must provide a get() method') + + def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): + """ + Set a value in the cache. If timeout is given, use that timeout for the + key; otherwise use the default cache timeout. + """ + raise NotImplementedError('subclasses of BaseCache must provide a set() method') + + def delete(self, key, version=None): + """ + Delete a key from the cache, failing silently. + """ + raise NotImplementedError('subclasses of BaseCache must provide a delete() method') + + def get_many(self, keys, version=None): + """ + Fetch a bunch of keys from the cache. 
For certain backends (memcached, + pgsql) this can be *much* faster when fetching multiple values. + + Return a dict mapping each key in keys to its value. If the given + key is missing, it will be missing from the response dict. + """ + d = {} + for k in keys: + val = self.get(k, version=version) + if val is not None: + d[k] = val + return d + + def get_or_set(self, key, default, timeout=DEFAULT_TIMEOUT, version=None): + """ + Fetch a given key from the cache. If the key does not exist, + add the key and set it to the default value. The default value can + also be any callable. If timeout is given, use that timeout for the + key; otherwise use the default cache timeout. + + Return the value of the key stored or retrieved. + """ + val = self.get(key, version=version) + if val is None: + if callable(default): + default = default() + if default is not None: + self.add(key, default, timeout=timeout, version=version) + # Fetch the value again to avoid a race condition if another + # caller added a value between the first get() and the add() + # above. + return self.get(key, default, version=version) + return val + + def has_key(self, key, version=None): + """ + Return True if the key is in the cache and has not expired. + """ + return self.get(key, version=version) is not None + + def incr(self, key, delta=1, version=None): + """ + Add delta to value in the cache. If the key does not exist, raise a + ValueError exception. + """ + value = self.get(key, version=version) + if value is None: + raise ValueError("Key '%s' not found" % key) + new_value = value + delta + self.set(key, new_value, version=version) + return new_value + + def decr(self, key, delta=1, version=None): + """ + Subtract delta from value in the cache. If the key does not exist, raise + a ValueError exception. + """ + return self.incr(key, -delta, version=version) + + def __contains__(self, key): + """ + Return True if the key is in the cache and has not expired. 
+ """ + # This is a separate method, rather than just a copy of has_key(), + # so that it always has the same functionality as has_key(), even + # if a subclass overrides it. + return self.has_key(key) + + def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None): + """ + Set a bunch of values in the cache at once from a dict of key/value + pairs. For certain backends (memcached), this is much more efficient + than calling set() multiple times. + + If timeout is given, use that timeout for the key; otherwise use the + default cache timeout. + + On backends that support it, return a list of keys that failed + insertion, or an empty list if all keys were inserted successfully. + """ + for key, value in data.items(): + self.set(key, value, timeout=timeout, version=version) + return [] + + def delete_many(self, keys, version=None): + """ + Delete a bunch of values in the cache at once. For certain backends + (memcached), this is much more efficient than calling delete() multiple + times. + """ + for key in keys: + self.delete(key, version=version) + + def clear(self): + """Remove *all* values from the cache at once.""" + raise NotImplementedError('subclasses of BaseCache must provide a clear() method') + + def validate_key(self, key): + """ + Warn about keys that would not be portable to the memcached + backend. This encourages (but does not force) writing backend-portable + cache code. + """ + if len(key) > MEMCACHE_MAX_KEY_LENGTH: + warnings.warn( + 'Cache key will cause errors if used with memcached: %r ' + '(longer than %s)' % (key, MEMCACHE_MAX_KEY_LENGTH), CacheKeyWarning + ) + for char in key: + if ord(char) < 33 or ord(char) == 127: + warnings.warn( + 'Cache key contains characters that will cause errors if ' + 'used with memcached: %r' % key, CacheKeyWarning + ) + break + + def incr_version(self, key, delta=1, version=None): + """ + Add delta to the cache version for the supplied key. Return the new + version. 
+ """ + if version is None: + version = self.version + + value = self.get(key, version=version) + if value is None: + raise ValueError("Key '%s' not found" % key) + + self.set(key, value, version=version + delta) + self.delete(key, version=version) + return version + delta + + def decr_version(self, key, delta=1, version=None): + """ + Subtract delta from the cache version for the supplied key. Return the + new version. + """ + return self.incr_version(key, -delta, version) + + def close(self, **kwargs): + """Close the cache connection""" + pass diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/db.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/db.py new file mode 100644 index 0000000000000000000000000000000000000000..576d711a67b7caeb498f4a463d22b64d0f2f0647 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/db.py @@ -0,0 +1,208 @@ +"Database cache backend." +import base64 +import pickle +from datetime import datetime + +from django.conf import settings +from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache +from django.db import DatabaseError, connections, models, router, transaction +from django.utils import timezone +from django.utils.encoding import force_bytes +from django.utils.inspect import func_supports_parameter + + +class Options: + """A class that will quack like a Django model _meta class. 
+ + This allows cache operations to be controlled by the router + """ + def __init__(self, table): + self.db_table = table + self.app_label = 'django_cache' + self.model_name = 'cacheentry' + self.verbose_name = 'cache entry' + self.verbose_name_plural = 'cache entries' + self.object_name = 'CacheEntry' + self.abstract = False + self.managed = True + self.proxy = False + self.swapped = False + + +class BaseDatabaseCache(BaseCache): + def __init__(self, table, params): + BaseCache.__init__(self, params) + self._table = table + + class CacheEntry: + _meta = Options(table) + self.cache_model_class = CacheEntry + + +class DatabaseCache(BaseDatabaseCache): + + # This class uses cursors provided by the database connection. This means + # it reads expiration values as aware or naive datetimes, depending on the + # value of USE_TZ and whether the database supports time zones. The ORM's + # conversion and adaptation infrastructure is then used to avoid comparing + # aware and naive datetimes accidentally. 
+ + def get(self, key, default=None, version=None): + key = self.make_key(key, version=version) + self.validate_key(key) + db = router.db_for_read(self.cache_model_class) + connection = connections[db] + table = connection.ops.quote_name(self._table) + + with connection.cursor() as cursor: + cursor.execute("SELECT cache_key, value, expires FROM %s " + "WHERE cache_key = %%s" % table, [key]) + row = cursor.fetchone() + if row is None: + return default + + expires = row[2] + expression = models.Expression(output_field=models.DateTimeField()) + for converter in (connection.ops.get_db_converters(expression) + + expression.get_db_converters(connection)): + if func_supports_parameter(converter, 'context'): # RemovedInDjango30Warning + expires = converter(expires, expression, connection, {}) + else: + expires = converter(expires, expression, connection) + + if expires < timezone.now(): + db = router.db_for_write(self.cache_model_class) + connection = connections[db] + with connection.cursor() as cursor: + cursor.execute("DELETE FROM %s " + "WHERE cache_key = %%s" % table, [key]) + return default + + value = connection.ops.process_clob(row[1]) + return pickle.loads(base64.b64decode(force_bytes(value))) + + def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): + key = self.make_key(key, version=version) + self.validate_key(key) + self._base_set('set', key, value, timeout) + + def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): + key = self.make_key(key, version=version) + self.validate_key(key) + return self._base_set('add', key, value, timeout) + + def _base_set(self, mode, key, value, timeout=DEFAULT_TIMEOUT): + timeout = self.get_backend_timeout(timeout) + db = router.db_for_write(self.cache_model_class) + connection = connections[db] + table = connection.ops.quote_name(self._table) + + with connection.cursor() as cursor: + cursor.execute("SELECT COUNT(*) FROM %s" % table) + num = cursor.fetchone()[0] + now = timezone.now() + now = 
now.replace(microsecond=0) + if timeout is None: + exp = datetime.max + elif settings.USE_TZ: + exp = datetime.utcfromtimestamp(timeout) + else: + exp = datetime.fromtimestamp(timeout) + exp = exp.replace(microsecond=0) + if num > self._max_entries: + self._cull(db, cursor, now) + pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL) + # The DB column is expecting a string, so make sure the value is a + # string, not bytes. Refs #19274. + b64encoded = base64.b64encode(pickled).decode('latin1') + try: + # Note: typecasting for datetimes is needed by some 3rd party + # database backends. All core backends work without typecasting, + # so be careful about changes here - test suite will NOT pick + # regressions. + with transaction.atomic(using=db): + cursor.execute("SELECT cache_key, expires FROM %s " + "WHERE cache_key = %%s" % table, [key]) + result = cursor.fetchone() + + if result: + current_expires = result[1] + expression = models.Expression(output_field=models.DateTimeField()) + for converter in (connection.ops.get_db_converters(expression) + + expression.get_db_converters(connection)): + if func_supports_parameter(converter, 'context'): # RemovedInDjango30Warning + current_expires = converter(current_expires, expression, connection, {}) + else: + current_expires = converter(current_expires, expression, connection) + + exp = connection.ops.adapt_datetimefield_value(exp) + if result and (mode == 'set' or (mode == 'add' and current_expires < now)): + cursor.execute("UPDATE %s SET value = %%s, expires = %%s " + "WHERE cache_key = %%s" % table, + [b64encoded, exp, key]) + else: + cursor.execute("INSERT INTO %s (cache_key, value, expires) " + "VALUES (%%s, %%s, %%s)" % table, + [key, b64encoded, exp]) + except DatabaseError: + # To be threadsafe, updates/inserts are allowed to fail silently + return False + else: + return True + + def delete(self, key, version=None): + key = self.make_key(key, version=version) + self.validate_key(key) + + db = 
router.db_for_write(self.cache_model_class) + connection = connections[db] + table = connection.ops.quote_name(self._table) + + with connection.cursor() as cursor: + cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key]) + + def has_key(self, key, version=None): + key = self.make_key(key, version=version) + self.validate_key(key) + + db = router.db_for_read(self.cache_model_class) + connection = connections[db] + table = connection.ops.quote_name(self._table) + + if settings.USE_TZ: + now = datetime.utcnow() + else: + now = datetime.now() + now = now.replace(microsecond=0) + + with connection.cursor() as cursor: + cursor.execute("SELECT cache_key FROM %s " + "WHERE cache_key = %%s and expires > %%s" % table, + [key, connection.ops.adapt_datetimefield_value(now)]) + return cursor.fetchone() is not None + + def _cull(self, db, cursor, now): + if self._cull_frequency == 0: + self.clear() + else: + connection = connections[db] + table = connection.ops.quote_name(self._table) + cursor.execute("DELETE FROM %s WHERE expires < %%s" % table, + [connection.ops.adapt_datetimefield_value(now)]) + cursor.execute("SELECT COUNT(*) FROM %s" % table) + num = cursor.fetchone()[0] + if num > self._max_entries: + cull_num = num // self._cull_frequency + cursor.execute( + connection.ops.cache_key_culling_sql() % table, + [cull_num]) + cursor.execute("DELETE FROM %s " + "WHERE cache_key < %%s" % table, + [cursor.fetchone()[0]]) + + def clear(self): + db = router.db_for_write(self.cache_model_class) + connection = connections[db] + table = connection.ops.quote_name(self._table) + with connection.cursor() as cursor: + cursor.execute('DELETE FROM %s' % table) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/dummy.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/dummy.py new file mode 100644 index 
0000000000000000000000000000000000000000..7957df8072452549aa57d86e28163a17abbfbd4e --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/dummy.py @@ -0,0 +1,43 @@ +"Dummy cache backend" + +from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache + + +class DummyCache(BaseCache): + def __init__(self, host, *args, **kwargs): + BaseCache.__init__(self, *args, **kwargs) + + def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): + key = self.make_key(key, version=version) + self.validate_key(key) + return True + + def get(self, key, default=None, version=None): + key = self.make_key(key, version=version) + self.validate_key(key) + return default + + def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): + key = self.make_key(key, version=version) + self.validate_key(key) + + def delete(self, key, version=None): + key = self.make_key(key, version=version) + self.validate_key(key) + + def get_many(self, keys, version=None): + return {} + + def has_key(self, key, version=None): + key = self.make_key(key, version=version) + self.validate_key(key) + return False + + def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None): + return [] + + def delete_many(self, keys, version=None): + pass + + def clear(self): + pass diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/filebased.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/filebased.py new file mode 100644 index 0000000000000000000000000000000000000000..dccc3fdc652b9bed83eb3ad5e79ecff4405e155d --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/filebased.py @@ -0,0 +1,143 @@ +"File-based cache backend" +import glob +import hashlib +import os +import pickle +import random +import tempfile +import time +import zlib + +from 
django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache +from django.core.files.move import file_move_safe +from django.utils.encoding import force_bytes + + +class FileBasedCache(BaseCache): + cache_suffix = '.djcache' + + def __init__(self, dir, params): + super().__init__(params) + self._dir = os.path.abspath(dir) + self._createdir() + + def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): + if self.has_key(key, version): + return False + self.set(key, value, timeout, version) + return True + + def get(self, key, default=None, version=None): + fname = self._key_to_file(key, version) + try: + with open(fname, 'rb') as f: + if not self._is_expired(f): + return pickle.loads(zlib.decompress(f.read())) + except FileNotFoundError: + pass + return default + + def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): + self._createdir() # Cache dir can be deleted at any time. + fname = self._key_to_file(key, version) + self._cull() # make some room if necessary + fd, tmp_path = tempfile.mkstemp(dir=self._dir) + renamed = False + try: + with open(fd, 'wb') as f: + expiry = self.get_backend_timeout(timeout) + f.write(pickle.dumps(expiry, pickle.HIGHEST_PROTOCOL)) + f.write(zlib.compress(pickle.dumps(value, pickle.HIGHEST_PROTOCOL))) + file_move_safe(tmp_path, fname, allow_overwrite=True) + renamed = True + finally: + if not renamed: + os.remove(tmp_path) + + def delete(self, key, version=None): + self._delete(self._key_to_file(key, version)) + + def _delete(self, fname): + if not fname.startswith(self._dir) or not os.path.exists(fname): + return + try: + os.remove(fname) + except FileNotFoundError: + # The file may have been removed by another process. 
+ pass + + def has_key(self, key, version=None): + fname = self._key_to_file(key, version) + if os.path.exists(fname): + with open(fname, 'rb') as f: + return not self._is_expired(f) + return False + + def _cull(self): + """ + Remove random cache entries if max_entries is reached at a ratio + of num_entries / cull_frequency. A value of 0 for CULL_FREQUENCY means + that the entire cache will be purged. + """ + filelist = self._list_cache_files() + num_entries = len(filelist) + if num_entries < self._max_entries: + return # return early if no culling is required + if self._cull_frequency == 0: + return self.clear() # Clear the cache when CULL_FREQUENCY = 0 + # Delete a random selection of entries + filelist = random.sample(filelist, + int(num_entries / self._cull_frequency)) + for fname in filelist: + self._delete(fname) + + def _createdir(self): + if not os.path.exists(self._dir): + try: + os.makedirs(self._dir, 0o700) + except FileExistsError: + pass + + def _key_to_file(self, key, version=None): + """ + Convert a key into a cache file path. Basically this is the + root cache path joined with the md5sum of the key and a suffix. + """ + key = self.make_key(key, version=version) + self.validate_key(key) + return os.path.join(self._dir, ''.join( + [hashlib.md5(force_bytes(key)).hexdigest(), self.cache_suffix])) + + def clear(self): + """ + Remove all the cache files. + """ + if not os.path.exists(self._dir): + return + for fname in self._list_cache_files(): + self._delete(fname) + + def _is_expired(self, f): + """ + Take an open cache file `f` and delete it if it's expired. + """ + try: + exp = pickle.load(f) + except EOFError: + exp = 0 # An empty file is considered expired. + if exp is not None and exp < time.time(): + f.close() # On Windows a file has to be closed before deleting + self._delete(f.name) + return True + return False + + def _list_cache_files(self): + """ + Get a list of paths to all the cache files. 
These are all the files + in the root cache dir that end on the cache_suffix. + """ + if not os.path.exists(self._dir): + return [] + filelist = [os.path.join(self._dir, fname) for fname + in glob.glob1(self._dir, '*%s' % self.cache_suffix)] + return filelist diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/locmem.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/locmem.py new file mode 100644 index 0000000000000000000000000000000000000000..b2600d3169f3083201c9ee9c55d0909e33721876 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/locmem.py @@ -0,0 +1,131 @@ +"Thread-safe in-memory cache backend." +import pickle +import time +from contextlib import contextmanager + +from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache +from django.utils.synch import RWLock + +# Global in-memory store of cache data. Keyed by name, to provide +# multiple named local memory caches. 
+_caches = {} +_expire_info = {} +_locks = {} + + +@contextmanager +def dummy(): + """A context manager that does nothing special.""" + yield + + +class LocMemCache(BaseCache): + def __init__(self, name, params): + BaseCache.__init__(self, params) + self._cache = _caches.setdefault(name, {}) + self._expire_info = _expire_info.setdefault(name, {}) + self._lock = _locks.setdefault(name, RWLock()) + + def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): + key = self.make_key(key, version=version) + self.validate_key(key) + pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL) + with self._lock.writer(): + if self._has_expired(key): + self._set(key, pickled, timeout) + return True + return False + + def get(self, key, default=None, version=None, acquire_lock=True): + key = self.make_key(key, version=version) + self.validate_key(key) + pickled = None + with (self._lock.reader() if acquire_lock else dummy()): + if not self._has_expired(key): + pickled = self._cache[key] + if pickled is not None: + try: + return pickle.loads(pickled) + except pickle.PickleError: + return default + + with (self._lock.writer() if acquire_lock else dummy()): + try: + del self._cache[key] + del self._expire_info[key] + except KeyError: + pass + return default + + def _set(self, key, value, timeout=DEFAULT_TIMEOUT): + if len(self._cache) >= self._max_entries: + self._cull() + self._cache[key] = value + self._expire_info[key] = self.get_backend_timeout(timeout) + + def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): + key = self.make_key(key, version=version) + self.validate_key(key) + pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL) + with self._lock.writer(): + self._set(key, pickled, timeout) + + def incr(self, key, delta=1, version=None): + with self._lock.writer(): + value = self.get(key, version=version, acquire_lock=False) + if value is None: + raise ValueError("Key '%s' not found" % key) + new_value = value + delta + key = self.make_key(key, 
version=version) + pickled = pickle.dumps(new_value, pickle.HIGHEST_PROTOCOL) + self._cache[key] = pickled + return new_value + + def has_key(self, key, version=None): + key = self.make_key(key, version=version) + self.validate_key(key) + with self._lock.reader(): + if not self._has_expired(key): + return True + + with self._lock.writer(): + try: + del self._cache[key] + del self._expire_info[key] + except KeyError: + pass + return False + + def _has_expired(self, key): + exp = self._expire_info.get(key, -1) + if exp is None or exp > time.time(): + return False + return True + + def _cull(self): + if self._cull_frequency == 0: + self.clear() + else: + doomed = [k for (i, k) in enumerate(self._cache) if i % self._cull_frequency == 0] + for k in doomed: + self._delete(k) + + def _delete(self, key): + try: + del self._cache[key] + except KeyError: + pass + try: + del self._expire_info[key] + except KeyError: + pass + + def delete(self, key, version=None): + key = self.make_key(key, version=version) + self.validate_key(key) + with self._lock.writer(): + self._delete(key) + + def clear(self): + self._cache.clear() + self._expire_info.clear() diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/memcached.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/memcached.py new file mode 100644 index 0000000000000000000000000000000000000000..d49fd148fd7a741e0359d14bfb4317c273c85502 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/backends/memcached.py @@ -0,0 +1,200 @@ +"Memcached cache backend" + +import pickle +import re +import time +import warnings + +from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache +from django.utils.deprecation import RemovedInDjango21Warning +from django.utils.functional import cached_property + + +class BaseMemcachedCache(BaseCache): + def __init__(self, server, 
params, library, value_not_found_exception): + super().__init__(params) + if isinstance(server, str): + self._servers = re.split('[;,]', server) + else: + self._servers = server + + # The exception type to catch from the underlying library for a key + # that was not found. This is a ValueError for python-memcache, + # pylibmc.NotFound for pylibmc, and cmemcache will return None without + # raising an exception. + self.LibraryValueNotFoundException = value_not_found_exception + + self._lib = library + self._options = params.get('OPTIONS') or {} + + @property + def _cache(self): + """ + Implement transparent thread-safe access to a memcached client. + """ + if getattr(self, '_client', None) is None: + self._client = self._lib.Client(self._servers, **self._options) + + return self._client + + def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT): + """ + Memcached deals with long (> 30 days) timeouts in a special + way. Call this function to obtain a safe value for your timeout. + """ + if timeout == DEFAULT_TIMEOUT: + timeout = self.default_timeout + + if timeout is None: + # Using 0 in memcache sets a non-expiring timeout. + return 0 + elif int(timeout) == 0: + # Other cache backends treat 0 as set-and-expire. To achieve this + # in memcache backends, a negative timeout must be passed. + timeout = -1 + + if timeout > 2592000: # 60*60*24*30, 30 days + # See https://github.com/memcached/memcached/wiki/Programming#expiration + # "Expiration times can be set from 0, meaning "never expire", to + # 30 days. Any time higher than 30 days is interpreted as a Unix + # timestamp date. If you want to expire an object on January 1st of + # next year, this is how you do that." + # + # This means that we have to switch to absolute timestamps. 
+ timeout += int(time.time()) + return int(timeout) + + def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): + key = self.make_key(key, version=version) + return self._cache.add(key, value, self.get_backend_timeout(timeout)) + + def get(self, key, default=None, version=None): + key = self.make_key(key, version=version) + val = self._cache.get(key) + if val is None: + return default + return val + + def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): + key = self.make_key(key, version=version) + if not self._cache.set(key, value, self.get_backend_timeout(timeout)): + # make sure the key doesn't keep its old value in case of failure to set (memcached's 1MB limit) + self._cache.delete(key) + + def delete(self, key, version=None): + key = self.make_key(key, version=version) + self._cache.delete(key) + + def get_many(self, keys, version=None): + new_keys = [self.make_key(x, version=version) for x in keys] + ret = self._cache.get_multi(new_keys) + if ret: + m = dict(zip(new_keys, keys)) + return {m[k]: v for k, v in ret.items()} + return ret + + def close(self, **kwargs): + # Many clients don't clean up connections properly. + self._cache.disconnect_all() + + def incr(self, key, delta=1, version=None): + key = self.make_key(key, version=version) + # memcached doesn't support a negative delta + if delta < 0: + return self._cache.decr(key, -delta) + try: + val = self._cache.incr(key, delta) + + # python-memcache responds to incr on nonexistent keys by + # raising a ValueError, pylibmc by raising a pylibmc.NotFound + # and Cmemcache returns None. In all cases, + # we should raise a ValueError though. 
+ except self.LibraryValueNotFoundException: + val = None + if val is None: + raise ValueError("Key '%s' not found" % key) + return val + + def decr(self, key, delta=1, version=None): + key = self.make_key(key, version=version) + # memcached doesn't support a negative delta + if delta < 0: + return self._cache.incr(key, -delta) + try: + val = self._cache.decr(key, delta) + + # python-memcache responds to incr on nonexistent keys by + # raising a ValueError, pylibmc by raising a pylibmc.NotFound + # and Cmemcache returns None. In all cases, + # we should raise a ValueError though. + except self.LibraryValueNotFoundException: + val = None + if val is None: + raise ValueError("Key '%s' not found" % key) + return val + + def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None): + safe_data = {} + original_keys = {} + for key, value in data.items(): + safe_key = self.make_key(key, version=version) + safe_data[safe_key] = value + original_keys[safe_key] = key + failed_keys = self._cache.set_multi(safe_data, self.get_backend_timeout(timeout)) + return [original_keys[k] for k in failed_keys] + + def delete_many(self, keys, version=None): + self._cache.delete_multi(self.make_key(key, version=version) for key in keys) + + def clear(self): + self._cache.flush_all() + + +class MemcachedCache(BaseMemcachedCache): + "An implementation of a cache binding using python-memcached" + def __init__(self, server, params): + import memcache + super().__init__(server, params, library=memcache, value_not_found_exception=ValueError) + + @property + def _cache(self): + if getattr(self, '_client', None) is None: + client_kwargs = {'pickleProtocol': pickle.HIGHEST_PROTOCOL} + client_kwargs.update(self._options) + self._client = self._lib.Client(self._servers, **client_kwargs) + return self._client + + +class PyLibMCCache(BaseMemcachedCache): + "An implementation of a cache binding using pylibmc" + def __init__(self, server, params): + import pylibmc + super().__init__(server, params, 
library=pylibmc, value_not_found_exception=pylibmc.NotFound) + + # The contents of `OPTIONS` was formerly only used to set the behaviors + # attribute, but is now passed directly to the Client constructor. As such, + # any options that don't match a valid keyword argument are removed and set + # under the `behaviors` key instead, to maintain backwards compatibility. + legacy_behaviors = {} + for option in list(self._options): + if option not in ('behaviors', 'binary', 'username', 'password'): + warnings.warn( + "Specifying pylibmc cache behaviors as a top-level property " + "within `OPTIONS` is deprecated. Move `%s` into a dict named " + "`behaviors` inside `OPTIONS` instead." % option, + RemovedInDjango21Warning, + stacklevel=2, + ) + legacy_behaviors[option] = self._options.pop(option) + + if legacy_behaviors: + self._options.setdefault('behaviors', {}).update(legacy_behaviors) + + @cached_property + def _cache(self): + return self._lib.Client(self._servers, **self._options) + + def close(self, **kwargs): + # libmemcached manages its own connections. Don't call disconnect_all() + # as it resets the failover state and creates unnecessary reconnects. 
+ pass diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/utils.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e9e144275f46f957285aa38be4ae3587980b5547 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/cache/utils.py @@ -0,0 +1,14 @@ +import hashlib +from urllib.parse import quote + +from django.utils.encoding import force_bytes + +TEMPLATE_FRAGMENT_KEY_TEMPLATE = 'template.cache.%s.%s' + + +def make_template_fragment_key(fragment_name, vary_on=None): + if vary_on is None: + vary_on = () + key = ':'.join(quote(str(var)) for var in vary_on) + args = hashlib.md5(force_bytes(key)) + return TEMPLATE_FRAGMENT_KEY_TEMPLATE % (fragment_name, args.hexdigest()) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/__init__.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ef3f5db90e46da17dcac7d1ee7dbe28edbd9b11 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/__init__.py @@ -0,0 +1,23 @@ +from .messages import ( + CRITICAL, DEBUG, ERROR, INFO, WARNING, CheckMessage, Critical, Debug, + Error, Info, Warning, +) +from .registry import Tags, register, run_checks, tag_exists + +# Import these to force registration of checks +import django.core.checks.caches # NOQA isort:skip +import django.core.checks.database # NOQA isort:skip +import django.core.checks.model_checks # NOQA isort:skip +import django.core.checks.security.base # NOQA isort:skip +import django.core.checks.security.csrf # NOQA isort:skip +import django.core.checks.security.sessions # NOQA isort:skip +import django.core.checks.templates # NOQA isort:skip +import 
django.core.checks.urls # NOQA isort:skip + + +__all__ = [ + 'CheckMessage', + 'Debug', 'Info', 'Warning', 'Error', 'Critical', + 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', + 'register', 'run_checks', 'tag_exists', 'Tags', +] diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/caches.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/caches.py new file mode 100644 index 0000000000000000000000000000000000000000..0994a5b0e8a062758ef74d6d42d83149ad80c886 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/caches.py @@ -0,0 +1,16 @@ +from django.conf import settings +from django.core.cache import DEFAULT_CACHE_ALIAS + +from . import Error, Tags, register + +E001 = Error( + "You must define a '%s' cache in your CACHES setting." % DEFAULT_CACHE_ALIAS, + id='caches.E001', +) + + +@register(Tags.caches) +def check_default_cache_is_configured(app_configs, **kwargs): + if DEFAULT_CACHE_ALIAS not in settings.CACHES: + return [E001] + return [] diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/database.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/database.py new file mode 100644 index 0000000000000000000000000000000000000000..5778844fe964be790cc22d160e0b239c9a01a689 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/database.py @@ -0,0 +1,11 @@ +from django.db import connections + +from . 
import Tags, register + + +@register(Tags.database) +def check_database_backends(*args, **kwargs): + issues = [] + for conn in connections.all(): + issues.extend(conn.validation.check(**kwargs)) + return issues diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/messages.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/messages.py new file mode 100644 index 0000000000000000000000000000000000000000..aacac632eb4bec81cfad6a8c0dca6c951b2ab6aa --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/messages.py @@ -0,0 +1,75 @@ +# Levels +DEBUG = 10 +INFO = 20 +WARNING = 30 +ERROR = 40 +CRITICAL = 50 + + +class CheckMessage: + + def __init__(self, level, msg, hint=None, obj=None, id=None): + assert isinstance(level, int), "The first argument should be level." + self.level = level + self.msg = msg + self.hint = hint + self.obj = obj + self.id = id + + def __eq__(self, other): + return ( + isinstance(other, self.__class__) and + all(getattr(self, attr) == getattr(other, attr) + for attr in ['level', 'msg', 'hint', 'obj', 'id']) + ) + + def __str__(self): + from django.db import models + + if self.obj is None: + obj = "?" + elif isinstance(self.obj, models.base.ModelBase): + # We need to hardcode ModelBase and Field cases because its __str__ + # method doesn't return "applabel.modellabel" and cannot be changed. 
+ obj = self.obj._meta.label + else: + obj = str(self.obj) + id = "(%s) " % self.id if self.id else "" + hint = "\n\tHINT: %s" % self.hint if self.hint else '' + return "%s: %s%s%s" % (obj, id, self.msg, hint) + + def __repr__(self): + return "<%s: level=%r, msg=%r, hint=%r, obj=%r, id=%r>" % \ + (self.__class__.__name__, self.level, self.msg, self.hint, self.obj, self.id) + + def is_serious(self, level=ERROR): + return self.level >= level + + def is_silenced(self): + from django.conf import settings + return self.id in settings.SILENCED_SYSTEM_CHECKS + + +class Debug(CheckMessage): + def __init__(self, *args, **kwargs): + super().__init__(DEBUG, *args, **kwargs) + + +class Info(CheckMessage): + def __init__(self, *args, **kwargs): + super().__init__(INFO, *args, **kwargs) + + +class Warning(CheckMessage): + def __init__(self, *args, **kwargs): + super().__init__(WARNING, *args, **kwargs) + + +class Error(CheckMessage): + def __init__(self, *args, **kwargs): + super().__init__(ERROR, *args, **kwargs) + + +class Critical(CheckMessage): + def __init__(self, *args, **kwargs): + super().__init__(CRITICAL, *args, **kwargs) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/model_checks.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/model_checks.py new file mode 100644 index 0000000000000000000000000000000000000000..2397fe3bb28a5146cee3dbc6227cf56a43187fa4 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/model_checks.py @@ -0,0 +1,154 @@ +import inspect +import types +from itertools import chain + +from django.apps import apps +from django.core.checks import Error, Tags, register + + +@register(Tags.models) +def check_all_models(app_configs=None, **kwargs): + errors = [] + if app_configs is None: + models = apps.get_models() + else: + models = chain.from_iterable(app_config.get_models() for app_config in 
app_configs) + for model in models: + if not inspect.ismethod(model.check): + errors.append( + Error( + "The '%s.check()' class method is currently overridden by %r." + % (model.__name__, model.check), + obj=model, + id='models.E020' + ) + ) + else: + errors.extend(model.check(**kwargs)) + return errors + + +def _check_lazy_references(apps, ignore=None): + """ + Ensure all lazy (i.e. string) model references have been resolved. + + Lazy references are used in various places throughout Django, primarily in + related fields and model signals. Identify those common cases and provide + more helpful error messages for them. + + The ignore parameter is used by StateApps to exclude swappable models from + this check. + """ + pending_models = set(apps._pending_operations) - (ignore or set()) + + # Short circuit if there aren't any errors. + if not pending_models: + return [] + + from django.db.models import signals + model_signals = { + signal: name for name, signal in vars(signals).items() + if isinstance(signal, signals.ModelSignal) + } + + def extract_operation(obj): + """ + Take a callable found in Apps._pending_operations and identify the + original callable passed to Apps.lazy_model_operation(). If that + callable was a partial, return the inner, non-partial function and + any arguments and keyword arguments that were supplied with it. + + obj is a callback defined locally in Apps.lazy_model_operation() and + annotated there with a `func` attribute so as to imitate a partial. + """ + operation, args, keywords = obj, [], {} + while hasattr(operation, 'func'): + # The or clauses are redundant but work around a bug (#25945) in + # functools.partial in Python <= 3.5.1. 
+ args.extend(getattr(operation, 'args', []) or []) + keywords.update(getattr(operation, 'keywords', {}) or {}) + operation = operation.func + return operation, args, keywords + + def app_model_error(model_key): + try: + apps.get_app_config(model_key[0]) + model_error = "app '%s' doesn't provide model '%s'" % model_key + except LookupError: + model_error = "app '%s' isn't installed" % model_key[0] + return model_error + + # Here are several functions which return CheckMessage instances for the + # most common usages of lazy operations throughout Django. These functions + # take the model that was being waited on as an (app_label, modelname) + # pair, the original lazy function, and its positional and keyword args as + # determined by extract_operation(). + + def field_error(model_key, func, args, keywords): + error_msg = ( + "The field %(field)s was declared with a lazy reference " + "to '%(model)s', but %(model_error)s." + ) + params = { + 'model': '.'.join(model_key), + 'field': keywords['field'], + 'model_error': app_model_error(model_key), + } + return Error(error_msg % params, obj=keywords['field'], id='fields.E307') + + def signal_connect_error(model_key, func, args, keywords): + error_msg = ( + "%(receiver)s was connected to the '%(signal)s' signal with a " + "lazy reference to the sender '%(model)s', but %(model_error)s." + ) + receiver = args[0] + # The receiver is either a function or an instance of class + # defining a `__call__` method. 
+ if isinstance(receiver, types.FunctionType): + description = "The function '%s'" % receiver.__name__ + elif isinstance(receiver, types.MethodType): + description = "Bound method '%s.%s'" % (receiver.__self__.__class__.__name__, receiver.__name__) + else: + description = "An instance of class '%s'" % receiver.__class__.__name__ + signal_name = model_signals.get(func.__self__, 'unknown') + params = { + 'model': '.'.join(model_key), + 'receiver': description, + 'signal': signal_name, + 'model_error': app_model_error(model_key), + } + return Error(error_msg % params, obj=receiver.__module__, id='signals.E001') + + def default_error(model_key, func, args, keywords): + error_msg = "%(op)s contains a lazy reference to %(model)s, but %(model_error)s." + params = { + 'op': func, + 'model': '.'.join(model_key), + 'model_error': app_model_error(model_key), + } + return Error(error_msg % params, obj=func, id='models.E022') + + # Maps common uses of lazy operations to corresponding error functions + # defined above. If a key maps to None, no error will be produced. + # default_error() will be used for usages that don't appear in this dict. 
+ known_lazy = { + ('django.db.models.fields.related', 'resolve_related_class'): field_error, + ('django.db.models.fields.related', 'set_managed'): None, + ('django.dispatch.dispatcher', 'connect'): signal_connect_error, + } + + def build_error(model_key, func, args, keywords): + key = (func.__module__, func.__name__) + error_fn = known_lazy.get(key, default_error) + return error_fn(model_key, func, args, keywords) if error_fn else None + + return sorted(filter(None, ( + build_error(model_key, *extract_operation(func)) + for model_key in pending_models + for func in apps._pending_operations[model_key] + )), key=lambda error: error.msg) + + +@register(Tags.models) +def check_lazy_references(app_configs=None, **kwargs): + return _check_lazy_references(apps) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/registry.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..c580c804835cea77a3a180b65138cb3e774f5210 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/registry.py @@ -0,0 +1,98 @@ +from itertools import chain + +from django.utils.itercompat import is_iterable + + +class Tags: + """ + Built-in tags for internal checks. + """ + admin = 'admin' + caches = 'caches' + compatibility = 'compatibility' + database = 'database' + models = 'models' + security = 'security' + signals = 'signals' + templates = 'templates' + urls = 'urls' + + +class CheckRegistry: + + def __init__(self): + self.registered_checks = set() + self.deployment_checks = set() + + def register(self, check=None, *tags, **kwargs): + """ + Can be used as a function or a decorator. Register given function + `f` labeled with given `tags`. The function should receive **kwargs + and return list of Errors and Warnings. 
+ + Example:: + + registry = CheckRegistry() + @registry.register('mytag', 'anothertag') + def my_check(apps, **kwargs): + # ... perform checks and collect `errors` ... + return errors + # or + registry.register(my_check, 'mytag', 'anothertag') + """ + kwargs.setdefault('deploy', False) + + def inner(check): + check.tags = tags + checks = self.deployment_checks if kwargs['deploy'] else self.registered_checks + checks.add(check) + return check + + if callable(check): + return inner(check) + else: + if check: + tags += (check, ) + return inner + + def run_checks(self, app_configs=None, tags=None, include_deployment_checks=False): + """ + Run all registered checks and return list of Errors and Warnings. + """ + errors = [] + checks = self.get_checks(include_deployment_checks) + + if tags is not None: + checks = [check for check in checks if not set(check.tags).isdisjoint(tags)] + else: + # By default, 'database'-tagged checks are not run as they do more + # than mere static code analysis. + checks = [check for check in checks if Tags.database not in check.tags] + + for check in checks: + new_errors = check(app_configs=app_configs) + assert is_iterable(new_errors), ( + "The function %r did not return a list. All functions registered " + "with the checks registry must return a list." 
% check) + errors.extend(new_errors) + return errors + + def tag_exists(self, tag, include_deployment_checks=False): + return tag in self.tags_available(include_deployment_checks) + + def tags_available(self, deployment_checks=False): + return set(chain.from_iterable( + check.tags for check in self.get_checks(deployment_checks) + )) + + def get_checks(self, include_deployment_checks=False): + checks = list(self.registered_checks) + if include_deployment_checks: + checks.extend(self.deployment_checks) + return checks + + +registry = CheckRegistry() +register = registry.register +run_checks = registry.run_checks +tag_exists = registry.tag_exists diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/security/base.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/security/base.py new file mode 100644 index 0000000000000000000000000000000000000000..bc804c53df5bf64fd3e973e3a6efa31e944bf719 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/security/base.py @@ -0,0 +1,210 @@ +from django.conf import settings + +from .. import Tags, Warning, register + +SECRET_KEY_MIN_LENGTH = 50 +SECRET_KEY_MIN_UNIQUE_CHARACTERS = 5 + +W001 = Warning( + "You do not have 'django.middleware.security.SecurityMiddleware' " + "in your MIDDLEWARE so the SECURE_HSTS_SECONDS, " + "SECURE_CONTENT_TYPE_NOSNIFF, " + "SECURE_BROWSER_XSS_FILTER, and SECURE_SSL_REDIRECT settings " + "will have no effect.", + id='security.W001', +) + +W002 = Warning( + "You do not have " + "'django.middleware.clickjacking.XFrameOptionsMiddleware' in your " + "MIDDLEWARE, so your pages will not be served with an " + "'x-frame-options' header. 
Unless there is a good reason for your " + "site to be served in a frame, you should consider enabling this " + "header to help prevent clickjacking attacks.", + id='security.W002', +) + +W004 = Warning( + "You have not set a value for the SECURE_HSTS_SECONDS setting. " + "If your entire site is served only over SSL, you may want to consider " + "setting a value and enabling HTTP Strict Transport Security. " + "Be sure to read the documentation first; enabling HSTS carelessly " + "can cause serious, irreversible problems.", + id='security.W004', +) + +W005 = Warning( + "You have not set the SECURE_HSTS_INCLUDE_SUBDOMAINS setting to True. " + "Without this, your site is potentially vulnerable to attack " + "via an insecure connection to a subdomain. Only set this to True if " + "you are certain that all subdomains of your domain should be served " + "exclusively via SSL.", + id='security.W005', +) + +W006 = Warning( + "Your SECURE_CONTENT_TYPE_NOSNIFF setting is not set to True, " + "so your pages will not be served with an " + "'x-content-type-options: nosniff' header. " + "You should consider enabling this header to prevent the " + "browser from identifying content types incorrectly.", + id='security.W006', +) + +W007 = Warning( + "Your SECURE_BROWSER_XSS_FILTER setting is not set to True, " + "so your pages will not be served with an " + "'x-xss-protection: 1; mode=block' header. " + "You should consider enabling this header to activate the " + "browser's XSS filtering and help prevent XSS attacks.", + id='security.W007', +) + +W008 = Warning( + "Your SECURE_SSL_REDIRECT setting is not set to True. 
" + "Unless your site should be available over both SSL and non-SSL " + "connections, you may want to either set this setting True " + "or configure a load balancer or reverse-proxy server " + "to redirect all connections to HTTPS.", + id='security.W008', +) + +W009 = Warning( + "Your SECRET_KEY has less than %(min_length)s characters or less than " + "%(min_unique_chars)s unique characters. Please generate a long and random " + "SECRET_KEY, otherwise many of Django's security-critical features will be " + "vulnerable to attack." % { + 'min_length': SECRET_KEY_MIN_LENGTH, + 'min_unique_chars': SECRET_KEY_MIN_UNIQUE_CHARACTERS, + }, + id='security.W009', +) + +W018 = Warning( + "You should not have DEBUG set to True in deployment.", + id='security.W018', +) + +W019 = Warning( + "You have " + "'django.middleware.clickjacking.XFrameOptionsMiddleware' in your " + "MIDDLEWARE, but X_FRAME_OPTIONS is not set to 'DENY'. " + "The default is 'SAMEORIGIN', but unless there is a good reason for " + "your site to serve other parts of itself in a frame, you should " + "change it to 'DENY'.", + id='security.W019', +) + +W020 = Warning( + "ALLOWED_HOSTS must not be empty in deployment.", + id='security.W020', +) + +W021 = Warning( + "You have not set the SECURE_HSTS_PRELOAD setting to True. 
Without this, " + "your site cannot be submitted to the browser preload list.", + id='security.W021', +) + + +def _security_middleware(): + return 'django.middleware.security.SecurityMiddleware' in settings.MIDDLEWARE + + +def _xframe_middleware(): + return 'django.middleware.clickjacking.XFrameOptionsMiddleware' in settings.MIDDLEWARE + + +@register(Tags.security, deploy=True) +def check_security_middleware(app_configs, **kwargs): + passed_check = _security_middleware() + return [] if passed_check else [W001] + + +@register(Tags.security, deploy=True) +def check_xframe_options_middleware(app_configs, **kwargs): + passed_check = _xframe_middleware() + return [] if passed_check else [W002] + + +@register(Tags.security, deploy=True) +def check_sts(app_configs, **kwargs): + passed_check = not _security_middleware() or settings.SECURE_HSTS_SECONDS + return [] if passed_check else [W004] + + +@register(Tags.security, deploy=True) +def check_sts_include_subdomains(app_configs, **kwargs): + passed_check = ( + not _security_middleware() or + not settings.SECURE_HSTS_SECONDS or + settings.SECURE_HSTS_INCLUDE_SUBDOMAINS is True + ) + return [] if passed_check else [W005] + + +@register(Tags.security, deploy=True) +def check_sts_preload(app_configs, **kwargs): + passed_check = ( + not _security_middleware() or + not settings.SECURE_HSTS_SECONDS or + settings.SECURE_HSTS_PRELOAD is True + ) + return [] if passed_check else [W021] + + +@register(Tags.security, deploy=True) +def check_content_type_nosniff(app_configs, **kwargs): + passed_check = ( + not _security_middleware() or + settings.SECURE_CONTENT_TYPE_NOSNIFF is True + ) + return [] if passed_check else [W006] + + +@register(Tags.security, deploy=True) +def check_xss_filter(app_configs, **kwargs): + passed_check = ( + not _security_middleware() or + settings.SECURE_BROWSER_XSS_FILTER is True + ) + return [] if passed_check else [W007] + + +@register(Tags.security, deploy=True) +def check_ssl_redirect(app_configs, 
**kwargs): + passed_check = ( + not _security_middleware() or + settings.SECURE_SSL_REDIRECT is True + ) + return [] if passed_check else [W008] + + +@register(Tags.security, deploy=True) +def check_secret_key(app_configs, **kwargs): + passed_check = ( + getattr(settings, 'SECRET_KEY', None) and + len(set(settings.SECRET_KEY)) >= SECRET_KEY_MIN_UNIQUE_CHARACTERS and + len(settings.SECRET_KEY) >= SECRET_KEY_MIN_LENGTH + ) + return [] if passed_check else [W009] + + +@register(Tags.security, deploy=True) +def check_debug(app_configs, **kwargs): + passed_check = not settings.DEBUG + return [] if passed_check else [W018] + + +@register(Tags.security, deploy=True) +def check_xframe_deny(app_configs, **kwargs): + passed_check = ( + not _xframe_middleware() or + settings.X_FRAME_OPTIONS == 'DENY' + ) + return [] if passed_check else [W019] + + +@register(Tags.security, deploy=True) +def check_allowed_hosts(app_configs, **kwargs): + return [] if settings.ALLOWED_HOSTS else [W020] diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/security/csrf.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/security/csrf.py new file mode 100644 index 0000000000000000000000000000000000000000..75c9813e7f6bc09d5d16ec46fe9874979f8a2d16 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/security/csrf.py @@ -0,0 +1,40 @@ +from django.conf import settings + +from .. import Tags, Warning, register + +W003 = Warning( + "You don't appear to be using Django's built-in " + "cross-site request forgery protection via the middleware " + "('django.middleware.csrf.CsrfViewMiddleware' is not in your " + "MIDDLEWARE). 
Enabling the middleware is the safest approach " + "to ensure you don't leave any holes.", + id='security.W003', +) + +W016 = Warning( + "You have 'django.middleware.csrf.CsrfViewMiddleware' in your " + "MIDDLEWARE, but you have not set CSRF_COOKIE_SECURE to True. " + "Using a secure-only CSRF cookie makes it more difficult for network " + "traffic sniffers to steal the CSRF token.", + id='security.W016', +) + + +def _csrf_middleware(): + return 'django.middleware.csrf.CsrfViewMiddleware' in settings.MIDDLEWARE + + +@register(Tags.security, deploy=True) +def check_csrf_middleware(app_configs, **kwargs): + passed_check = _csrf_middleware() + return [] if passed_check else [W003] + + +@register(Tags.security, deploy=True) +def check_csrf_cookie_secure(app_configs, **kwargs): + passed_check = ( + settings.CSRF_USE_SESSIONS or + not _csrf_middleware() or + settings.CSRF_COOKIE_SECURE + ) + return [] if passed_check else [W016] diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/security/sessions.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/security/sessions.py new file mode 100644 index 0000000000000000000000000000000000000000..1f31a167fad8d441a45c29e72bbc81e0d99e2626 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/security/sessions.py @@ -0,0 +1,97 @@ +from django.conf import settings + +from .. import Tags, Warning, register + + +def add_session_cookie_message(message): + return message + ( + " Using a secure-only session cookie makes it more difficult for " + "network traffic sniffers to hijack user sessions." + ) + + +W010 = Warning( + add_session_cookie_message( + "You have 'django.contrib.sessions' in your INSTALLED_APPS, " + "but you have not set SESSION_COOKIE_SECURE to True." 
+ ), + id='security.W010', +) + +W011 = Warning( + add_session_cookie_message( + "You have 'django.contrib.sessions.middleware.SessionMiddleware' " + "in your MIDDLEWARE, but you have not set " + "SESSION_COOKIE_SECURE to True." + ), + id='security.W011', +) + +W012 = Warning( + add_session_cookie_message("SESSION_COOKIE_SECURE is not set to True."), + id='security.W012', +) + + +def add_httponly_message(message): + return message + ( + " Using an HttpOnly session cookie makes it more difficult for " + "cross-site scripting attacks to hijack user sessions." + ) + + +W013 = Warning( + add_httponly_message( + "You have 'django.contrib.sessions' in your INSTALLED_APPS, " + "but you have not set SESSION_COOKIE_HTTPONLY to True.", + ), + id='security.W013', +) + +W014 = Warning( + add_httponly_message( + "You have 'django.contrib.sessions.middleware.SessionMiddleware' " + "in your MIDDLEWARE, but you have not set " + "SESSION_COOKIE_HTTPONLY to True." + ), + id='security.W014', +) + +W015 = Warning( + add_httponly_message("SESSION_COOKIE_HTTPONLY is not set to True."), + id='security.W015', +) + + +@register(Tags.security, deploy=True) +def check_session_cookie_secure(app_configs, **kwargs): + errors = [] + if not settings.SESSION_COOKIE_SECURE: + if _session_app(): + errors.append(W010) + if _session_middleware(): + errors.append(W011) + if len(errors) > 1: + errors = [W012] + return errors + + +@register(Tags.security, deploy=True) +def check_session_cookie_httponly(app_configs, **kwargs): + errors = [] + if not settings.SESSION_COOKIE_HTTPONLY: + if _session_app(): + errors.append(W013) + if _session_middleware(): + errors.append(W014) + if len(errors) > 1: + errors = [W015] + return errors + + +def _session_middleware(): + return 'django.contrib.sessions.middleware.SessionMiddleware' in settings.MIDDLEWARE + + +def _session_app(): + return "django.contrib.sessions" in settings.INSTALLED_APPS diff --git 
a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/templates.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/templates.py new file mode 100644 index 0000000000000000000000000000000000000000..6f60d33bd0df579e16f089aba1d49a88c2e5e5a3 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/templates.py @@ -0,0 +1,38 @@ +import copy + +from django.conf import settings + +from . import Error, Tags, register + +E001 = Error( + "You have 'APP_DIRS': True in your TEMPLATES but also specify 'loaders' " + "in OPTIONS. Either remove APP_DIRS or remove the 'loaders' option.", + id='templates.E001', +) +E002 = Error( + "'string_if_invalid' in TEMPLATES OPTIONS must be a string but got: {} ({}).", + id="templates.E002", +) + + +@register(Tags.templates) +def check_setting_app_dirs_loaders(app_configs, **kwargs): + passed_check = True + for conf in settings.TEMPLATES: + if not conf.get('APP_DIRS'): + continue + if 'loaders' in conf.get('OPTIONS', {}): + passed_check = False + return [] if passed_check else [E001] + + +@register(Tags.templates) +def check_string_if_invalid_is_string(app_configs, **kwargs): + errors = [] + for conf in settings.TEMPLATES: + string_if_invalid = conf.get('OPTIONS', {}).get('string_if_invalid', '') + if not isinstance(string_if_invalid, str): + error = copy.copy(E002) + error.msg = error.msg.format(string_if_invalid, type(string_if_invalid).__name__) + errors.append(error) + return errors diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/urls.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/urls.py new file mode 100644 index 0000000000000000000000000000000000000000..e51ca3fc1fa6d7515f87c5a8585d2f1e8f99c20b --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/checks/urls.py 
@@ -0,0 +1,110 @@ +from collections import Counter + +from django.conf import settings + +from . import Error, Tags, Warning, register + + +@register(Tags.urls) +def check_url_config(app_configs, **kwargs): + if getattr(settings, 'ROOT_URLCONF', None): + from django.urls import get_resolver + resolver = get_resolver() + return check_resolver(resolver) + return [] + + +def check_resolver(resolver): + """ + Recursively check the resolver. + """ + check_method = getattr(resolver, 'check', None) + if check_method is not None: + return check_method() + elif not hasattr(resolver, 'resolve'): + return get_warning_for_invalid_pattern(resolver) + else: + return [] + + +@register(Tags.urls) +def check_url_namespaces_unique(app_configs, **kwargs): + """ + Warn if URL namespaces used in applications aren't unique. + """ + if not getattr(settings, 'ROOT_URLCONF', None): + return [] + + from django.urls import get_resolver + resolver = get_resolver() + all_namespaces = _load_all_namespaces(resolver) + counter = Counter(all_namespaces) + non_unique_namespaces = [n for n, count in counter.items() if count > 1] + errors = [] + for namespace in non_unique_namespaces: + errors.append(Warning( + "URL namespace '{}' isn't unique. You may not be able to reverse " + "all URLs in this namespace".format(namespace), + id="urls.W005", + )) + return errors + + +def _load_all_namespaces(resolver, parents=()): + """ + Recursively load all namespaces from URL patterns. 
+ """ + url_patterns = getattr(resolver, 'url_patterns', []) + namespaces = [ + ':'.join(parents + (url.namespace,)) for url in url_patterns + if getattr(url, 'namespace', None) is not None + ] + for pattern in url_patterns: + namespace = getattr(pattern, 'namespace', None) + current = parents + if namespace is not None: + current += (namespace,) + namespaces.extend(_load_all_namespaces(pattern, current)) + return namespaces + + +def get_warning_for_invalid_pattern(pattern): + """ + Return a list containing a warning that the pattern is invalid. + + describe_pattern() cannot be used here, because we cannot rely on the + urlpattern having regex or name attributes. + """ + if isinstance(pattern, str): + hint = ( + "Try removing the string '{}'. The list of urlpatterns should not " + "have a prefix string as the first element.".format(pattern) + ) + elif isinstance(pattern, tuple): + hint = "Try using path() instead of a tuple." + else: + hint = None + + return [Error( + "Your URL pattern {!r} is invalid. 
Ensure that urlpatterns is a list " + "of path() and/or re_path() instances.".format(pattern), + hint=hint, + id="urls.E004", + )] + + +@register(Tags.urls) +def check_url_settings(app_configs, **kwargs): + errors = [] + for name in ('STATIC_URL', 'MEDIA_URL'): + value = getattr(settings, name) + if value and not value.endswith('/'): + errors.append(E006(name)) + return errors + + +def E006(name): + return Error( + 'The {} setting must end with a slash.'.format(name), + id='urls.E006', + ) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/exceptions.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..0e85397b9c74ebd3bbd8a3560e0e33263d98957c --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/exceptions.py @@ -0,0 +1,183 @@ +""" +Global Django exception and warning classes. +""" + + +class FieldDoesNotExist(Exception): + """The requested model field does not exist""" + pass + + +class AppRegistryNotReady(Exception): + """The django.apps registry is not populated yet""" + pass + + +class ObjectDoesNotExist(Exception): + """The requested object does not exist""" + silent_variable_failure = True + + +class MultipleObjectsReturned(Exception): + """The query returned multiple objects when only one was expected.""" + pass + + +class SuspiciousOperation(Exception): + """The user did something suspicious""" + + +class SuspiciousMultipartForm(SuspiciousOperation): + """Suspect MIME request in multipart form data""" + pass + + +class SuspiciousFileOperation(SuspiciousOperation): + """A Suspicious filesystem operation was attempted""" + pass + + +class DisallowedHost(SuspiciousOperation): + """HTTP_HOST header contains invalid value""" + pass + + +class DisallowedRedirect(SuspiciousOperation): + """Redirect to scheme not in allowed list""" + pass + + +class 
TooManyFieldsSent(SuspiciousOperation): + """ + The number of fields in a GET or POST request exceeded + settings.DATA_UPLOAD_MAX_NUMBER_FIELDS. + """ + pass + + +class RequestDataTooBig(SuspiciousOperation): + """ + The size of the request (excluding any file uploads) exceeded + settings.DATA_UPLOAD_MAX_MEMORY_SIZE. + """ + pass + + +class PermissionDenied(Exception): + """The user did not have permission to do that""" + pass + + +class ViewDoesNotExist(Exception): + """The requested view does not exist""" + pass + + +class MiddlewareNotUsed(Exception): + """This middleware is not used in this server configuration""" + pass + + +class ImproperlyConfigured(Exception): + """Django is somehow improperly configured""" + pass + + +class FieldError(Exception): + """Some kind of problem with a model field.""" + pass + + +NON_FIELD_ERRORS = '__all__' + + +class ValidationError(Exception): + """An error while validating data.""" + def __init__(self, message, code=None, params=None): + """ + The `message` argument can be a single error, a list of errors, or a + dictionary that maps field names to lists of errors. What we define as + an "error" can be either a simple string or an instance of + ValidationError with its message attribute set, and what we define as + list or dictionary can be an actual `list` or `dict` or an instance + of ValidationError with its `error_list` or `error_dict` attribute set. 
+ """ + super().__init__(message, code, params) + + if isinstance(message, ValidationError): + if hasattr(message, 'error_dict'): + message = message.error_dict + elif not hasattr(message, 'message'): + message = message.error_list + else: + message, code, params = message.message, message.code, message.params + + if isinstance(message, dict): + self.error_dict = {} + for field, messages in message.items(): + if not isinstance(messages, ValidationError): + messages = ValidationError(messages) + self.error_dict[field] = messages.error_list + + elif isinstance(message, list): + self.error_list = [] + for message in message: + # Normalize plain strings to instances of ValidationError. + if not isinstance(message, ValidationError): + message = ValidationError(message) + if hasattr(message, 'error_dict'): + self.error_list.extend(sum(message.error_dict.values(), [])) + else: + self.error_list.extend(message.error_list) + + else: + self.message = message + self.code = code + self.params = params + self.error_list = [self] + + @property + def message_dict(self): + # Trigger an AttributeError if this ValidationError + # doesn't have an error_dict. 
+ getattr(self, 'error_dict') + + return dict(self) + + @property + def messages(self): + if hasattr(self, 'error_dict'): + return sum(dict(self).values(), []) + return list(self) + + def update_error_dict(self, error_dict): + if hasattr(self, 'error_dict'): + for field, error_list in self.error_dict.items(): + error_dict.setdefault(field, []).extend(error_list) + else: + error_dict.setdefault(NON_FIELD_ERRORS, []).extend(self.error_list) + return error_dict + + def __iter__(self): + if hasattr(self, 'error_dict'): + for field, errors in self.error_dict.items(): + yield field, list(ValidationError(errors)) + else: + for error in self.error_list: + message = error.message + if error.params: + message %= error.params + yield str(message) + + def __str__(self): + if hasattr(self, 'error_dict'): + return repr(dict(self)) + return repr(list(self)) + + def __repr__(self): + return 'ValidationError(%s)' % self + + +class EmptyResultSet(Exception): + """A database query predicate is impossible.""" + pass diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/__init__.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58a6fd8f8537d7479a53d7232d1aea6b58b841fb --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/__init__.py @@ -0,0 +1,3 @@ +from django.core.files.base import File + +__all__ = ['File'] diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/base.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/base.py new file mode 100644 index 0000000000000000000000000000000000000000..5e6332f0b62602affd5c75c5b9490911dd022b69 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/base.py @@ -0,0 +1,169 @@ +import os 
+from io import BytesIO, StringIO, UnsupportedOperation + +from django.core.files.utils import FileProxyMixin + + +class File(FileProxyMixin): + DEFAULT_CHUNK_SIZE = 64 * 2 ** 10 + + def __init__(self, file, name=None): + self.file = file + if name is None: + name = getattr(file, 'name', None) + self.name = name + if hasattr(file, 'mode'): + self.mode = file.mode + + def __str__(self): + return self.name or '' + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self or "None") + + def __bool__(self): + return bool(self.name) + + def __len__(self): + return self.size + + def _get_size_from_underlying_file(self): + if hasattr(self.file, 'size'): + return self.file.size + if hasattr(self.file, 'name'): + try: + return os.path.getsize(self.file.name) + except (OSError, TypeError): + pass + if hasattr(self.file, 'tell') and hasattr(self.file, 'seek'): + pos = self.file.tell() + self.file.seek(0, os.SEEK_END) + size = self.file.tell() + self.file.seek(pos) + return size + raise AttributeError("Unable to determine the file's size.") + + def _get_size(self): + if hasattr(self, '_size'): + return self._size + self._size = self._get_size_from_underlying_file() + return self._size + + def _set_size(self, size): + self._size = size + + size = property(_get_size, _set_size) + + def chunks(self, chunk_size=None): + """ + Read the file and yield chunks of ``chunk_size`` bytes (defaults to + ``UploadedFile.DEFAULT_CHUNK_SIZE``). + """ + if not chunk_size: + chunk_size = self.DEFAULT_CHUNK_SIZE + + try: + self.seek(0) + except (AttributeError, UnsupportedOperation): + pass + + while True: + data = self.read(chunk_size) + if not data: + break + yield data + + def multiple_chunks(self, chunk_size=None): + """ + Return ``True`` if you can expect multiple chunks. + + NB: If a particular file representation is in memory, subclasses should + always return ``False`` -- there's no good reason to read from memory in + chunks. 
+ """ + if not chunk_size: + chunk_size = self.DEFAULT_CHUNK_SIZE + return self.size > chunk_size + + def __iter__(self): + # Iterate over this file-like object by newlines + buffer_ = None + for chunk in self.chunks(): + for line in chunk.splitlines(True): + if buffer_: + if endswith_cr(buffer_) and not equals_lf(line): + # Line split after a \r newline; yield buffer_. + yield buffer_ + # Continue with line. + else: + # Line either split without a newline (line + # continues after buffer_) or with \r\n + # newline (line == b'\n'). + line = buffer_ + line + # buffer_ handled, clear it. + buffer_ = None + + # If this is the end of a \n or \r\n line, yield. + if endswith_lf(line): + yield line + else: + buffer_ = line + + if buffer_ is not None: + yield buffer_ + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + self.close() + + def open(self, mode=None): + if not self.closed: + self.seek(0) + elif self.name and os.path.exists(self.name): + self.file = open(self.name, mode or self.mode) + else: + raise ValueError("The file cannot be reopened.") + return self + + def close(self): + self.file.close() + + +class ContentFile(File): + """ + A File-like object that take just raw content, rather than an actual file. 
+ """ + def __init__(self, content, name=None): + stream_class = StringIO if isinstance(content, str) else BytesIO + super().__init__(stream_class(content), name=name) + self.size = len(content) + + def __str__(self): + return 'Raw content' + + def __bool__(self): + return True + + def open(self, mode=None): + self.seek(0) + return self + + def close(self): + pass + + +def endswith_cr(line): + """Return True if line (a text or byte string) ends with '\r'.""" + return line.endswith('\r' if isinstance(line, str) else b'\r') + + +def endswith_lf(line): + """Return True if line (a text or byte string) ends with '\n'.""" + return line.endswith('\n' if isinstance(line, str) else b'\n') + + +def equals_lf(line): + """Return True if line (a text or byte string) equals '\n'.""" + return line == ('\n' if isinstance(line, str) else b'\n') diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/images.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/images.py new file mode 100644 index 0000000000000000000000000000000000000000..cdb89de2cc68860297cb71a55e0083fd57e2523e --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/images.py @@ -0,0 +1,80 @@ +""" +Utility functions for handling images. + +Requires Pillow as you might imagine. +""" +import struct +import zlib + +from django.core.files import File + + +class ImageFile(File): + """ + A mixin for use alongside django.core.files.base.File, which provides + additional features for dealing with images. 
+ """ + @property + def width(self): + return self._get_image_dimensions()[0] + + @property + def height(self): + return self._get_image_dimensions()[1] + + def _get_image_dimensions(self): + if not hasattr(self, '_dimensions_cache'): + close = self.closed + self.open() + self._dimensions_cache = get_image_dimensions(self, close=close) + return self._dimensions_cache + + +def get_image_dimensions(file_or_path, close=False): + """ + Return the (width, height) of an image, given an open file or a path. Set + 'close' to True to close the file at the end if it is initially in an open + state. + """ + from PIL import ImageFile as PillowImageFile + + p = PillowImageFile.Parser() + if hasattr(file_or_path, 'read'): + file = file_or_path + file_pos = file.tell() + file.seek(0) + else: + file = open(file_or_path, 'rb') + close = True + try: + # Most of the time Pillow only needs a small chunk to parse the image + # and get the dimensions, but with some TIFF files Pillow needs to + # parse the whole file. + chunk_size = 1024 + while 1: + data = file.read(chunk_size) + if not data: + break + try: + p.feed(data) + except zlib.error as e: + # ignore zlib complaining on truncated stream, just feed more + # data to parser (ticket #19457). + if e.args[0].startswith("Error -5"): + pass + else: + raise + except struct.error: + # Ignore PIL failing on a too short buffer when reads return + # less bytes than expected. Skip and feed more data to the + # parser (ticket #24544). 
+ pass + if p.image: + return p.image.size + chunk_size *= 2 + return (None, None) + finally: + if close: + file.close() + else: + file.seek(file_pos) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/locks.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/locks.py new file mode 100644 index 0000000000000000000000000000000000000000..63c7fda9bbb7f865ed2482bc6621fc278e2bd9ce --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/locks.py @@ -0,0 +1,113 @@ +""" +Portable file locking utilities. + +Based partially on an example by Jonathan Feignberg in the Python +Cookbook [1] (licensed under the Python Software License) and a ctypes port by +Anatoly Techtonik for Roundup [2] (license [3]). + +[1] http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203 +[2] http://sourceforge.net/p/roundup/code/ci/default/tree/roundup/backends/portalocker.py +[3] http://sourceforge.net/p/roundup/code/ci/default/tree/COPYING.txt + +Example Usage:: + + >>> from django.core.files import locks + >>> with open('./file', 'wb') as f: + ... locks.lock(f, locks.LOCK_EX) + ... 
f.write('Django') +""" +import os + +__all__ = ('LOCK_EX', 'LOCK_SH', 'LOCK_NB', 'lock', 'unlock') + + +def _fd(f): + """Get a filedescriptor from something which could be a file or an fd.""" + return f.fileno() if hasattr(f, 'fileno') else f + + +if os.name == 'nt': + import msvcrt + from ctypes import (sizeof, c_ulong, c_void_p, c_int64, + Structure, Union, POINTER, windll, byref) + from ctypes.wintypes import BOOL, DWORD, HANDLE + + LOCK_SH = 0 # the default + LOCK_NB = 0x1 # LOCKFILE_FAIL_IMMEDIATELY + LOCK_EX = 0x2 # LOCKFILE_EXCLUSIVE_LOCK + + # --- Adapted from the pyserial project --- + # detect size of ULONG_PTR + if sizeof(c_ulong) != sizeof(c_void_p): + ULONG_PTR = c_int64 + else: + ULONG_PTR = c_ulong + PVOID = c_void_p + + # --- Union inside Structure by stackoverflow:3480240 --- + class _OFFSET(Structure): + _fields_ = [ + ('Offset', DWORD), + ('OffsetHigh', DWORD)] + + class _OFFSET_UNION(Union): + _anonymous_ = ['_offset'] + _fields_ = [ + ('_offset', _OFFSET), + ('Pointer', PVOID)] + + class OVERLAPPED(Structure): + _anonymous_ = ['_offset_union'] + _fields_ = [ + ('Internal', ULONG_PTR), + ('InternalHigh', ULONG_PTR), + ('_offset_union', _OFFSET_UNION), + ('hEvent', HANDLE)] + + LPOVERLAPPED = POINTER(OVERLAPPED) + + # --- Define function prototypes for extra safety --- + LockFileEx = windll.kernel32.LockFileEx + LockFileEx.restype = BOOL + LockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, DWORD, LPOVERLAPPED] + UnlockFileEx = windll.kernel32.UnlockFileEx + UnlockFileEx.restype = BOOL + UnlockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, LPOVERLAPPED] + + def lock(f, flags): + hfile = msvcrt.get_osfhandle(_fd(f)) + overlapped = OVERLAPPED() + ret = LockFileEx(hfile, flags, 0, 0, 0xFFFF0000, byref(overlapped)) + return bool(ret) + + def unlock(f): + hfile = msvcrt.get_osfhandle(_fd(f)) + overlapped = OVERLAPPED() + ret = UnlockFileEx(hfile, 0, 0, 0xFFFF0000, byref(overlapped)) + return bool(ret) +else: + try: + import fcntl + LOCK_SH = 
fcntl.LOCK_SH # shared lock + LOCK_NB = fcntl.LOCK_NB # non-blocking + LOCK_EX = fcntl.LOCK_EX + except (ImportError, AttributeError): + # File locking is not supported. + LOCK_EX = LOCK_SH = LOCK_NB = 0 + + # Dummy functions that don't do anything. + def lock(f, flags): + # File is not locked + return False + + def unlock(f): + # File is unlocked + return True + else: + def lock(f, flags): + ret = fcntl.flock(_fd(f), flags) + return ret == 0 + + def unlock(f): + ret = fcntl.flock(_fd(f), fcntl.LOCK_UN) + return ret == 0 diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/move.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/move.py new file mode 100644 index 0000000000000000000000000000000000000000..4d791ac263b8e2d8d39a49697a9d6f18bcecab3d --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/move.py @@ -0,0 +1,87 @@ +""" +Move a file in the safest way possible:: + + >>> from django.core.files.move import file_move_safe + >>> file_move_safe("/tmp/old_file", "/tmp/new_file") +""" + +import errno +import os +from shutil import copystat + +from django.core.files import locks + +__all__ = ['file_move_safe'] + + +def _samefile(src, dst): + # Macintosh, Unix. + if hasattr(os.path, 'samefile'): + try: + return os.path.samefile(src, dst) + except OSError: + return False + + # All other platforms: check for same pathname. + return (os.path.normcase(os.path.abspath(src)) == + os.path.normcase(os.path.abspath(dst))) + + +def file_move_safe(old_file_name, new_file_name, chunk_size=1024 * 64, allow_overwrite=False): + """ + Move a file from one location to another in the safest way possible. + + First, try ``os.rename``, which is simple but will break across filesystems. + If that fails, stream manually from one file to another in pure Python. 
+ + If the destination file exists and ``allow_overwrite`` is ``False``, raise + ``IOError``. + """ + # There's no reason to move if we don't have to. + if _samefile(old_file_name, new_file_name): + return + + try: + if not allow_overwrite and os.access(new_file_name, os.F_OK): + raise IOError("Destination file %s exists and allow_overwrite is False" % new_file_name) + + os.rename(old_file_name, new_file_name) + return + except OSError: + # OSError happens with os.rename() if moving to another filesystem or + # when moving opened files on certain operating systems. + pass + + # first open the old file, so that it won't go away + with open(old_file_name, 'rb') as old_file: + # now open the new file, not forgetting allow_overwrite + fd = os.open(new_file_name, (os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0) | + (os.O_EXCL if not allow_overwrite else 0))) + try: + locks.lock(fd, locks.LOCK_EX) + current_chunk = None + while current_chunk != b'': + current_chunk = old_file.read(chunk_size) + os.write(fd, current_chunk) + finally: + locks.unlock(fd) + os.close(fd) + + try: + copystat(old_file_name, new_file_name) + except PermissionError as e: + # Certain filesystems (e.g. CIFS) fail to copy the file's metadata if + # the type of the destination filesystem isn't the same as the source + # filesystem; ignore that. + if e.errno != errno.EPERM: + raise + + try: + os.remove(old_file_name) + except PermissionError as e: + # Certain operating systems (Cygwin and Windows) + # fail when deleting opened files, ignore it. (For the + # systems where this happens, temporary files will be auto-deleted + # on close anyway.) 
+ if getattr(e, 'winerror', 0) != 32: + raise diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/storage.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/storage.py new file mode 100644 index 0000000000000000000000000000000000000000..30788d6d7502c26a2d3beee223320a95e32e4585 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/storage.py @@ -0,0 +1,364 @@ +import os +from datetime import datetime +from urllib.parse import urljoin + +from django.conf import settings +from django.core.exceptions import SuspiciousFileOperation +from django.core.files import File, locks +from django.core.files.move import file_move_safe +from django.core.signals import setting_changed +from django.utils import timezone +from django.utils._os import safe_join +from django.utils.crypto import get_random_string +from django.utils.deconstruct import deconstructible +from django.utils.encoding import filepath_to_uri +from django.utils.functional import LazyObject, cached_property +from django.utils.module_loading import import_string +from django.utils.text import get_valid_filename + +__all__ = ('Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage') + + +class Storage: + """ + A base storage class, providing some default behaviors that all other + storage systems can inherit or override, as necessary. + """ + + # The following methods represent a public interface to private methods. + # These shouldn't be overridden by subclasses unless absolutely necessary. + + def open(self, name, mode='rb'): + """Retrieve the specified file from storage.""" + return self._open(name, mode) + + def save(self, name, content, max_length=None): + """ + Save new content to the file specified by name. The content should be + a proper File object or any python file-like object, ready to be read + from the beginning. 
+ """ + # Get the proper name for the file, as it will actually be saved. + if name is None: + name = content.name + + if not hasattr(content, 'chunks'): + content = File(content, name) + + name = self.get_available_name(name, max_length=max_length) + return self._save(name, content) + + # These methods are part of the public API, with default implementations. + + def get_valid_name(self, name): + """ + Return a filename, based on the provided filename, that's suitable for + use in the target storage system. + """ + return get_valid_filename(name) + + def get_available_name(self, name, max_length=None): + """ + Return a filename that's free on the target storage system and + available for new content to be written to. + """ + dir_name, file_name = os.path.split(name) + file_root, file_ext = os.path.splitext(file_name) + # If the filename already exists, add an underscore and a random 7 + # character alphanumeric string (before the file extension, if one + # exists) to the filename until the generated filename doesn't exist. + # Truncate original name if required, so the new filename does not + # exceed the max_length. + while self.exists(name) or (max_length and len(name) > max_length): + # file_ext includes the dot. + name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext)) + if max_length is None: + continue + # Truncate file_root if max_length exceeded. + truncation = len(name) - max_length + if truncation > 0: + file_root = file_root[:-truncation] + # Entire file_root was truncated in attempt to find an available filename. + if not file_root: + raise SuspiciousFileOperation( + 'Storage can not find an available filename for "%s". ' + 'Please make sure that the corresponding file field ' + 'allows sufficient "max_length".' 
% name + ) + name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext)) + return name + + def generate_filename(self, filename): + """ + Validate the filename by calling get_valid_name() and return a filename + to be passed to the save() method. + """ + # `filename` may include a path as returned by FileField.upload_to. + dirname, filename = os.path.split(filename) + return os.path.normpath(os.path.join(dirname, self.get_valid_name(filename))) + + def path(self, name): + """ + Return a local filesystem path where the file can be retrieved using + Python's built-in open() function. Storage systems that can't be + accessed using open() should *not* implement this method. + """ + raise NotImplementedError("This backend doesn't support absolute paths.") + + # The following methods form the public API for storage systems, but with + # no default implementations. Subclasses must implement *all* of these. + + def delete(self, name): + """ + Delete the specified file from the storage system. + """ + raise NotImplementedError('subclasses of Storage must provide a delete() method') + + def exists(self, name): + """ + Return True if a file referenced by the given name already exists in the + storage system, or False if the name is available for a new file. + """ + raise NotImplementedError('subclasses of Storage must provide an exists() method') + + def listdir(self, path): + """ + List the contents of the specified path. Return a 2-tuple of lists: + the first item being directories, the second item being files. + """ + raise NotImplementedError('subclasses of Storage must provide a listdir() method') + + def size(self, name): + """ + Return the total size, in bytes, of the file specified by name. + """ + raise NotImplementedError('subclasses of Storage must provide a size() method') + + def url(self, name): + """ + Return an absolute URL where the file's contents can be accessed + directly by a Web browser. 
+ """ + raise NotImplementedError('subclasses of Storage must provide a url() method') + + def get_accessed_time(self, name): + """ + Return the last accessed time (as a datetime) of the file specified by + name. The datetime will be timezone-aware if USE_TZ=True. + """ + raise NotImplementedError('subclasses of Storage must provide a get_accessed_time() method') + + def get_created_time(self, name): + """ + Return the creation time (as a datetime) of the file specified by name. + The datetime will be timezone-aware if USE_TZ=True. + """ + raise NotImplementedError('subclasses of Storage must provide a get_created_time() method') + + def get_modified_time(self, name): + """ + Return the last modified time (as a datetime) of the file specified by + name. The datetime will be timezone-aware if USE_TZ=True. + """ + raise NotImplementedError('subclasses of Storage must provide a get_modified_time() method') + + +@deconstructible +class FileSystemStorage(Storage): + """ + Standard filesystem storage + """ + + def __init__(self, location=None, base_url=None, file_permissions_mode=None, + directory_permissions_mode=None): + self._location = location + self._base_url = base_url + self._file_permissions_mode = file_permissions_mode + self._directory_permissions_mode = directory_permissions_mode + setting_changed.connect(self._clear_cached_properties) + + def _clear_cached_properties(self, setting, **kwargs): + """Reset setting based property values.""" + if setting == 'MEDIA_ROOT': + self.__dict__.pop('base_location', None) + self.__dict__.pop('location', None) + elif setting == 'MEDIA_URL': + self.__dict__.pop('base_url', None) + elif setting == 'FILE_UPLOAD_PERMISSIONS': + self.__dict__.pop('file_permissions_mode', None) + elif setting == 'FILE_UPLOAD_DIRECTORY_PERMISSIONS': + self.__dict__.pop('directory_permissions_mode', None) + + def _value_or_setting(self, value, setting): + return setting if value is None else value + + @cached_property + def base_location(self): + 
return self._value_or_setting(self._location, settings.MEDIA_ROOT) + + @cached_property + def location(self): + return os.path.abspath(self.base_location) + + @cached_property + def base_url(self): + if self._base_url is not None and not self._base_url.endswith('/'): + self._base_url += '/' + return self._value_or_setting(self._base_url, settings.MEDIA_URL) + + @cached_property + def file_permissions_mode(self): + return self._value_or_setting(self._file_permissions_mode, settings.FILE_UPLOAD_PERMISSIONS) + + @cached_property + def directory_permissions_mode(self): + return self._value_or_setting(self._directory_permissions_mode, settings.FILE_UPLOAD_DIRECTORY_PERMISSIONS) + + def _open(self, name, mode='rb'): + return File(open(self.path(name), mode)) + + def _save(self, name, content): + full_path = self.path(name) + + # Create any intermediate directories that do not exist. + directory = os.path.dirname(full_path) + if not os.path.exists(directory): + try: + if self.directory_permissions_mode is not None: + # os.makedirs applies the global umask, so we reset it, + # for consistency with file_permissions_mode behavior. + old_umask = os.umask(0) + try: + os.makedirs(directory, self.directory_permissions_mode) + finally: + os.umask(old_umask) + else: + os.makedirs(directory) + except FileNotFoundError: + # There's a race between os.path.exists() and os.makedirs(). + # If os.makedirs() fails with FileNotFoundError, the directory + # was created concurrently. + pass + if not os.path.isdir(directory): + raise IOError("%s exists and is not a directory." % directory) + + # There's a potential race condition between get_available_name and + # saving the file; it's possible that two threads might return the + # same name, at which point all sorts of fun happens. So we need to + # try to create the file, but if it already exists we have to go back + # to get_available_name() and try again. + + while True: + try: + # This file has a file path that we can move. 
+ if hasattr(content, 'temporary_file_path'): + file_move_safe(content.temporary_file_path(), full_path) + + # This is a normal uploadedfile that we can stream. + else: + # This fun binary flag incantation makes os.open throw an + # OSError if the file already exists before we open it. + flags = (os.O_WRONLY | os.O_CREAT | os.O_EXCL | + getattr(os, 'O_BINARY', 0)) + # The current umask value is masked out by os.open! + fd = os.open(full_path, flags, 0o666) + _file = None + try: + locks.lock(fd, locks.LOCK_EX) + for chunk in content.chunks(): + if _file is None: + mode = 'wb' if isinstance(chunk, bytes) else 'wt' + _file = os.fdopen(fd, mode) + _file.write(chunk) + finally: + locks.unlock(fd) + if _file is not None: + _file.close() + else: + os.close(fd) + except FileExistsError: + # A new name is needed if the file exists. + name = self.get_available_name(name) + full_path = self.path(name) + else: + # OK, the file save worked. Break out of the loop. + break + + if self.file_permissions_mode is not None: + os.chmod(full_path, self.file_permissions_mode) + + # Store filenames with forward slashes, even on Windows. + return name.replace('\\', '/') + + def delete(self, name): + assert name, "The name argument is not allowed to be empty." + name = self.path(name) + # If the file or directory exists, delete it from the filesystem. + try: + if os.path.isdir(name): + os.rmdir(name) + else: + os.remove(name) + except FileNotFoundError: + # FileNotFoundError is raised if the file or directory was removed + # concurrently. 
+ pass + + def exists(self, name): + return os.path.exists(self.path(name)) + + def listdir(self, path): + path = self.path(path) + directories, files = [], [] + for entry in os.listdir(path): + if os.path.isdir(os.path.join(path, entry)): + directories.append(entry) + else: + files.append(entry) + return directories, files + + def path(self, name): + return safe_join(self.location, name) + + def size(self, name): + return os.path.getsize(self.path(name)) + + def url(self, name): + if self.base_url is None: + raise ValueError("This file is not accessible via a URL.") + url = filepath_to_uri(name) + if url is not None: + url = url.lstrip('/') + return urljoin(self.base_url, url) + + def _datetime_from_timestamp(self, ts): + """ + If timezone support is enabled, make an aware datetime object in UTC; + otherwise make a naive one in the local timezone. + """ + if settings.USE_TZ: + # Safe to use .replace() because UTC doesn't have DST + return datetime.utcfromtimestamp(ts).replace(tzinfo=timezone.utc) + else: + return datetime.fromtimestamp(ts) + + def get_accessed_time(self, name): + return self._datetime_from_timestamp(os.path.getatime(self.path(name))) + + def get_created_time(self, name): + return self._datetime_from_timestamp(os.path.getctime(self.path(name))) + + def get_modified_time(self, name): + return self._datetime_from_timestamp(os.path.getmtime(self.path(name))) + + +def get_storage_class(import_path=None): + return import_string(import_path or settings.DEFAULT_FILE_STORAGE) + + +class DefaultStorage(LazyObject): + def _setup(self): + self._wrapped = get_storage_class()() + + +default_storage = DefaultStorage() diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/temp.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/temp.py new file mode 100644 index 0000000000000000000000000000000000000000..5fbb91b9ee92e032d232e51befed3caedd71e829 --- /dev/null +++ 
b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/temp.py @@ -0,0 +1,74 @@ +""" +The temp module provides a NamedTemporaryFile that can be reopened in the same +process on any platform. Most platforms use the standard Python +tempfile.NamedTemporaryFile class, but Windows users are given a custom class. + +This is needed because the Python implementation of NamedTemporaryFile uses the +O_TEMPORARY flag under Windows, which prevents the file from being reopened +if the same flag is not provided [1][2]. Note that this does not address the +more general issue of opening a file for writing and reading in multiple +processes in a manner that works across platforms. + +The custom version of NamedTemporaryFile doesn't support the same keyword +arguments available in tempfile.NamedTemporaryFile. + +1: https://mail.python.org/pipermail/python-list/2005-December/336957.html +2: http://bugs.python.org/issue14243 +""" + +import os +import tempfile + +from django.core.files.utils import FileProxyMixin + +__all__ = ('NamedTemporaryFile', 'gettempdir',) + + +if os.name == 'nt': + class TemporaryFile(FileProxyMixin): + """ + Temporary file object constructor that supports reopening of the + temporary file in Windows. + + Unlike tempfile.NamedTemporaryFile from the standard library, + __init__() doesn't support the 'delete', 'buffering', 'encoding', or + 'newline' keyword arguments. 
+ """ + def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='', dir=None): + fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir) + self.name = name + self.file = os.fdopen(fd, mode, bufsize) + self.close_called = False + + # Because close can be called during shutdown + # we need to cache os.unlink and access it + # as self.unlink only + unlink = os.unlink + + def close(self): + if not self.close_called: + self.close_called = True + try: + self.file.close() + except (OSError, IOError): + pass + try: + self.unlink(self.name) + except OSError: + pass + + def __del__(self): + self.close() + + def __enter__(self): + self.file.__enter__() + return self + + def __exit__(self, exc, value, tb): + self.file.__exit__(exc, value, tb) + + NamedTemporaryFile = TemporaryFile +else: + NamedTemporaryFile = tempfile.NamedTemporaryFile + +gettempdir = tempfile.gettempdir diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/uploadedfile.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/uploadedfile.py new file mode 100644 index 0000000000000000000000000000000000000000..8f1d26ea5ff1c033646a29f7896bf3586de700fb --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/uploadedfile.py @@ -0,0 +1,118 @@ +""" +Classes representing uploaded files. +""" + +import os +from io import BytesIO + +from django.conf import settings +from django.core.files import temp as tempfile +from django.core.files.base import File + +__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile', + 'SimpleUploadedFile') + + +class UploadedFile(File): + """ + An abstract uploaded file (``TemporaryUploadedFile`` and + ``InMemoryUploadedFile`` are the built-in concrete subclasses). + + An ``UploadedFile`` object behaves somewhat like a file object and + represents some file data that the user submitted with a form. 
+ """ + DEFAULT_CHUNK_SIZE = 64 * 2 ** 10 + + def __init__(self, file=None, name=None, content_type=None, size=None, charset=None, content_type_extra=None): + super().__init__(file, name) + self.size = size + self.content_type = content_type + self.charset = charset + self.content_type_extra = content_type_extra + + def __repr__(self): + return "<%s: %s (%s)>" % (self.__class__.__name__, self.name, self.content_type) + + def _get_name(self): + return self._name + + def _set_name(self, name): + # Sanitize the file name so that it can't be dangerous. + if name is not None: + # Just use the basename of the file -- anything else is dangerous. + name = os.path.basename(name) + + # File names longer than 255 characters can cause problems on older OSes. + if len(name) > 255: + name, ext = os.path.splitext(name) + ext = ext[:255] + name = name[:255 - len(ext)] + ext + + self._name = name + + name = property(_get_name, _set_name) + + +class TemporaryUploadedFile(UploadedFile): + """ + A file uploaded to a temporary location (i.e. stream-to-disk). + """ + def __init__(self, name, content_type, size, charset, content_type_extra=None): + _, ext = os.path.splitext(name) + file = tempfile.NamedTemporaryFile(suffix='.upload' + ext, dir=settings.FILE_UPLOAD_TEMP_DIR) + super().__init__(file, name, content_type, size, charset, content_type_extra) + + def temporary_file_path(self): + """Return the full path of this file.""" + return self.file.name + + def close(self): + try: + return self.file.close() + except FileNotFoundError: + # The file was moved or deleted before the tempfile could unlink + # it. Still sets self.file.close_called and calls + # self.file.file.close() before the exception. + pass + + +class InMemoryUploadedFile(UploadedFile): + """ + A file uploaded into memory (i.e. stream-to-memory). 
+ """ + def __init__(self, file, field_name, name, content_type, size, charset, content_type_extra=None): + super().__init__(file, name, content_type, size, charset, content_type_extra) + self.field_name = field_name + + def open(self, mode=None): + self.file.seek(0) + return self + + def chunks(self, chunk_size=None): + self.file.seek(0) + yield self.read() + + def multiple_chunks(self, chunk_size=None): + # Since it's in memory, we'll never have multiple chunks. + return False + + +class SimpleUploadedFile(InMemoryUploadedFile): + """ + A simple representation of a file, which just has content, size, and a name. + """ + def __init__(self, name, content, content_type='text/plain'): + content = content or b'' + super().__init__(BytesIO(content), None, name, content_type, len(content), None, None) + + @classmethod + def from_dict(cls, file_dict): + """ + Create a SimpleUploadedFile object from a dictionary with keys: + - filename + - content-type + - content + """ + return cls(file_dict['filename'], + file_dict['content'], + file_dict.get('content-type', 'text/plain')) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/uploadhandler.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/uploadhandler.py new file mode 100644 index 0000000000000000000000000000000000000000..a0f34b741ca70f915405c65ee18f7c3ff267793d --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/files/uploadhandler.py @@ -0,0 +1,208 @@ +""" +Base file upload handler classes, and the built-in concrete subclasses +""" + +from io import BytesIO + +from django.conf import settings +from django.core.files.uploadedfile import ( + InMemoryUploadedFile, TemporaryUploadedFile, +) +from django.utils.module_loading import import_string + +__all__ = [ + 'UploadFileException', 'StopUpload', 'SkipFile', 'FileUploadHandler', + 'TemporaryFileUploadHandler', 
'MemoryFileUploadHandler', 'load_handler', + 'StopFutureHandlers' +] + + +class UploadFileException(Exception): + """ + Any error having to do with uploading files. + """ + pass + + +class StopUpload(UploadFileException): + """ + This exception is raised when an upload must abort. + """ + def __init__(self, connection_reset=False): + """ + If ``connection_reset`` is ``True``, Django knows will halt the upload + without consuming the rest of the upload. This will cause the browser to + show a "connection reset" error. + """ + self.connection_reset = connection_reset + + def __str__(self): + if self.connection_reset: + return 'StopUpload: Halt current upload.' + else: + return 'StopUpload: Consume request data, then halt.' + + +class SkipFile(UploadFileException): + """ + This exception is raised by an upload handler that wants to skip a given file. + """ + pass + + +class StopFutureHandlers(UploadFileException): + """ + Upload handers that have handled a file and do not want future handlers to + run should raise this exception instead of returning None. + """ + pass + + +class FileUploadHandler: + """ + Base class for streaming upload handlers. + """ + chunk_size = 64 * 2 ** 10 # : The default chunk size is 64 KB. + + def __init__(self, request=None): + self.file_name = None + self.content_type = None + self.content_length = None + self.charset = None + self.content_type_extra = None + self.request = request + + def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None): + """ + Handle the raw input from the client. + + Parameters: + + :input_data: + An object that supports reading via .read(). + :META: + ``request.META``. + :content_length: + The (integer) value of the Content-Length header from the + client. + :boundary: The boundary from the Content-Type header. Be sure to + prepend two '--'. 
+ """ + pass + + def new_file(self, field_name, file_name, content_type, content_length, charset=None, content_type_extra=None): + """ + Signal that a new file has been started. + + Warning: As with any data from the client, you should not trust + content_length (and sometimes won't even get it). + """ + self.field_name = field_name + self.file_name = file_name + self.content_type = content_type + self.content_length = content_length + self.charset = charset + self.content_type_extra = content_type_extra + + def receive_data_chunk(self, raw_data, start): + """ + Receive data from the streamed upload parser. ``start`` is the position + in the file of the chunk. + """ + raise NotImplementedError('subclasses of FileUploadHandler must provide a receive_data_chunk() method') + + def file_complete(self, file_size): + """ + Signal that a file has completed. File size corresponds to the actual + size accumulated by all the chunks. + + Subclasses should return a valid ``UploadedFile`` object. + """ + raise NotImplementedError('subclasses of FileUploadHandler must provide a file_complete() method') + + def upload_complete(self): + """ + Signal that the upload is complete. Subclasses should perform cleanup + that is necessary for this handler. + """ + pass + + +class TemporaryFileUploadHandler(FileUploadHandler): + """ + Upload handler that streams data into a temporary file. + """ + def new_file(self, *args, **kwargs): + """ + Create the file object to append to as data is coming in. + """ + super().new_file(*args, **kwargs) + self.file = TemporaryUploadedFile(self.file_name, self.content_type, 0, self.charset, self.content_type_extra) + + def receive_data_chunk(self, raw_data, start): + self.file.write(raw_data) + + def file_complete(self, file_size): + self.file.seek(0) + self.file.size = file_size + return self.file + + +class MemoryFileUploadHandler(FileUploadHandler): + """ + File upload handler to stream uploads into memory (used for small files). 
+ """ + + def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None): + """ + Use the content_length to signal whether or not this handler should be + used. + """ + # Check the content-length header to see if we should + # If the post is too large, we cannot use the Memory handler. + if content_length > settings.FILE_UPLOAD_MAX_MEMORY_SIZE: + self.activated = False + else: + self.activated = True + + def new_file(self, *args, **kwargs): + super().new_file(*args, **kwargs) + if self.activated: + self.file = BytesIO() + raise StopFutureHandlers() + + def receive_data_chunk(self, raw_data, start): + """Add the data to the BytesIO file.""" + if self.activated: + self.file.write(raw_data) + else: + return raw_data + + def file_complete(self, file_size): + """Return a file object if this handler is activated.""" + if not self.activated: + return + + self.file.seek(0) + return InMemoryUploadedFile( + file=self.file, + field_name=self.field_name, + name=self.file_name, + content_type=self.content_type, + size=file_size, + charset=self.charset, + content_type_extra=self.content_type_extra + ) + + +def load_handler(path, *args, **kwargs): + """ + Given a path to a handler, return an instance of that handler. 
def _forward(attr):
    # Build a read-only property that forwards *attr* to the wrapped file.
    return property(lambda self: getattr(self.file, attr))


class FileProxyMixin:
    """
    A mixin class used to forward file methods to an underlying file
    object. The internal file object has to be called "file"::

        class FileProxy(FileProxyMixin):
            def __init__(self, file):
                self.file = file
    """

    encoding = _forward('encoding')
    fileno = _forward('fileno')
    flush = _forward('flush')
    isatty = _forward('isatty')
    newlines = _forward('newlines')
    read = _forward('read')
    readinto = _forward('readinto')
    readline = _forward('readline')
    readlines = _forward('readlines')
    seek = _forward('seek')
    tell = _forward('tell')
    truncate = _forward('truncate')
    write = _forward('write')
    writelines = _forward('writelines')

    @property
    def closed(self):
        # Missing or closed underlying file both count as closed.
        return not self.file or self.file.closed

    def readable(self):
        if self.closed:
            return False
        # Defer to the file's own readable() when available.
        return self.file.readable() if hasattr(self.file, 'readable') else True

    def writable(self):
        if self.closed:
            return False
        if hasattr(self.file, 'writable'):
            return self.file.writable()
        # Fall back to inspecting the open mode, if any.
        return 'w' in getattr(self.file, 'mode', '')

    def seekable(self):
        if self.closed:
            return False
        return self.file.seekable() if hasattr(self.file, 'seekable') else True

    def __iter__(self):
        return iter(self.file)
+ """ + self._request_middleware = [] + self._view_middleware = [] + self._template_response_middleware = [] + self._response_middleware = [] + self._exception_middleware = [] + + handler = convert_exception_to_response(self._get_response) + for middleware_path in reversed(settings.MIDDLEWARE): + middleware = import_string(middleware_path) + try: + mw_instance = middleware(handler) + except MiddlewareNotUsed as exc: + if settings.DEBUG: + if str(exc): + logger.debug('MiddlewareNotUsed(%r): %s', middleware_path, exc) + else: + logger.debug('MiddlewareNotUsed: %r', middleware_path) + continue + + if mw_instance is None: + raise ImproperlyConfigured( + 'Middleware factory %s returned None.' % middleware_path + ) + + if hasattr(mw_instance, 'process_view'): + self._view_middleware.insert(0, mw_instance.process_view) + if hasattr(mw_instance, 'process_template_response'): + self._template_response_middleware.append(mw_instance.process_template_response) + if hasattr(mw_instance, 'process_exception'): + self._exception_middleware.append(mw_instance.process_exception) + + handler = convert_exception_to_response(mw_instance) + + # We only assign to this when initialization is complete as it is used + # as a flag for initialization being complete. 
+ self._middleware_chain = handler + + def make_view_atomic(self, view): + non_atomic_requests = getattr(view, '_non_atomic_requests', set()) + for db in connections.all(): + if db.settings_dict['ATOMIC_REQUESTS'] and db.alias not in non_atomic_requests: + view = transaction.atomic(using=db.alias)(view) + return view + + def get_exception_response(self, request, resolver, status_code, exception): + return get_exception_response(request, resolver, status_code, exception, self.__class__) + + def get_response(self, request): + """Return an HttpResponse object for the given HttpRequest.""" + # Setup default url resolver for this thread + set_urlconf(settings.ROOT_URLCONF) + + response = self._middleware_chain(request) + + response._closable_objects.append(request) + + # If the exception handler returns a TemplateResponse that has not + # been rendered, force it to be rendered. + if not getattr(response, 'is_rendered', True) and callable(getattr(response, 'render', None)): + response = response.render() + + if response.status_code == 404: + logger.warning( + 'Not Found: %s', request.path, + extra={'status_code': 404, 'request': request}, + ) + + return response + + def _get_response(self, request): + """ + Resolve and call the view, then apply view, exception, and + template_response middleware. This method is everything that happens + inside the request/response middleware. 
+ """ + response = None + + if hasattr(request, 'urlconf'): + urlconf = request.urlconf + set_urlconf(urlconf) + resolver = get_resolver(urlconf) + else: + resolver = get_resolver() + + resolver_match = resolver.resolve(request.path_info) + callback, callback_args, callback_kwargs = resolver_match + request.resolver_match = resolver_match + + # Apply view middleware + for middleware_method in self._view_middleware: + response = middleware_method(request, callback, callback_args, callback_kwargs) + if response: + break + + if response is None: + wrapped_callback = self.make_view_atomic(callback) + try: + response = wrapped_callback(request, *callback_args, **callback_kwargs) + except Exception as e: + response = self.process_exception_by_middleware(e, request) + + # Complain if the view returned None (a common error). + if response is None: + if isinstance(callback, types.FunctionType): # FBV + view_name = callback.__name__ + else: # CBV + view_name = callback.__class__.__name__ + '.__call__' + + raise ValueError( + "The view %s.%s didn't return an HttpResponse object. It " + "returned None instead." % (callback.__module__, view_name) + ) + + # If the response supports deferred rendering, apply template + # response middleware and then render the response + elif hasattr(response, 'render') and callable(response.render): + for middleware_method in self._template_response_middleware: + response = middleware_method(request, response) + # Complain if the template response middleware returned None (a common error). + if response is None: + raise ValueError( + "%s.process_template_response didn't return an " + "HttpResponse object. It returned None instead." 
+ % (middleware_method.__self__.__class__.__name__) + ) + + try: + response = response.render() + except Exception as e: + response = self.process_exception_by_middleware(e, request) + + return response + + def process_exception_by_middleware(self, exception, request): + """ + Pass the exception to the exception middleware. If no middleware + return a response for this exception, raise it. + """ + for middleware_method in self._exception_middleware: + response = middleware_method(request, exception) + if response: + return response + raise diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/handlers/exception.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/handlers/exception.py new file mode 100644 index 0000000000000000000000000000000000000000..be2e4734ee49417fc47b3060e1f88683d47fc2e9 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/handlers/exception.py @@ -0,0 +1,126 @@ +import logging +import sys +from functools import wraps + +from django.conf import settings +from django.core import signals +from django.core.exceptions import ( + PermissionDenied, RequestDataTooBig, SuspiciousOperation, + TooManyFieldsSent, +) +from django.http import Http404 +from django.http.multipartparser import MultiPartParserError +from django.urls import get_resolver, get_urlconf +from django.views import debug + +logger = logging.getLogger('django.request') + + +def convert_exception_to_response(get_response): + """ + Wrap the given get_response callable in exception-to-response conversion. + + All exceptions will be converted. All known 4xx exceptions (Http404, + PermissionDenied, MultiPartParserError, SuspiciousOperation) will be + converted to the appropriate response, and all other exceptions will be + converted to 500 responses. 
+ + This decorator is automatically applied to all middleware to ensure that + no middleware leaks an exception and that the next middleware in the stack + can rely on getting a response instead of an exception. + """ + @wraps(get_response) + def inner(request): + try: + response = get_response(request) + except Exception as exc: + response = response_for_exception(request, exc) + return response + return inner + + +def response_for_exception(request, exc): + if isinstance(exc, Http404): + if settings.DEBUG: + response = debug.technical_404_response(request, exc) + else: + response = get_exception_response(request, get_resolver(get_urlconf()), 404, exc) + + elif isinstance(exc, PermissionDenied): + logger.warning( + 'Forbidden (Permission denied): %s', request.path, + extra={'status_code': 403, 'request': request}, + ) + response = get_exception_response(request, get_resolver(get_urlconf()), 403, exc) + + elif isinstance(exc, MultiPartParserError): + logger.warning( + 'Bad request (Unable to parse request body): %s', request.path, + extra={'status_code': 400, 'request': request}, + ) + response = get_exception_response(request, get_resolver(get_urlconf()), 400, exc) + + elif isinstance(exc, SuspiciousOperation): + if isinstance(exc, (RequestDataTooBig, TooManyFieldsSent)): + # POST data can't be accessed again, otherwise the original + # exception would be raised. 
+ request._mark_post_parse_error() + + # The request logger receives events for any problematic request + # The security logger receives events for all SuspiciousOperations + security_logger = logging.getLogger('django.security.%s' % exc.__class__.__name__) + security_logger.error( + str(exc), + extra={'status_code': 400, 'request': request}, + ) + if settings.DEBUG: + response = debug.technical_500_response(request, *sys.exc_info(), status_code=400) + else: + response = get_exception_response(request, get_resolver(get_urlconf()), 400, exc) + + elif isinstance(exc, SystemExit): + # Allow sys.exit() to actually exit. See tickets #1023 and #4701 + raise + + else: + signals.got_request_exception.send(sender=None, request=request) + response = handle_uncaught_exception(request, get_resolver(get_urlconf()), sys.exc_info()) + + # Force a TemplateResponse to be rendered. + if not getattr(response, 'is_rendered', True) and callable(getattr(response, 'render', None)): + response = response.render() + + return response + + +def get_exception_response(request, resolver, status_code, exception, sender=None): + try: + callback, param_dict = resolver.resolve_error_handler(status_code) + response = callback(request, **dict(param_dict, exception=exception)) + except Exception: + signals.got_request_exception.send(sender=sender, request=request) + response = handle_uncaught_exception(request, resolver, sys.exc_info()) + + return response + + +def handle_uncaught_exception(request, resolver, exc_info): + """ + Processing for any otherwise uncaught exceptions (those that will + generate HTTP 500 responses). + """ + if settings.DEBUG_PROPAGATE_EXCEPTIONS: + raise + + logger.error( + 'Internal Server Error: %s', request.path, + exc_info=exc_info, + extra={'status_code': 500, 'request': request}, + ) + + if settings.DEBUG: + return debug.technical_500_response(request, *exc_info) + + # Return an HttpResponse that displays a friendly error message. 
class LimitedStream:
    """Wrap another stream to disallow reading it past a number of bytes."""

    def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
        self.stream = stream
        self.remaining = limit
        self.buffer = b''
        # NOTE(review): buf_size is stored but not used by the visible read
        # paths; retained for backward compatibility.
        self.buf_size = buf_size

    def _read_limited(self, size=None):
        # Clamp the request to whatever the byte limit still allows.
        if size is None or size > self.remaining:
            size = self.remaining
        if size == 0:
            return b''
        data = self.stream.read(size)
        self.remaining -= len(data)
        return data

    def read(self, size=None):
        """Read up to ``size`` bytes (all remaining bytes when ``size`` is None)."""
        if size is None:
            data = self.buffer + self._read_limited()
            self.buffer = b''
            return data
        if size < len(self.buffer):
            # Serve entirely from the buffer.
            data, self.buffer = self.buffer[:size], self.buffer[size:]
            return data
        # size >= len(self.buffer): drain the buffer, then top up from the stream.
        data = self.buffer + self._read_limited(size - len(self.buffer))
        self.buffer = b''
        return data

    def readline(self, size=None):
        """Read one line, buffering chunks until a newline or the size cap."""
        while b'\n' not in self.buffer and (size is None or len(self.buffer) < size):
            chunk = self._read_limited(size - len(self.buffer) if size else None)
            if not chunk:
                break
            self.buffer += chunk
        sio = BytesIO(self.buffer)
        line = sio.readline(size) if size else sio.readline()
        self.buffer = sio.read()
        return line
+ raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '') + return QueryDict(raw_query_string, encoding=self._encoding) + + def _get_post(self): + if not hasattr(self, '_post'): + self._load_post_and_files() + return self._post + + def _set_post(self, post): + self._post = post + + @cached_property + def COOKIES(self): + raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '') + return parse_cookie(raw_cookie) + + @property + def FILES(self): + if not hasattr(self, '_files'): + self._load_post_and_files() + return self._files + + POST = property(_get_post, _set_post) + + +class WSGIHandler(base.BaseHandler): + request_class = WSGIRequest + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.load_middleware() + + def __call__(self, environ, start_response): + set_script_prefix(get_script_name(environ)) + signals.request_started.send(sender=self.__class__, environ=environ) + request = self.request_class(environ) + response = self.get_response(request) + + response._handler_class = self.__class__ + + status = '%d %s' % (response.status_code, response.reason_phrase) + response_headers = list(response.items()) + for c in response.cookies.values(): + response_headers.append(('Set-Cookie', c.output(header=''))) + start_response(status, response_headers) + if getattr(response, 'file_to_stream', None) is not None and environ.get('wsgi.file_wrapper'): + response = environ['wsgi.file_wrapper'](response.file_to_stream) + return response + + +def get_path_info(environ): + """Return the HTTP request's PATH_INFO as a string.""" + path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '/') + + return repercent_broken_unicode(path_info).decode() + + +def get_script_name(environ): + """ + Return the equivalent of the HTTP request's SCRIPT_NAME environment + variable. 
If Apache mod_rewrite is used, return what would have been + the script name prior to any rewriting (so it's the script name as seen + from the client's perspective), unless the FORCE_SCRIPT_NAME setting is + set (to anything). + """ + if settings.FORCE_SCRIPT_NAME is not None: + return settings.FORCE_SCRIPT_NAME + + # If Apache's mod_rewrite had a whack at the URL, Apache set either + # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any + # rewrites. Unfortunately not every Web server (lighttpd!) passes this + # information through all the time, so FORCE_SCRIPT_NAME, above, is still + # needed. + script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '') + if not script_url: + script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '') + + if script_url: + if b'//' in script_url: + # mod_wsgi squashes multiple successive slashes in PATH_INFO, + # do the same with script_url before manipulating paths (#17133). + script_url = _slashes_re.sub(b'/', script_url) + path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '') + script_name = script_url[:-len(path_info)] if path_info else script_url + else: + script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '') + + return script_name.decode() + + +def get_bytes_from_wsgi(environ, key, default): + """ + Get a value from the WSGI environ dictionary as bytes. + + key and default should be strings. + """ + value = environ.get(key, default) + # Non-ASCII values in the WSGI environ are arbitrarily decoded with + # ISO-8859-1. This is wrong for Django websites where UTF-8 is the default. + # Re-encode to recover the original bytestring. + return value.encode('iso-8859-1') + + +def get_str_from_wsgi(environ, key, default): + """ + Get a value from the WSGI environ dictionary as str. + + key and default should be str objects. 
+ """ + value = get_bytes_from_wsgi(environ, key, default) + return value.decode(errors='replace') diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/__init__.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..05c8c6a1b188bc77e4eaaa5b94f1d804f3600234 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/__init__.py @@ -0,0 +1,116 @@ +""" +Tools for sending email. +""" +from django.conf import settings +# Imported for backwards compatibility and for the sake +# of a cleaner namespace. These symbols used to be in +# django/core/mail.py before the introduction of email +# backends and the subsequent reorganization (See #10355) +from django.core.mail.message import ( + DEFAULT_ATTACHMENT_MIME_TYPE, BadHeaderError, EmailMessage, + EmailMultiAlternatives, SafeMIMEMultipart, SafeMIMEText, + forbid_multi_line_headers, make_msgid, +) +from django.core.mail.utils import DNS_NAME, CachedDnsName +from django.utils.module_loading import import_string + +__all__ = [ + 'CachedDnsName', 'DNS_NAME', 'EmailMessage', 'EmailMultiAlternatives', + 'SafeMIMEText', 'SafeMIMEMultipart', 'DEFAULT_ATTACHMENT_MIME_TYPE', + 'make_msgid', 'BadHeaderError', 'forbid_multi_line_headers', + 'get_connection', 'send_mail', 'send_mass_mail', 'mail_admins', + 'mail_managers', +] + + +def get_connection(backend=None, fail_silently=False, **kwds): + """Load an email backend and return an instance of it. + + If backend is None (default), use settings.EMAIL_BACKEND. + + Both fail_silently and other keyword arguments are used in the + constructor of the backend. 
+ """ + klass = import_string(backend or settings.EMAIL_BACKEND) + return klass(fail_silently=fail_silently, **kwds) + + +def send_mail(subject, message, from_email, recipient_list, + fail_silently=False, auth_user=None, auth_password=None, + connection=None, html_message=None): + """ + Easy wrapper for sending a single message to a recipient list. All members + of the recipient list will see the other recipients in the 'To' field. + + If auth_user is None, use the EMAIL_HOST_USER setting. + If auth_password is None, use the EMAIL_HOST_PASSWORD setting. + + Note: The API for this method is frozen. New code wanting to extend the + functionality should use the EmailMessage class directly. + """ + connection = connection or get_connection( + username=auth_user, + password=auth_password, + fail_silently=fail_silently, + ) + mail = EmailMultiAlternatives(subject, message, from_email, recipient_list, connection=connection) + if html_message: + mail.attach_alternative(html_message, 'text/html') + + return mail.send() + + +def send_mass_mail(datatuple, fail_silently=False, auth_user=None, + auth_password=None, connection=None): + """ + Given a datatuple of (subject, message, from_email, recipient_list), send + each message to each recipient list. Return the number of emails sent. + + If from_email is None, use the DEFAULT_FROM_EMAIL setting. + If auth_user and auth_password are set, use them to log in. + If auth_user is None, use the EMAIL_HOST_USER setting. + If auth_password is None, use the EMAIL_HOST_PASSWORD setting. + + Note: The API for this method is frozen. New code wanting to extend the + functionality should use the EmailMessage class directly. 
+ """ + connection = connection or get_connection( + username=auth_user, + password=auth_password, + fail_silently=fail_silently, + ) + messages = [ + EmailMessage(subject, message, sender, recipient, connection=connection) + for subject, message, sender, recipient in datatuple + ] + return connection.send_messages(messages) + + +def mail_admins(subject, message, fail_silently=False, connection=None, + html_message=None): + """Send a message to the admins, as defined by the ADMINS setting.""" + if not settings.ADMINS: + return + mail = EmailMultiAlternatives( + '%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject), message, + settings.SERVER_EMAIL, [a[1] for a in settings.ADMINS], + connection=connection, + ) + if html_message: + mail.attach_alternative(html_message, 'text/html') + mail.send(fail_silently=fail_silently) + + +def mail_managers(subject, message, fail_silently=False, connection=None, + html_message=None): + """Send a message to the managers, as defined by the MANAGERS setting.""" + if not settings.MANAGERS: + return + mail = EmailMultiAlternatives( + '%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject), message, + settings.SERVER_EMAIL, [a[1] for a in settings.MANAGERS], + connection=connection, + ) + if html_message: + mail.attach_alternative(html_message, 'text/html') + mail.send(fail_silently=fail_silently) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/__init__.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5973b499b0d303d992a90f1a4368e5873bc33ef2 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/__init__.py @@ -0,0 +1 @@ +# Mail backends shipped with Django. 
diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/base.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/base.py new file mode 100644 index 0000000000000000000000000000000000000000..d68770333257c9f20013ce5e8ff2db0e4ee05d78 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/base.py @@ -0,0 +1,59 @@ +"""Base email backend class.""" + + +class BaseEmailBackend: + """ + Base class for email backend implementations. + + Subclasses must at least overwrite send_messages(). + + open() and close() can be called indirectly by using a backend object as a + context manager: + + with backend as connection: + # do something with connection + pass + """ + def __init__(self, fail_silently=False, **kwargs): + self.fail_silently = fail_silently + + def open(self): + """ + Open a network connection. + + This method can be overwritten by backend implementations to + open a network connection. + + It's up to the backend implementation to track the status of + a network connection if it's needed by the backend. + + This method can be called by applications to force a single + network connection to be used when sending mails. See the + send_messages() method of the SMTP backend for a reference + implementation. + + The default implementation does nothing. + """ + pass + + def close(self): + """Close a network connection.""" + pass + + def __enter__(self): + try: + self.open() + except Exception: + self.close() + raise + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def send_messages(self, email_messages): + """ + Send one or more EmailMessage objects and return the number of email + messages sent. 
+ """ + raise NotImplementedError('subclasses of BaseEmailBackend must override send_messages() method') diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/console.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/console.py new file mode 100644 index 0000000000000000000000000000000000000000..a8bdcbd2c07ec42173480841b0166bec4d18b965 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/console.py @@ -0,0 +1,42 @@ +""" +Email backend that writes messages to console instead of sending them. +""" +import sys +import threading + +from django.core.mail.backends.base import BaseEmailBackend + + +class EmailBackend(BaseEmailBackend): + def __init__(self, *args, **kwargs): + self.stream = kwargs.pop('stream', sys.stdout) + self._lock = threading.RLock() + super().__init__(*args, **kwargs) + + def write_message(self, message): + msg = message.message() + msg_data = msg.as_bytes() + charset = msg.get_charset().get_output_charset() if msg.get_charset() else 'utf-8' + msg_data = msg_data.decode(charset) + self.stream.write('%s\n' % msg_data) + self.stream.write('-' * 79) + self.stream.write('\n') + + def send_messages(self, email_messages): + """Write all messages to the stream in a thread-safe way.""" + if not email_messages: + return + msg_count = 0 + with self._lock: + try: + stream_created = self.open() + for message in email_messages: + self.write_message(message) + self.stream.flush() # flush after each message + msg_count += 1 + if stream_created: + self.close() + except Exception: + if not self.fail_silently: + raise + return msg_count diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/dummy.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/dummy.py new file mode 100644 index 
0000000000000000000000000000000000000000..7e47fe7564403ed756d4774e3c3f9dda1427bcdb --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/dummy.py @@ -0,0 +1,10 @@ +""" +Dummy email backend that does nothing. +""" + +from django.core.mail.backends.base import BaseEmailBackend + + +class EmailBackend(BaseEmailBackend): + def send_messages(self, email_messages): + return len(list(email_messages)) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/filebased.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/filebased.py new file mode 100644 index 0000000000000000000000000000000000000000..ddcd9ed97b6387295b4a8c013f1c25f06adeb842 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/filebased.py @@ -0,0 +1,70 @@ +"""Email backend that writes messages to a file.""" + +import datetime +import os + +from django.conf import settings +from django.core.exceptions import ImproperlyConfigured +from django.core.mail.backends.console import ( + EmailBackend as ConsoleEmailBackend, +) + + +class EmailBackend(ConsoleEmailBackend): + def __init__(self, *args, file_path=None, **kwargs): + self._fname = None + if file_path is not None: + self.file_path = file_path + else: + self.file_path = getattr(settings, 'EMAIL_FILE_PATH', None) + # Make sure self.file_path is a string. + if not isinstance(self.file_path, str): + raise ImproperlyConfigured('Path for saving emails is invalid: %r' % self.file_path) + self.file_path = os.path.abspath(self.file_path) + # Make sure that self.file_path is a directory if it exists. + if os.path.exists(self.file_path) and not os.path.isdir(self.file_path): + raise ImproperlyConfigured( + 'Path for saving email messages exists, but is not a directory: %s' % self.file_path + ) + # Try to create it, if it not exists. 
+ elif not os.path.exists(self.file_path): + try: + os.makedirs(self.file_path) + except OSError as err: + raise ImproperlyConfigured( + 'Could not create directory for saving email messages: %s (%s)' % (self.file_path, err) + ) + # Make sure that self.file_path is writable. + if not os.access(self.file_path, os.W_OK): + raise ImproperlyConfigured('Could not write to directory: %s' % self.file_path) + # Finally, call super(). + # Since we're using the console-based backend as a base, + # force the stream to be None, so we don't default to stdout + kwargs['stream'] = None + super().__init__(*args, **kwargs) + + def write_message(self, message): + self.stream.write(message.message().as_bytes() + b'\n') + self.stream.write(b'-' * 79) + self.stream.write(b'\n') + + def _get_filename(self): + """Return a unique file name.""" + if self._fname is None: + timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + fname = "%s-%s.log" % (timestamp, abs(id(self))) + self._fname = os.path.join(self.file_path, fname) + return self._fname + + def open(self): + if self.stream is None: + self.stream = open(self._get_filename(), 'ab') + return True + return False + + def close(self): + try: + if self.stream is not None: + self.stream.close() + finally: + self.stream = None diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/locmem.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/locmem.py new file mode 100644 index 0000000000000000000000000000000000000000..84732e997b64b253149de8dc86a90bbe5e346141 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/locmem.py @@ -0,0 +1,30 @@ +""" +Backend for test environment. +""" + +from django.core import mail +from django.core.mail.backends.base import BaseEmailBackend + + +class EmailBackend(BaseEmailBackend): + """ + An email backend for use during test sessions. 
+ + The test connection stores email messages in a dummy outbox, + rather than sending them out on the wire. + + The dummy outbox is accessible through the outbox instance attribute. + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if not hasattr(mail, 'outbox'): + mail.outbox = [] + + def send_messages(self, messages): + """Redirect messages to the dummy outbox""" + msg_count = 0 + for message in messages: # .message() triggers header validation + message.message() + mail.outbox.append(message) + msg_count += 1 + return msg_count diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/smtp.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/smtp.py new file mode 100644 index 0000000000000000000000000000000000000000..d061fb575003e4b3ba9e8a34cfbd5c8b5ab01bea --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/backends/smtp.py @@ -0,0 +1,131 @@ +"""SMTP email backend class.""" +import smtplib +import socket +import ssl +import threading + +from django.conf import settings +from django.core.mail.backends.base import BaseEmailBackend +from django.core.mail.message import sanitize_address +from django.core.mail.utils import DNS_NAME + + +class EmailBackend(BaseEmailBackend): + """ + A wrapper that manages the SMTP network connection. 
+ """ + def __init__(self, host=None, port=None, username=None, password=None, + use_tls=None, fail_silently=False, use_ssl=None, timeout=None, + ssl_keyfile=None, ssl_certfile=None, + **kwargs): + super().__init__(fail_silently=fail_silently) + self.host = host or settings.EMAIL_HOST + self.port = port or settings.EMAIL_PORT + self.username = settings.EMAIL_HOST_USER if username is None else username + self.password = settings.EMAIL_HOST_PASSWORD if password is None else password + self.use_tls = settings.EMAIL_USE_TLS if use_tls is None else use_tls + self.use_ssl = settings.EMAIL_USE_SSL if use_ssl is None else use_ssl + self.timeout = settings.EMAIL_TIMEOUT if timeout is None else timeout + self.ssl_keyfile = settings.EMAIL_SSL_KEYFILE if ssl_keyfile is None else ssl_keyfile + self.ssl_certfile = settings.EMAIL_SSL_CERTFILE if ssl_certfile is None else ssl_certfile + if self.use_ssl and self.use_tls: + raise ValueError( + "EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set " + "one of those settings to True.") + self.connection = None + self._lock = threading.RLock() + + @property + def connection_class(self): + return smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP + + def open(self): + """ + Ensure an open connection to the email server. Return whether or not a + new connection was required (True or False) or None if an exception + passed silently. + """ + if self.connection: + # Nothing to do if the connection is already open. + return False + + # If local_hostname is not specified, socket.getfqdn() gets used. + # For performance, we use the cached FQDN for local_hostname. 
+ connection_params = {'local_hostname': DNS_NAME.get_fqdn()} + if self.timeout is not None: + connection_params['timeout'] = self.timeout + if self.use_ssl: + connection_params.update({ + 'keyfile': self.ssl_keyfile, + 'certfile': self.ssl_certfile, + }) + try: + self.connection = self.connection_class(self.host, self.port, **connection_params) + + # TLS/SSL are mutually exclusive, so only attempt TLS over + # non-secure connections. + if not self.use_ssl and self.use_tls: + self.connection.starttls(keyfile=self.ssl_keyfile, certfile=self.ssl_certfile) + if self.username and self.password: + self.connection.login(self.username, self.password) + return True + except (smtplib.SMTPException, socket.error): + if not self.fail_silently: + raise + + def close(self): + """Close the connection to the email server.""" + if self.connection is None: + return + try: + try: + self.connection.quit() + except (ssl.SSLError, smtplib.SMTPServerDisconnected): + # This happens when calling quit() on a TLS connection + # sometimes, or when the connection was already disconnected + # by the server. + self.connection.close() + except smtplib.SMTPException: + if self.fail_silently: + return + raise + finally: + self.connection = None + + def send_messages(self, email_messages): + """ + Send one or more EmailMessage objects and return the number of email + messages sent. + """ + if not email_messages: + return + with self._lock: + new_conn_created = self.open() + if not self.connection or new_conn_created is None: + # We failed silently on open(). + # Trying to send would be pointless. 
+ return + num_sent = 0 + for message in email_messages: + sent = self._send(message) + if sent: + num_sent += 1 + if new_conn_created: + self.close() + return num_sent + + def _send(self, email_message): + """A helper method that does the actual sending.""" + if not email_message.recipients(): + return False + encoding = email_message.encoding or settings.DEFAULT_CHARSET + from_email = sanitize_address(email_message.from_email, encoding) + recipients = [sanitize_address(addr, encoding) for addr in email_message.recipients()] + message = email_message.message() + try: + self.connection.sendmail(from_email, recipients, message.as_bytes(linesep='\r\n')) + except smtplib.SMTPException: + if not self.fail_silently: + raise + return False + return True diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/message.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/message.py new file mode 100644 index 0000000000000000000000000000000000000000..9c68b8be6d3be76794219af5bf0debd2942b37cf --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/message.py @@ -0,0 +1,451 @@ +import mimetypes +import os +from email import ( + charset as Charset, encoders as Encoders, generator, message_from_string, +) +from email.errors import InvalidHeaderDefect, NonASCIILocalPartDefect +from email.header import Header +from email.headerregistry import Address +from email.message import Message +from email.mime.base import MIMEBase +from email.mime.message import MIMEMessage +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText +from email.utils import formatdate, getaddresses, make_msgid, parseaddr +from io import BytesIO, StringIO + +from django.conf import settings +from django.core.mail.utils import DNS_NAME +from django.utils.encoding import force_text + +# Don't BASE64-encode UTF-8 messages so that we avoid unwanted 
attention from +# some spam filters. +utf8_charset = Charset.Charset('utf-8') +utf8_charset.body_encoding = None # Python defaults to BASE64 +utf8_charset_qp = Charset.Charset('utf-8') +utf8_charset_qp.body_encoding = Charset.QP + +# Default MIME type to use on attachments (if it is not explicitly given +# and cannot be guessed). +DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream' + +RFC5322_EMAIL_LINE_LENGTH_LIMIT = 998 + + +class BadHeaderError(ValueError): + pass + + +# Header names that contain structured address data (RFC #5322) +ADDRESS_HEADERS = { + 'from', + 'sender', + 'reply-to', + 'to', + 'cc', + 'bcc', + 'resent-from', + 'resent-sender', + 'resent-to', + 'resent-cc', + 'resent-bcc', +} + + +def forbid_multi_line_headers(name, val, encoding): + """Forbid multi-line headers to prevent header injection.""" + encoding = encoding or settings.DEFAULT_CHARSET + val = str(val) # val may be lazy + if '\n' in val or '\r' in val: + raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name)) + try: + val.encode('ascii') + except UnicodeEncodeError: + if name.lower() in ADDRESS_HEADERS: + val = ', '.join(sanitize_address(addr, encoding) for addr in getaddresses((val,))) + else: + val = Header(val, encoding).encode() + else: + if name.lower() == 'subject': + val = Header(val).encode() + return name, val + + +def split_addr(addr, encoding): + """ + Split the address into local part and domain and encode them. + + When non-ascii characters are present in the local part, it must be + MIME-word encoded. The domain name must be idna-encoded if it contains + non-ascii characters. + """ + if '@' in addr: + localpart, domain = addr.split('@', 1) + # Try to get the simplest encoding - ascii if possible so that + # to@example.com doesn't become =?utf-8?q?to?=@example.com. This + # makes unit testing a bit easier and more readable. 
+ try: + localpart.encode('ascii') + except UnicodeEncodeError: + localpart = Header(localpart, encoding).encode() + domain = domain.encode('idna').decode('ascii') + else: + localpart = Header(addr, encoding).encode() + domain = '' + return (localpart, domain) + + +def sanitize_address(addr, encoding): + """ + Format a pair of (name, address) or an email address string. + """ + if not isinstance(addr, tuple): + addr = parseaddr(addr) + nm, addr = addr + localpart, domain = None, None + nm = Header(nm, encoding).encode() + try: + addr.encode('ascii') + except UnicodeEncodeError: # IDN or non-ascii in the local part + localpart, domain = split_addr(addr, encoding) + + # An `email.headerregistry.Address` object is used since + # email.utils.formataddr() naively encodes the name as ascii (see #25986). + if localpart and domain: + address = Address(nm, username=localpart, domain=domain) + return str(address) + + try: + address = Address(nm, addr_spec=addr) + except (InvalidHeaderDefect, NonASCIILocalPartDefect): + localpart, domain = split_addr(addr, encoding) + address = Address(nm, username=localpart, domain=domain) + return str(address) + + +class MIMEMixin: + def as_string(self, unixfrom=False, linesep='\n'): + """Return the entire formatted message as a string. + Optional `unixfrom' when True, means include the Unix From_ envelope + header. + + This overrides the default as_string() implementation to not mangle + lines that begin with 'From '. See bug #13433 for details. + """ + fp = StringIO() + g = generator.Generator(fp, mangle_from_=False) + g.flatten(self, unixfrom=unixfrom, linesep=linesep) + return fp.getvalue() + + def as_bytes(self, unixfrom=False, linesep='\n'): + """Return the entire formatted message as bytes. + Optional `unixfrom' when True, means include the Unix From_ envelope + header. + + This overrides the default as_bytes() implementation to not mangle + lines that begin with 'From '. See bug #13433 for details. 
+ """ + fp = BytesIO() + g = generator.BytesGenerator(fp, mangle_from_=False) + g.flatten(self, unixfrom=unixfrom, linesep=linesep) + return fp.getvalue() + + +class SafeMIMEMessage(MIMEMixin, MIMEMessage): + + def __setitem__(self, name, val): + # message/rfc822 attachments must be ASCII + name, val = forbid_multi_line_headers(name, val, 'ascii') + MIMEMessage.__setitem__(self, name, val) + + +class SafeMIMEText(MIMEMixin, MIMEText): + + def __init__(self, _text, _subtype='plain', _charset=None): + self.encoding = _charset + MIMEText.__init__(self, _text, _subtype=_subtype, _charset=_charset) + + def __setitem__(self, name, val): + name, val = forbid_multi_line_headers(name, val, self.encoding) + MIMEText.__setitem__(self, name, val) + + def set_payload(self, payload, charset=None): + if charset == 'utf-8': + has_long_lines = any( + len(l.encode()) > RFC5322_EMAIL_LINE_LENGTH_LIMIT + for l in payload.splitlines() + ) + # Quoted-Printable encoding has the side effect of shortening long + # lines, if any (#22561). + charset = utf8_charset_qp if has_long_lines else utf8_charset + MIMEText.set_payload(self, payload, charset=charset) + + +class SafeMIMEMultipart(MIMEMixin, MIMEMultipart): + + def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params): + self.encoding = encoding + MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params) + + def __setitem__(self, name, val): + name, val = forbid_multi_line_headers(name, val, self.encoding) + MIMEMultipart.__setitem__(self, name, val) + + +class EmailMessage: + """A container for email information.""" + content_subtype = 'plain' + mixed_subtype = 'mixed' + encoding = None # None => use settings default + + def __init__(self, subject='', body='', from_email=None, to=None, bcc=None, + connection=None, attachments=None, headers=None, cc=None, + reply_to=None): + """ + Initialize a single email message (which can be sent to multiple + recipients). 
+ """ + if to: + if isinstance(to, str): + raise TypeError('"to" argument must be a list or tuple') + self.to = list(to) + else: + self.to = [] + if cc: + if isinstance(cc, str): + raise TypeError('"cc" argument must be a list or tuple') + self.cc = list(cc) + else: + self.cc = [] + if bcc: + if isinstance(bcc, str): + raise TypeError('"bcc" argument must be a list or tuple') + self.bcc = list(bcc) + else: + self.bcc = [] + if reply_to: + if isinstance(reply_to, str): + raise TypeError('"reply_to" argument must be a list or tuple') + self.reply_to = list(reply_to) + else: + self.reply_to = [] + self.from_email = from_email or settings.DEFAULT_FROM_EMAIL + self.subject = subject + self.body = body + self.attachments = [] + if attachments: + for attachment in attachments: + if isinstance(attachment, MIMEBase): + self.attach(attachment) + else: + self.attach(*attachment) + self.extra_headers = headers or {} + self.connection = connection + + def get_connection(self, fail_silently=False): + from django.core.mail import get_connection + if not self.connection: + self.connection = get_connection(fail_silently=fail_silently) + return self.connection + + def message(self): + encoding = self.encoding or settings.DEFAULT_CHARSET + msg = SafeMIMEText(self.body, self.content_subtype, encoding) + msg = self._create_message(msg) + msg['Subject'] = self.subject + msg['From'] = self.extra_headers.get('From', self.from_email) + msg['To'] = self.extra_headers.get('To', ', '.join(map(str, self.to))) + if self.cc: + msg['Cc'] = ', '.join(str(cc) for cc in self.cc) + if self.reply_to: + msg['Reply-To'] = self.extra_headers.get('Reply-To', ', '.join(str(r) for r in self.reply_to)) + + # Email header names are case-insensitive (RFC 2045), so we have to + # accommodate that when doing comparisons. 
+ header_names = [key.lower() for key in self.extra_headers] + if 'date' not in header_names: + # formatdate() uses stdlib methods to format the date, which use + # the stdlib/OS concept of a timezone, however, Django sets the + # TZ environment variable based on the TIME_ZONE setting which + # will get picked up by formatdate(). + msg['Date'] = formatdate(localtime=settings.EMAIL_USE_LOCALTIME) + if 'message-id' not in header_names: + # Use cached DNS_NAME for performance + msg['Message-ID'] = make_msgid(domain=DNS_NAME) + for name, value in self.extra_headers.items(): + if name.lower() in ('from', 'to'): # From and To are already handled + continue + msg[name] = value + return msg + + def recipients(self): + """ + Return a list of all recipients of the email (includes direct + addressees as well as Cc and Bcc entries). + """ + return [email for email in (self.to + self.cc + self.bcc) if email] + + def send(self, fail_silently=False): + """Send the email message.""" + if not self.recipients(): + # Don't bother creating the network connection if there's nobody to + # send to. + return 0 + return self.get_connection(fail_silently).send_messages([self]) + + def attach(self, filename=None, content=None, mimetype=None): + """ + Attach a file with the given filename and content. The filename can + be omitted and the mimetype is guessed, if not provided. + + If the first parameter is a MIMEBase subclass, insert it directly + into the resulting message attachments. + + For a text/* mimetype (guessed or specified), when a bytes object is + specified as content, decode it as UTF-8. If that fails, set the + mimetype to DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content. 
+ """ + if isinstance(filename, MIMEBase): + assert content is None + assert mimetype is None + self.attachments.append(filename) + else: + assert content is not None + + if not mimetype: + mimetype, _ = mimetypes.guess_type(filename) + if not mimetype: + mimetype = DEFAULT_ATTACHMENT_MIME_TYPE + basetype, subtype = mimetype.split('/', 1) + + if basetype == 'text': + if isinstance(content, bytes): + try: + content = content.decode() + except UnicodeDecodeError: + # If mimetype suggests the file is text but it's + # actually binary, read() raises a UnicodeDecodeError. + mimetype = DEFAULT_ATTACHMENT_MIME_TYPE + + self.attachments.append((filename, content, mimetype)) + + def attach_file(self, path, mimetype=None): + """ + Attach a file from the filesystem. + + Set the mimetype to DEFAULT_ATTACHMENT_MIME_TYPE if it isn't specified + and cannot be guessed. + + For a text/* mimetype (guessed or specified), decode the file's content + as UTF-8. If that fails, set the mimetype to + DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content. + """ + filename = os.path.basename(path) + + with open(path, 'rb') as file: + content = file.read() + self.attach(filename, content, mimetype) + + def _create_message(self, msg): + return self._create_attachments(msg) + + def _create_attachments(self, msg): + if self.attachments: + encoding = self.encoding or settings.DEFAULT_CHARSET + body_msg = msg + msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding) + if self.body or body_msg.is_multipart(): + msg.attach(body_msg) + for attachment in self.attachments: + if isinstance(attachment, MIMEBase): + msg.attach(attachment) + else: + msg.attach(self._create_attachment(*attachment)) + return msg + + def _create_mime_attachment(self, content, mimetype): + """ + Convert the content, mimetype pair into a MIME attachment object. + + If the mimetype is message/rfc822, content may be an + email.Message or EmailMessage object, as well as a str. 
+ """ + basetype, subtype = mimetype.split('/', 1) + if basetype == 'text': + encoding = self.encoding or settings.DEFAULT_CHARSET + attachment = SafeMIMEText(content, subtype, encoding) + elif basetype == 'message' and subtype == 'rfc822': + # Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments + # must not be base64 encoded. + if isinstance(content, EmailMessage): + # convert content into an email.Message first + content = content.message() + elif not isinstance(content, Message): + # For compatibility with existing code, parse the message + # into an email.Message object if it is not one already. + content = message_from_string(force_text(content)) + + attachment = SafeMIMEMessage(content, subtype) + else: + # Encode non-text attachments with base64. + attachment = MIMEBase(basetype, subtype) + attachment.set_payload(content) + Encoders.encode_base64(attachment) + return attachment + + def _create_attachment(self, filename, content, mimetype=None): + """ + Convert the filename, content, mimetype triple into a MIME attachment + object. + """ + attachment = self._create_mime_attachment(content, mimetype) + if filename: + try: + filename.encode('ascii') + except UnicodeEncodeError: + filename = ('utf-8', '', filename) + attachment.add_header('Content-Disposition', 'attachment', + filename=filename) + return attachment + + +class EmailMultiAlternatives(EmailMessage): + """ + A version of EmailMessage that makes it easy to send multipart/alternative + messages. For example, including text and HTML versions of the text is + made easier. + """ + alternative_subtype = 'alternative' + + def __init__(self, subject='', body='', from_email=None, to=None, bcc=None, + connection=None, attachments=None, headers=None, alternatives=None, + cc=None, reply_to=None): + """ + Initialize a single email message (which can be sent to multiple + recipients). 
+ """ + super().__init__( + subject, body, from_email, to, bcc, connection, attachments, + headers, cc, reply_to, + ) + self.alternatives = alternatives or [] + + def attach_alternative(self, content, mimetype): + """Attach an alternative content representation.""" + assert content is not None + assert mimetype is not None + self.alternatives.append((content, mimetype)) + + def _create_message(self, msg): + return self._create_attachments(self._create_alternatives(msg)) + + def _create_alternatives(self, msg): + encoding = self.encoding or settings.DEFAULT_CHARSET + if self.alternatives: + body_msg = msg + msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding) + if self.body: + msg.attach(body_msg) + for alternative in self.alternatives: + msg.attach(self._create_mime_attachment(*alternative)) + return msg diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/utils.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d18dfe46672bec4ec8a013efd71bed313c4cab76 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/mail/utils.py @@ -0,0 +1,20 @@ +""" +Email message and email sending related helper functions. +""" + +import socket + + +# Cache the hostname, but do it lazily: socket.getfqdn() can take a couple of +# seconds, which slows down the restart of the server. 
+class CachedDnsName: + def __str__(self): + return self.get_fqdn() + + def get_fqdn(self): + if not hasattr(self, '_fqdn'): + self._fqdn = socket.getfqdn() + return self._fqdn + + +DNS_NAME = CachedDnsName() diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/__init__.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..77c060c21ce090909b624e86e6a4830d9369dfd7 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/__init__.py @@ -0,0 +1,371 @@ +import functools +import os +import pkgutil +import sys +from collections import OrderedDict, defaultdict +from importlib import import_module + +import django +from django.apps import apps +from django.conf import settings +from django.core.exceptions import ImproperlyConfigured +from django.core.management.base import ( + BaseCommand, CommandError, CommandParser, handle_default_options, +) +from django.core.management.color import color_style +from django.utils import autoreload +from django.utils.encoding import force_text + + +def find_commands(management_dir): + """ + Given a path to a management directory, return a list of all the command + names that are available. + """ + command_dir = os.path.join(management_dir, 'commands') + return [name for _, name, is_pkg in pkgutil.iter_modules([command_dir]) + if not is_pkg and not name.startswith('_')] + + +def load_command_class(app_name, name): + """ + Given a command name and an application name, return the Command + class instance. Allow all errors raised by the import process + (ImportError, AttributeError) to propagate. 
+ """ + module = import_module('%s.management.commands.%s' % (app_name, name)) + return module.Command() + + +@functools.lru_cache(maxsize=None) +def get_commands(): + """ + Return a dictionary mapping command names to their callback applications. + + Look for a management.commands package in django.core, and in each + installed application -- if a commands package exists, register all + commands in that package. + + Core commands are always included. If a settings module has been + specified, also include user-defined commands. + + The dictionary is in the format {command_name: app_name}. Key-value + pairs from this dictionary can then be used in calls to + load_command_class(app_name, command_name) + + If a specific version of a command must be loaded (e.g., with the + startapp command), the instantiated module can be placed in the + dictionary in place of the application name. + + The dictionary is cached on the first call and reused on subsequent + calls. + """ + commands = {name: 'django.core' for name in find_commands(__path__[0])} + + if not settings.configured: + return commands + + for app_config in reversed(list(apps.get_app_configs())): + path = os.path.join(app_config.path, 'management') + commands.update({name: app_config.name for name in find_commands(path)}) + + return commands + + +def call_command(command_name, *args, **options): + """ + Call the given command, with the given options and args/kwargs. + + This is the primary API you should use for calling specific commands. + + `command_name` may be a string or a command object. Using a string is + preferred unless the command object is required for further processing or + testing. + + Some examples: + call_command('migrate') + call_command('shell', plain=True) + call_command('sqlmigrate', 'myapp') + + from django.core.management.commands import flush + cmd = flush.Command() + call_command(cmd, verbosity=0, interactive=False) + # Do something with cmd ... 
+ """ + if isinstance(command_name, BaseCommand): + # Command object passed in. + command = command_name + command_name = command.__class__.__module__.split('.')[-1] + else: + # Load the command object by name. + try: + app_name = get_commands()[command_name] + except KeyError: + raise CommandError("Unknown command: %r" % command_name) + + if isinstance(app_name, BaseCommand): + # If the command is already loaded, use it directly. + command = app_name + else: + command = load_command_class(app_name, command_name) + + # Simulate argument parsing to get the option defaults (see #10080 for details). + parser = command.create_parser('', command_name) + # Use the `dest` option name from the parser option + opt_mapping = { + min(s_opt.option_strings).lstrip('-').replace('-', '_'): s_opt.dest + for s_opt in parser._actions if s_opt.option_strings + } + arg_options = {opt_mapping.get(key, key): value for key, value in options.items()} + defaults = parser.parse_args(args=[force_text(a) for a in args]) + defaults = dict(defaults._get_kwargs(), **arg_options) + # Raise an error if any unknown options were passed. + stealth_options = set(command.base_stealth_options + command.stealth_options) + dest_parameters = {action.dest for action in parser._actions} + valid_options = (dest_parameters | stealth_options).union(opt_mapping) + unknown_options = set(options) - valid_options + if unknown_options: + raise TypeError( + "Unknown option(s) for %s command: %s. " + "Valid options are: %s." % ( + command_name, + ', '.join(sorted(unknown_options)), + ', '.join(sorted(valid_options)), + ) + ) + # Move positional args out of options to mimic legacy optparse + args = defaults.pop('args', ()) + if 'skip_checks' not in options: + defaults['skip_checks'] = True + + return command.execute(*args, **defaults) + + +class ManagementUtility: + """ + Encapsulate the logic of the django-admin and manage.py utilities. 
+ """ + def __init__(self, argv=None): + self.argv = argv or sys.argv[:] + self.prog_name = os.path.basename(self.argv[0]) + if self.prog_name == '__main__.py': + self.prog_name = 'python -m django' + self.settings_exception = None + + def main_help_text(self, commands_only=False): + """Return the script's main help text, as a string.""" + if commands_only: + usage = sorted(get_commands()) + else: + usage = [ + "", + "Type '%s help ' for help on a specific subcommand." % self.prog_name, + "", + "Available subcommands:", + ] + commands_dict = defaultdict(lambda: []) + for name, app in get_commands().items(): + if app == 'django.core': + app = 'django' + else: + app = app.rpartition('.')[-1] + commands_dict[app].append(name) + style = color_style() + for app in sorted(commands_dict): + usage.append("") + usage.append(style.NOTICE("[%s]" % app)) + for name in sorted(commands_dict[app]): + usage.append(" %s" % name) + # Output an extra note if settings are not properly configured + if self.settings_exception is not None: + usage.append(style.NOTICE( + "Note that only Django core commands are listed " + "as settings are not properly configured (error: %s)." + % self.settings_exception)) + + return '\n'.join(usage) + + def fetch_command(self, subcommand): + """ + Try to fetch the given subcommand, printing a message with the + appropriate command called from the command line (usually + "django-admin" or "manage.py") if it can't be found. + """ + # Get commands outside of try block to prevent swallowing exceptions + commands = get_commands() + try: + app_name = commands[subcommand] + except KeyError: + if os.environ.get('DJANGO_SETTINGS_MODULE'): + # If `subcommand` is missing due to misconfigured settings, the + # following line will retrigger an ImproperlyConfigured exception + # (get_commands() swallows the original one) so the user is + # informed about it. 
+ settings.INSTALLED_APPS + else: + sys.stderr.write("No Django settings specified.\n") + sys.stderr.write( + "Unknown command: %r\nType '%s help' for usage.\n" + % (subcommand, self.prog_name) + ) + sys.exit(1) + if isinstance(app_name, BaseCommand): + # If the command is already loaded, use it directly. + klass = app_name + else: + klass = load_command_class(app_name, subcommand) + return klass + + def autocomplete(self): + """ + Output completion suggestions for BASH. + + The output of this function is passed to BASH's `COMREPLY` variable and + treated as completion suggestions. `COMREPLY` expects a space + separated string as the result. + + The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used + to get information about the cli input. Please refer to the BASH + man-page for more information about this variables. + + Subcommand options are saved as pairs. A pair consists of + the long option string (e.g. '--exclude') and a boolean + value indicating if the option requires arguments. When printing to + stdout, an equal sign is appended to options which require arguments. + + Note: If debugging this function, it is recommended to write the debug + output in a separate file. Otherwise the debug output will be treated + and formatted as potential completion suggestions. + """ + # Don't complete if user hasn't sourced bash_completion file. 
+ if 'DJANGO_AUTO_COMPLETE' not in os.environ: + return + + cwords = os.environ['COMP_WORDS'].split()[1:] + cword = int(os.environ['COMP_CWORD']) + + try: + curr = cwords[cword - 1] + except IndexError: + curr = '' + + subcommands = list(get_commands()) + ['help'] + options = [('--help', False)] + + # subcommand + if cword == 1: + print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands)))) + # subcommand options + # special case: the 'help' subcommand has no options + elif cwords[0] in subcommands and cwords[0] != 'help': + subcommand_cls = self.fetch_command(cwords[0]) + # special case: add the names of installed apps to options + if cwords[0] in ('dumpdata', 'sqlmigrate', 'sqlsequencereset', 'test'): + try: + app_configs = apps.get_app_configs() + # Get the last part of the dotted path as the app name. + options.extend((app_config.label, 0) for app_config in app_configs) + except ImportError: + # Fail silently if DJANGO_SETTINGS_MODULE isn't set. The + # user will find out once they execute the command. + pass + parser = subcommand_cls.create_parser('', cwords[0]) + options.extend( + (min(s_opt.option_strings), s_opt.nargs != 0) + for s_opt in parser._actions if s_opt.option_strings + ) + # filter out previously specified options from available options + prev_opts = {x.split('=')[0] for x in cwords[1:cword - 1]} + options = (opt for opt in options if opt[0] not in prev_opts) + + # filter options by current input + options = sorted((k, v) for k, v in options if k.startswith(curr)) + for opt_label, require_arg in options: + # append '=' to options which require args + if require_arg: + opt_label += '=' + print(opt_label) + # Exit code of the bash completion function is never passed back to + # the user, so it's safe to always exit with 0. + # For more details see #25420. + sys.exit(0) + + def execute(self): + """ + Given the command-line arguments, figure out which subcommand is being + run, create a parser appropriate to that command, and run it. 
+ """ + try: + subcommand = self.argv[1] + except IndexError: + subcommand = 'help' # Display help if no arguments were given. + + # Preprocess options to extract --settings and --pythonpath. + # These options could affect the commands that are available, so they + # must be processed early. + parser = CommandParser(None, usage="%(prog)s subcommand [options] [args]", add_help=False) + parser.add_argument('--settings') + parser.add_argument('--pythonpath') + parser.add_argument('args', nargs='*') # catch-all + try: + options, args = parser.parse_known_args(self.argv[2:]) + handle_default_options(options) + except CommandError: + pass # Ignore any option errors at this point. + + try: + settings.INSTALLED_APPS + except ImproperlyConfigured as exc: + self.settings_exception = exc + + if settings.configured: + # Start the auto-reloading dev server even if the code is broken. + # The hardcoded condition is a code smell but we can't rely on a + # flag on the command class because we haven't located it yet. + if subcommand == 'runserver' and '--noreload' not in self.argv: + try: + autoreload.check_errors(django.setup)() + except Exception: + # The exception will be raised later in the child process + # started by the autoreloader. Pretend it didn't happen by + # loading an empty list of applications. + apps.all_models = defaultdict(OrderedDict) + apps.app_configs = OrderedDict() + apps.apps_ready = apps.models_ready = apps.ready = True + + # Remove options not compatible with the built-in runserver + # (e.g. options for the contrib.staticfiles' runserver). + # Changes here require manually testing as described in + # #27522. + _parser = self.fetch_command('runserver').create_parser('django', 'runserver') + _options, _args = _parser.parse_known_args(self.argv[2:]) + for _arg in _args: + self.argv.remove(_arg) + + # In all other cases, django.setup() is required to succeed. 
+ else: + django.setup() + + self.autocomplete() + + if subcommand == 'help': + if '--commands' in args: + sys.stdout.write(self.main_help_text(commands_only=True) + '\n') + elif len(options.args) < 1: + sys.stdout.write(self.main_help_text() + '\n') + else: + self.fetch_command(options.args[0]).print_help(self.prog_name, options.args[0]) + # Special-cases: We want 'django-admin --version' and + # 'django-admin --help' to work, for backwards compatibility. + elif subcommand == 'version' or self.argv[1:] == ['--version']: + sys.stdout.write(django.get_version() + '\n') + elif self.argv[1:] in (['--help'], ['-h']): + sys.stdout.write(self.main_help_text() + '\n') + else: + self.fetch_command(subcommand).run_from_argv(self.argv) + + +def execute_from_command_line(argv=None): + """Run a ManagementUtility.""" + utility = ManagementUtility(argv) + utility.execute() diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/base.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/base.py new file mode 100644 index 0000000000000000000000000000000000000000..41b6b0fa91faf79e9f83f3b11096a9c8f7cfa389 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/base.py @@ -0,0 +1,522 @@ +""" +Base classes for writing management commands (named commands which can +be executed through ``django-admin`` or ``manage.py``). +""" +import os +import sys +from argparse import ArgumentParser +from io import TextIOBase + +import django +from django.core import checks +from django.core.exceptions import ImproperlyConfigured +from django.core.management.color import color_style, no_style +from django.db import DEFAULT_DB_ALIAS, connections + + +class CommandError(Exception): + """ + Exception class indicating a problem while executing a management + command. 
+ + If this exception is raised during the execution of a management + command, it will be caught and turned into a nicely-printed error + message to the appropriate output stream (i.e., stderr); as a + result, raising this exception (with a sensible description of the + error) is the preferred way to indicate that something has gone + wrong in the execution of a command. + """ + pass + + +class SystemCheckError(CommandError): + """ + The system check framework detected unrecoverable errors. + """ + pass + + +class CommandParser(ArgumentParser): + """ + Customized ArgumentParser class to improve some error messages and prevent + SystemExit in several occasions, as SystemExit is unacceptable when a + command is called programmatically. + """ + def __init__(self, cmd, **kwargs): + self.cmd = cmd + super().__init__(**kwargs) + + def parse_args(self, args=None, namespace=None): + # Catch missing argument for a better error message + if (hasattr(self.cmd, 'missing_args_message') and + not (args or any(not arg.startswith('-') for arg in args))): + self.error(self.cmd.missing_args_message) + return super().parse_args(args, namespace) + + def error(self, message): + if self.cmd._called_from_command_line: + super().error(message) + else: + raise CommandError("Error: %s" % message) + + +def handle_default_options(options): + """ + Include any default options that all commands should accept here + so that ManagementUtility can handle them before searching for + user commands. 
+ """ + if options.settings: + os.environ['DJANGO_SETTINGS_MODULE'] = options.settings + if options.pythonpath: + sys.path.insert(0, options.pythonpath) + + +class OutputWrapper(TextIOBase): + """ + Wrapper around stdout/stderr + """ + @property + def style_func(self): + return self._style_func + + @style_func.setter + def style_func(self, style_func): + if style_func and self.isatty(): + self._style_func = style_func + else: + self._style_func = lambda x: x + + def __init__(self, out, style_func=None, ending='\n'): + self._out = out + self.style_func = None + self.ending = ending + + def __getattr__(self, name): + return getattr(self._out, name) + + def isatty(self): + return hasattr(self._out, 'isatty') and self._out.isatty() + + def write(self, msg, style_func=None, ending=None): + ending = self.ending if ending is None else ending + if ending and not msg.endswith(ending): + msg += ending + style_func = style_func or self.style_func + self._out.write(style_func(msg)) + + +class BaseCommand: + """ + The base class from which all management commands ultimately + derive. + + Use this class if you want access to all of the mechanisms which + parse the command-line arguments and work out what code to call in + response; if you don't need to change any of that behavior, + consider using one of the subclasses defined in this file. + + If you are interested in overriding/customizing various aspects of + the command-parsing and -execution behavior, the normal flow works + as follows: + + 1. ``django-admin`` or ``manage.py`` loads the command class + and calls its ``run_from_argv()`` method. + + 2. The ``run_from_argv()`` method calls ``create_parser()`` to get + an ``ArgumentParser`` for the arguments, parses them, performs + any environment changes requested by options like + ``pythonpath``, and then calls the ``execute()`` method, + passing the parsed arguments. + + 3. 
The ``execute()`` method attempts to carry out the command by + calling the ``handle()`` method with the parsed arguments; any + output produced by ``handle()`` will be printed to standard + output and, if the command is intended to produce a block of + SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``. + + 4. If ``handle()`` or ``execute()`` raised any exception (e.g. + ``CommandError``), ``run_from_argv()`` will instead print an error + message to ``stderr``. + + Thus, the ``handle()`` method is typically the starting point for + subclasses; many built-in commands and command types either place + all of their logic in ``handle()``, or perform some additional + parsing work in ``handle()`` and then delegate from it to more + specialized methods as needed. + + Several attributes affect behavior at various steps along the way: + + ``help`` + A short description of the command, which will be printed in + help messages. + + ``output_transaction`` + A boolean indicating whether the command outputs SQL + statements; if ``True``, the output will automatically be + wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is + ``False``. + + ``requires_migrations_checks`` + A boolean; if ``True``, the command prints a warning if the set of + migrations on disk don't match the migrations in the database. + + ``requires_system_checks`` + A boolean; if ``True``, entire Django project will be checked for errors + prior to executing the command. Default value is ``True``. + To validate an individual application's models + rather than all applications' models, call + ``self.check(app_configs)`` from ``handle()``, where ``app_configs`` + is the list of application's configuration provided by the + app registry. + + ``leave_locale_alone`` + A boolean indicating whether the locale set in settings should be + preserved during the execution of the command instead of translations + being deactivated. + + Default value is ``False``. 
+ + Make sure you know what you are doing if you decide to change the value + of this option in your custom command if it creates database content + that is locale-sensitive and such content shouldn't contain any + translations (like it happens e.g. with django.contrib.auth + permissions) as activating any locale might cause unintended effects. + + ``stealth_options`` + A tuple of any options the command uses which aren't defined by the + argument parser. + """ + # Metadata about this command. + help = '' + + # Configuration shortcuts that alter various logic. + _called_from_command_line = False + output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;" + leave_locale_alone = False + requires_migrations_checks = False + requires_system_checks = True + # Arguments, common to all commands, which aren't defined by the argument + # parser. + base_stealth_options = ('skip_checks', 'stderr', 'stdout') + # Command-specific options not defined by the argument parser. + stealth_options = () + + def __init__(self, stdout=None, stderr=None, no_color=False): + self.stdout = OutputWrapper(stdout or sys.stdout) + self.stderr = OutputWrapper(stderr or sys.stderr) + if no_color: + self.style = no_style() + else: + self.style = color_style() + self.stderr.style_func = self.style.ERROR + + def get_version(self): + """ + Return the Django version, which should be correct for all built-in + Django commands. User-supplied commands can override this method to + return their own version. + """ + return django.get_version() + + def create_parser(self, prog_name, subcommand): + """ + Create and return the ``ArgumentParser`` which will be used to + parse the arguments to this command. 
+ """ + parser = CommandParser( + self, prog="%s %s" % (os.path.basename(prog_name), subcommand), + description=self.help or None, + ) + parser.add_argument('--version', action='version', version=self.get_version()) + parser.add_argument( + '-v', '--verbosity', action='store', dest='verbosity', default=1, + type=int, choices=[0, 1, 2, 3], + help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output', + ) + parser.add_argument( + '--settings', + help=( + 'The Python path to a settings module, e.g. ' + '"myproject.settings.main". If this isn\'t provided, the ' + 'DJANGO_SETTINGS_MODULE environment variable will be used.' + ), + ) + parser.add_argument( + '--pythonpath', + help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".', + ) + parser.add_argument('--traceback', action='store_true', help='Raise on CommandError exceptions') + parser.add_argument( + '--no-color', action='store_true', dest='no_color', + help="Don't colorize the command output.", + ) + self.add_arguments(parser) + return parser + + def add_arguments(self, parser): + """ + Entry point for subclassed commands to add custom arguments. + """ + pass + + def print_help(self, prog_name, subcommand): + """ + Print the help message for this command, derived from + ``self.usage()``. + """ + parser = self.create_parser(prog_name, subcommand) + parser.print_help() + + def run_from_argv(self, argv): + """ + Set up any environment changes requested (e.g., Python path + and Django settings), then run this command. If the + command raises a ``CommandError``, intercept it and print it sensibly + to stderr. If the ``--traceback`` option is present or the raised + ``Exception`` is not ``CommandError``, raise it. 
+ """ + self._called_from_command_line = True + parser = self.create_parser(argv[0], argv[1]) + + options = parser.parse_args(argv[2:]) + cmd_options = vars(options) + # Move positional args out of options to mimic legacy optparse + args = cmd_options.pop('args', ()) + handle_default_options(options) + try: + self.execute(*args, **cmd_options) + except Exception as e: + if options.traceback or not isinstance(e, CommandError): + raise + + # SystemCheckError takes care of its own formatting. + if isinstance(e, SystemCheckError): + self.stderr.write(str(e), lambda x: x) + else: + self.stderr.write('%s: %s' % (e.__class__.__name__, e)) + sys.exit(1) + finally: + try: + connections.close_all() + except ImproperlyConfigured: + # Ignore if connections aren't setup at this point (e.g. no + # configured settings). + pass + + def execute(self, *args, **options): + """ + Try to execute this command, performing system checks if needed (as + controlled by the ``requires_system_checks`` attribute, except if + force-skipped). + """ + if options['no_color']: + self.style = no_style() + self.stderr.style_func = None + if options.get('stdout'): + self.stdout = OutputWrapper(options['stdout']) + if options.get('stderr'): + self.stderr = OutputWrapper(options['stderr'], self.stderr.style_func) + + saved_locale = None + if not self.leave_locale_alone: + # Deactivate translations, because django-admin creates database + # content like permissions, and those shouldn't contain any + # translations. 
+ from django.utils import translation + saved_locale = translation.get_language() + translation.deactivate_all() + + try: + if self.requires_system_checks and not options.get('skip_checks'): + self.check() + if self.requires_migrations_checks: + self.check_migrations() + output = self.handle(*args, **options) + if output: + if self.output_transaction: + connection = connections[options.get('database', DEFAULT_DB_ALIAS)] + output = '%s\n%s\n%s' % ( + self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()), + output, + self.style.SQL_KEYWORD(connection.ops.end_transaction_sql()), + ) + self.stdout.write(output) + finally: + if saved_locale is not None: + translation.activate(saved_locale) + return output + + def _run_checks(self, **kwargs): + return checks.run_checks(**kwargs) + + def check(self, app_configs=None, tags=None, display_num_errors=False, + include_deployment_checks=False, fail_level=checks.ERROR): + """ + Use the system check framework to validate entire Django project. + Raise CommandError for any serious message (error or critical errors). + If there are only light messages (like warnings), print them to stderr + and don't raise an exception. 
+ """ + all_issues = self._run_checks( + app_configs=app_configs, + tags=tags, + include_deployment_checks=include_deployment_checks, + ) + + header, body, footer = "", "", "" + visible_issue_count = 0 # excludes silenced warnings + + if all_issues: + debugs = [e for e in all_issues if e.level < checks.INFO and not e.is_silenced()] + infos = [e for e in all_issues if checks.INFO <= e.level < checks.WARNING and not e.is_silenced()] + warnings = [e for e in all_issues if checks.WARNING <= e.level < checks.ERROR and not e.is_silenced()] + errors = [e for e in all_issues if checks.ERROR <= e.level < checks.CRITICAL and not e.is_silenced()] + criticals = [e for e in all_issues if checks.CRITICAL <= e.level and not e.is_silenced()] + sorted_issues = [ + (criticals, 'CRITICALS'), + (errors, 'ERRORS'), + (warnings, 'WARNINGS'), + (infos, 'INFOS'), + (debugs, 'DEBUGS'), + ] + + for issues, group_name in sorted_issues: + if issues: + visible_issue_count += len(issues) + formatted = ( + self.style.ERROR(str(e)) + if e.is_serious() + else self.style.WARNING(str(e)) + for e in issues) + formatted = "\n".join(sorted(formatted)) + body += '\n%s:\n%s\n' % (group_name, formatted) + + if visible_issue_count: + header = "System check identified some issues:\n" + + if display_num_errors: + if visible_issue_count: + footer += '\n' + footer += "System check identified %s (%s silenced)." 
% ( + "no issues" if visible_issue_count == 0 else + "1 issue" if visible_issue_count == 1 else + "%s issues" % visible_issue_count, + len(all_issues) - visible_issue_count, + ) + + if any(e.is_serious(fail_level) and not e.is_silenced() for e in all_issues): + msg = self.style.ERROR("SystemCheckError: %s" % header) + body + footer + raise SystemCheckError(msg) + else: + msg = header + body + footer + + if msg: + if visible_issue_count: + self.stderr.write(msg, lambda x: x) + else: + self.stdout.write(msg) + + def check_migrations(self): + """ + Print a warning if the set of migrations on disk don't match the + migrations in the database. + """ + from django.db.migrations.executor import MigrationExecutor + try: + executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS]) + except ImproperlyConfigured: + # No databases are configured (or the dummy one) + return + + plan = executor.migration_plan(executor.loader.graph.leaf_nodes()) + if plan: + apps_waiting_migration = sorted({migration.app_label for migration, backwards in plan}) + self.stdout.write( + self.style.NOTICE( + "\nYou have %(unpplied_migration_count)s unapplied migration(s). " + "Your project may not work properly until you apply the " + "migrations for app(s): %(apps_waiting_migration)s." % { + "unpplied_migration_count": len(plan), + "apps_waiting_migration": ", ".join(apps_waiting_migration), + } + ) + ) + self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n")) + + def handle(self, *args, **options): + """ + The actual logic of the command. Subclasses must implement + this method. + """ + raise NotImplementedError('subclasses of BaseCommand must provide a handle() method') + + +class AppCommand(BaseCommand): + """ + A management command which takes one or more installed application labels + as arguments, and does something with each of them. 
+ + Rather than implementing ``handle()``, subclasses must implement + ``handle_app_config()``, which will be called once for each application. + """ + missing_args_message = "Enter at least one application label." + + def add_arguments(self, parser): + parser.add_argument('args', metavar='app_label', nargs='+', help='One or more application label.') + + def handle(self, *app_labels, **options): + from django.apps import apps + try: + app_configs = [apps.get_app_config(app_label) for app_label in app_labels] + except (LookupError, ImportError) as e: + raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e) + output = [] + for app_config in app_configs: + app_output = self.handle_app_config(app_config, **options) + if app_output: + output.append(app_output) + return '\n'.join(output) + + def handle_app_config(self, app_config, **options): + """ + Perform the command's actions for app_config, an AppConfig instance + corresponding to an application label given on the command line. + """ + raise NotImplementedError( + "Subclasses of AppCommand must provide" + "a handle_app_config() method.") + + +class LabelCommand(BaseCommand): + """ + A management command which takes one or more arbitrary arguments + (labels) on the command line, and does something with each of + them. + + Rather than implementing ``handle()``, subclasses must implement + ``handle_label()``, which will be called once for each label. + + If the arguments should be names of installed applications, use + ``AppCommand`` instead. + """ + label = 'label' + missing_args_message = "Enter at least one %s." 
def supports_color():
    """
    Return True if the running system's terminal supports color,
    and False otherwise.
    """
    # 'Pocket PC' terminals never support color; win32 consoles only do when
    # the ANSICON environment variable is set by the ANSICON shim.
    unsupported_platform = sys.platform == 'Pocket PC' or (
        sys.platform == 'win32' and 'ANSICON' not in os.environ
    )
    # isatty is not always implemented, #6223.
    isatty = getattr(sys.stdout, 'isatty', None)
    return not unsupported_platform and bool(isatty and isatty())
+ # Use that palette as the basis for populating + # the palette as defined in the environment. + for role in termcolors.PALETTES[termcolors.NOCOLOR_PALETTE]: + if color_settings: + format = color_settings.get(role, {}) + style_func = termcolors.make_style(**format) + else: + def style_func(x): + return x + setattr(style, role, style_func) + + # For backwards compatibility, + # set style for ERROR_OUTPUT == ERROR + style.ERROR_OUTPUT = style.ERROR + + return style + + +@functools.lru_cache(maxsize=None) +def no_style(): + """ + Return a Style object with no color scheme. + """ + return make_style('nocolor') + + +def color_style(): + """ + Return a Style object from the Django color scheme. + """ + if not supports_color(): + return no_style() + return make_style(os.environ.get('DJANGO_COLORS', '')) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/check.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/check.py new file mode 100644 index 0000000000000000000000000000000000000000..e119960f441c094f9b2ee7c56a904b9f0e43f8ec --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/check.py @@ -0,0 +1,66 @@ +from django.apps import apps +from django.core import checks +from django.core.checks.registry import registry +from django.core.management.base import BaseCommand, CommandError + + +class Command(BaseCommand): + help = "Checks the entire Django project for potential problems." 
+ + requires_system_checks = False + + def add_arguments(self, parser): + parser.add_argument('args', metavar='app_label', nargs='*') + parser.add_argument( + '--tag', '-t', action='append', dest='tags', + help='Run only checks labeled with given tag.', + ) + parser.add_argument( + '--list-tags', action='store_true', dest='list_tags', + help='List available tags.', + ) + parser.add_argument( + '--deploy', action='store_true', dest='deploy', + help='Check deployment settings.', + ) + parser.add_argument( + '--fail-level', + default='ERROR', + choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'], + dest='fail_level', + help=( + 'Message level that will cause the command to exit with a ' + 'non-zero status. Default is ERROR.' + ), + ) + + def handle(self, *app_labels, **options): + include_deployment_checks = options['deploy'] + if options['list_tags']: + self.stdout.write('\n'.join(sorted(registry.tags_available(include_deployment_checks)))) + return + + if app_labels: + app_configs = [apps.get_app_config(app_label) for app_label in app_labels] + else: + app_configs = None + + tags = options['tags'] + if tags: + try: + invalid_tag = next( + tag for tag in tags if not checks.tag_exists(tag, include_deployment_checks) + ) + except StopIteration: + # no invalid tags + pass + else: + raise CommandError('There is no system check with the "%s" tag.' 
def has_bom(fn):
    """Return True if the file at path *fn* begins with a UTF-8 or UTF-16 BOM."""
    with open(fn, 'rb') as f:
        sample = f.read(4)
    return sample.startswith((codecs.BOM_UTF8, codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE))


def is_writable(path):
    """
    Return True if *path* can be opened for appending.

    Known side effect: updating file access/modified time to current time if
    it is writable.
    """
    try:
        with open(path, 'a'):
            os.utime(path, None)
    except OSError:
        # IOError has been an alias of OSError since Python 3.3, so catching
        # OSError alone covers the original (IOError, OSError) pair.
        return False
    return True
Can be used multiple times.', + ) + parser.add_argument( + '--use-fuzzy', '-f', dest='fuzzy', action='store_true', + help='Use fuzzy translations.', + ) + + def handle(self, **options): + locale = options['locale'] + exclude = options['exclude'] + self.verbosity = options['verbosity'] + if options['fuzzy']: + self.program_options = self.program_options + ['-f'] + + if find_command(self.program) is None: + raise CommandError("Can't find %s. Make sure you have GNU gettext " + "tools 0.15 or newer installed." % self.program) + + basedirs = [os.path.join('conf', 'locale'), 'locale'] + if os.environ.get('DJANGO_SETTINGS_MODULE'): + from django.conf import settings + basedirs.extend(settings.LOCALE_PATHS) + + # Walk entire tree, looking for locale directories + for dirpath, dirnames, filenames in os.walk('.', topdown=True): + for dirname in dirnames: + if dirname == 'locale': + basedirs.append(os.path.join(dirpath, dirname)) + + # Gather existing directories. + basedirs = set(map(os.path.abspath, filter(os.path.isdir, basedirs))) + + if not basedirs: + raise CommandError("This script should be run from the Django Git " + "checkout or your project or app tree, or with " + "the settings module specified.") + + # Build locale list + all_locales = [] + for basedir in basedirs: + locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % basedir)) + all_locales.extend(map(os.path.basename, locale_dirs)) + + # Account for excluded locales + locales = locale or all_locales + locales = set(locales).difference(exclude) + + for basedir in basedirs: + if locales: + dirs = [os.path.join(basedir, l, 'LC_MESSAGES') for l in locales] + else: + dirs = [basedir] + locations = [] + for ldir in dirs: + for dirpath, dirnames, filenames in os.walk(ldir): + locations.extend((dirpath, f) for f in filenames if f.endswith('.po')) + if locations: + self.compile_messages(locations) + + def compile_messages(self, locations): + """ + Locations is a list of tuples: [(directory, file), ...] 
+ """ + for i, (dirpath, f) in enumerate(locations): + if self.verbosity > 0: + self.stdout.write('processing file %s in %s\n' % (f, dirpath)) + po_path = os.path.join(dirpath, f) + if has_bom(po_path): + raise CommandError("The %s file has a BOM (Byte Order Mark). " + "Django only supports .po files encoded in " + "UTF-8 and without any BOM." % po_path) + base_path = os.path.splitext(po_path)[0] + + # Check writability on first location + if i == 0 and not is_writable(base_path + '.mo'): + self.stderr.write("The po files under %s are in a seemingly not writable location. " + "mo files will not be updated/created." % dirpath) + return + + args = [self.program] + self.program_options + [ + '-o', base_path + '.mo', base_path + '.po' + ] + output, errors, status = popen_wrapper(args) + if status: + if errors: + msg = "Execution of %s failed: %s" % (self.program, errors) + else: + msg = "Execution of %s failed" % self.program + raise CommandError(msg) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/createcachetable.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/createcachetable.py new file mode 100644 index 0000000000000000000000000000000000000000..8f9482d39010d02527f93c3b75bf09e0af1f7f66 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/createcachetable.py @@ -0,0 +1,108 @@ +from django.conf import settings +from django.core.cache import caches +from django.core.cache.backends.db import BaseDatabaseCache +from django.core.management.base import BaseCommand, CommandError +from django.db import ( + DEFAULT_DB_ALIAS, connections, models, router, transaction, +) +from django.db.utils import DatabaseError + + +class Command(BaseCommand): + help = "Creates the tables needed to use the SQL cache backend." 
+ + requires_system_checks = False + + def add_arguments(self, parser): + parser.add_argument( + 'args', metavar='table_name', nargs='*', + help='Optional table names. Otherwise, settings.CACHES is used to find cache tables.', + ) + parser.add_argument( + '--database', action='store', dest='database', + default=DEFAULT_DB_ALIAS, + help='Nominates a database onto which the cache tables will be ' + 'installed. Defaults to the "default" database.', + ) + parser.add_argument( + '--dry-run', action='store_true', dest='dry_run', + help='Does not create the table, just prints the SQL that would be run.', + ) + + def handle(self, *tablenames, **options): + db = options['database'] + self.verbosity = options['verbosity'] + dry_run = options['dry_run'] + if len(tablenames): + # Legacy behavior, tablename specified as argument + for tablename in tablenames: + self.create_table(db, tablename, dry_run) + else: + for cache_alias in settings.CACHES: + cache = caches[cache_alias] + if isinstance(cache, BaseDatabaseCache): + self.create_table(db, cache._table, dry_run) + + def create_table(self, database, tablename, dry_run): + cache = BaseDatabaseCache(tablename, {}) + if not router.allow_migrate_model(database, cache.cache_model_class): + return + connection = connections[database] + + if tablename in connection.introspection.table_names(): + if self.verbosity > 0: + self.stdout.write("Cache table '%s' already exists." % tablename) + return + + fields = ( + # "key" is a reserved word in MySQL, so use "cache_key" instead. 
+ models.CharField(name='cache_key', max_length=255, unique=True, primary_key=True), + models.TextField(name='value'), + models.DateTimeField(name='expires', db_index=True), + ) + table_output = [] + index_output = [] + qn = connection.ops.quote_name + for f in fields: + field_output = [ + qn(f.name), + f.db_type(connection=connection), + '%sNULL' % ('NOT ' if not f.null else ''), + ] + if f.primary_key: + field_output.append("PRIMARY KEY") + elif f.unique: + field_output.append("UNIQUE") + if f.db_index: + unique = "UNIQUE " if f.unique else "" + index_output.append( + "CREATE %sINDEX %s ON %s (%s);" % + (unique, qn('%s_%s' % (tablename, f.name)), qn(tablename), qn(f.name)) + ) + table_output.append(" ".join(field_output)) + full_statement = ["CREATE TABLE %s (" % qn(tablename)] + for i, line in enumerate(table_output): + full_statement.append(' %s%s' % (line, ',' if i < len(table_output) - 1 else '')) + full_statement.append(');') + + full_statement = "\n".join(full_statement) + + if dry_run: + self.stdout.write(full_statement) + for statement in index_output: + self.stdout.write(statement) + return + + with transaction.atomic(using=database, savepoint=connection.features.can_rollback_ddl): + with connection.cursor() as curs: + try: + curs.execute(full_statement) + except DatabaseError as e: + raise CommandError( + "Cache table '%s' could not be created.\nThe error was: %s." % + (tablename, e)) + for statement in index_output: + curs.execute(statement) + + if self.verbosity > 1: + self.stdout.write("Cache table '%s' created." 
% tablename) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/dbshell.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/dbshell.py new file mode 100644 index 0000000000000000000000000000000000000000..eda1ff68c9c1e57887e5c374732525666d07e927 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/dbshell.py @@ -0,0 +1,31 @@ +from django.core.management.base import BaseCommand, CommandError +from django.db import DEFAULT_DB_ALIAS, connections + + +class Command(BaseCommand): + help = ( + "Runs the command-line client for specified database, or the " + "default database if none is provided." + ) + + requires_system_checks = False + + def add_arguments(self, parser): + parser.add_argument( + '--database', action='store', dest='database', default=DEFAULT_DB_ALIAS, + help='Nominates a database onto which to open a shell. Defaults to the "default" database.', + ) + + def handle(self, **options): + connection = connections[options['database']] + try: + connection.client.runshell() + except OSError: + # Note that we're assuming OSError means that the client program + # isn't installed. There's a possibility OSError would be raised + # for some other reason, in which case this error message would be + # inaccurate. Still, this message catches the common case. + raise CommandError( + 'You appear not to have the %r program installed or on your path.' 
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
    """Convert a module namespace to a Python dictionary."""
    result = {}
    for name, value in vars(module).items():
        if omittable(name):
            continue
        result[name] = repr(value)
    return result
+ ), + ) + + def handle(self, **options): + from django.conf import settings, Settings, global_settings + + # Because settings are imported lazily, we need to explicitly load them. + settings._setup() + + user_settings = module_to_dict(settings._wrapped) + default = options['default'] + default_settings = module_to_dict(Settings(default) if default else global_settings) + output_func = { + 'hash': self.output_hash, + 'unified': self.output_unified, + }[options['output']] + return '\n'.join(output_func(user_settings, default_settings, **options)) + + def output_hash(self, user_settings, default_settings, **options): + # Inspired by Postfix's "postconf -n". + output = [] + for key in sorted(user_settings): + if key not in default_settings: + output.append("%s = %s ###" % (key, user_settings[key])) + elif user_settings[key] != default_settings[key]: + output.append("%s = %s" % (key, user_settings[key])) + elif options['all']: + output.append("### %s = %s" % (key, user_settings[key])) + return output + + def output_unified(self, user_settings, default_settings, **options): + output = [] + for key in sorted(user_settings): + if key not in default_settings: + output.append(self.style.SUCCESS("+ %s = %s" % (key, user_settings[key]))) + elif user_settings[key] != default_settings[key]: + output.append(self.style.ERROR("- %s = %s" % (key, default_settings[key]))) + output.append(self.style.SUCCESS("+ %s = %s" % (key, user_settings[key]))) + elif options['all']: + output.append(" %s = %s" % (key, user_settings[key])) + return output diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/dumpdata.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/dumpdata.py new file mode 100644 index 0000000000000000000000000000000000000000..d32b191b5a70fd7058ec5514daed759651a168c0 --- /dev/null +++ 
b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/dumpdata.py @@ -0,0 +1,194 @@ +import warnings +from collections import OrderedDict + +from django.apps import apps +from django.core import serializers +from django.core.management.base import BaseCommand, CommandError +from django.core.management.utils import parse_apps_and_model_labels +from django.db import DEFAULT_DB_ALIAS, router + + +class ProxyModelWarning(Warning): + pass + + +class Command(BaseCommand): + help = ( + "Output the contents of the database as a fixture of the given format " + "(using each model's default manager unless --all is specified)." + ) + + def add_arguments(self, parser): + parser.add_argument( + 'args', metavar='app_label[.ModelName]', nargs='*', + help='Restricts dumped data to the specified app_label or app_label.ModelName.', + ) + parser.add_argument( + '--format', default='json', dest='format', + help='Specifies the output serialization format for fixtures.', + ) + parser.add_argument( + '--indent', default=None, dest='indent', type=int, + help='Specifies the indent level to use when pretty-printing output.', + ) + parser.add_argument( + '--database', action='store', dest='database', + default=DEFAULT_DB_ALIAS, + help='Nominates a specific database to dump fixtures from. 
' + 'Defaults to the "default" database.', + ) + parser.add_argument( + '-e', '--exclude', dest='exclude', action='append', default=[], + help='An app_label or app_label.ModelName to exclude ' + '(use multiple --exclude to exclude multiple apps/models).', + ) + parser.add_argument( + '--natural-foreign', action='store_true', dest='use_natural_foreign_keys', + help='Use natural foreign keys if they are available.', + ) + parser.add_argument( + '--natural-primary', action='store_true', dest='use_natural_primary_keys', + help='Use natural primary keys if they are available.', + ) + parser.add_argument( + '-a', '--all', action='store_true', dest='use_base_manager', + help="Use Django's base manager to dump all models stored in the database, " + "including those that would otherwise be filtered or modified by a custom manager.", + ) + parser.add_argument( + '--pks', dest='primary_keys', + help="Only dump objects with given primary keys. Accepts a comma-separated " + "list of keys. This option only works when you specify one model.", + ) + parser.add_argument( + '-o', '--output', default=None, dest='output', + help='Specifies file to which the output is written.' 
+ ) + + def handle(self, *app_labels, **options): + format = options['format'] + indent = options['indent'] + using = options['database'] + excludes = options['exclude'] + output = options['output'] + show_traceback = options['traceback'] + use_natural_foreign_keys = options['use_natural_foreign_keys'] + use_natural_primary_keys = options['use_natural_primary_keys'] + use_base_manager = options['use_base_manager'] + pks = options['primary_keys'] + + if pks: + primary_keys = [pk.strip() for pk in pks.split(',')] + else: + primary_keys = [] + + excluded_models, excluded_apps = parse_apps_and_model_labels(excludes) + + if len(app_labels) == 0: + if primary_keys: + raise CommandError("You can only use --pks option with one model") + app_list = OrderedDict.fromkeys( + app_config for app_config in apps.get_app_configs() + if app_config.models_module is not None and app_config not in excluded_apps + ) + else: + if len(app_labels) > 1 and primary_keys: + raise CommandError("You can only use --pks option with one model") + app_list = OrderedDict() + for label in app_labels: + try: + app_label, model_label = label.split('.') + try: + app_config = apps.get_app_config(app_label) + except LookupError as e: + raise CommandError(str(e)) + if app_config.models_module is None or app_config in excluded_apps: + continue + try: + model = app_config.get_model(model_label) + except LookupError: + raise CommandError("Unknown model: %s.%s" % (app_label, model_label)) + + app_list_value = app_list.setdefault(app_config, []) + + # We may have previously seen a "all-models" request for + # this app (no model qualifier was given). In this case + # there is no need adding specific models to the list. 
+ if app_list_value is not None: + if model not in app_list_value: + app_list_value.append(model) + except ValueError: + if primary_keys: + raise CommandError("You can only use --pks option with one model") + # This is just an app - no model qualifier + app_label = label + try: + app_config = apps.get_app_config(app_label) + except LookupError as e: + raise CommandError(str(e)) + if app_config.models_module is None or app_config in excluded_apps: + continue + app_list[app_config] = None + + # Check that the serialization format exists; this is a shortcut to + # avoid collating all the objects and _then_ failing. + if format not in serializers.get_public_serializer_formats(): + try: + serializers.get_serializer(format) + except serializers.SerializerDoesNotExist: + pass + + raise CommandError("Unknown serialization format: %s" % format) + + def get_objects(count_only=False): + """ + Collate the objects to be serialized. If count_only is True, just + count the number of objects to be serialized. + """ + models = serializers.sort_dependencies(app_list.items()) + for model in models: + if model in excluded_models: + continue + if model._meta.proxy and model._meta.proxy_for_model not in models: + warnings.warn( + "%s is a proxy model and won't be serialized." 
% model._meta.label, + category=ProxyModelWarning, + ) + if not model._meta.proxy and router.allow_migrate_model(using, model): + if use_base_manager: + objects = model._base_manager + else: + objects = model._default_manager + + queryset = objects.using(using).order_by(model._meta.pk.name) + if primary_keys: + queryset = queryset.filter(pk__in=primary_keys) + if count_only: + yield queryset.order_by().count() + else: + yield from queryset.iterator() + + try: + self.stdout.ending = None + progress_output = None + object_count = 0 + # If dumpdata is outputting to stdout, there is no way to display progress + if output and self.stdout.isatty() and options['verbosity'] > 0: + progress_output = self.stdout + object_count = sum(get_objects(count_only=True)) + stream = open(output, 'w') if output else None + try: + serializers.serialize( + format, get_objects(), indent=indent, + use_natural_foreign_keys=use_natural_foreign_keys, + use_natural_primary_keys=use_natural_primary_keys, + stream=stream or self.stdout, progress_output=progress_output, + object_count=object_count, + ) + finally: + if stream: + stream.close() + except Exception as e: + if show_traceback: + raise + raise CommandError("Unable to serialize database: %s" % e) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/flush.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/flush.py new file mode 100644 index 0000000000000000000000000000000000000000..f6ae83940a534c18bf9029f0ba7b1f023ab296c2 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/flush.py @@ -0,0 +1,82 @@ +from importlib import import_module + +from django.apps import apps +from django.core.management.base import BaseCommand, CommandError +from django.core.management.color import no_style +from django.core.management.sql import emit_post_migrate_signal, 
sql_flush +from django.db import DEFAULT_DB_ALIAS, connections + + +class Command(BaseCommand): + help = ( + 'Removes ALL DATA from the database, including data added during ' + 'migrations. Does not achieve a "fresh install" state.' + ) + stealth_options = ('reset_sequences', 'allow_cascade', 'inhibit_post_migrate') + + def add_arguments(self, parser): + parser.add_argument( + '--noinput', '--no-input', action='store_false', dest='interactive', + help='Tells Django to NOT prompt the user for input of any kind.', + ) + parser.add_argument( + '--database', action='store', dest='database', default=DEFAULT_DB_ALIAS, + help='Nominates a database to flush. Defaults to the "default" database.', + ) + + def handle(self, **options): + database = options['database'] + connection = connections[database] + verbosity = options['verbosity'] + interactive = options['interactive'] + # The following are stealth options used by Django's internals. + reset_sequences = options.get('reset_sequences', True) + allow_cascade = options.get('allow_cascade', False) + inhibit_post_migrate = options.get('inhibit_post_migrate', False) + + self.style = no_style() + + # Import the 'management' module within each installed app, to register + # dispatcher events. + for app_config in apps.get_app_configs(): + try: + import_module('.management', app_config.name) + except ImportError: + pass + + sql_list = sql_flush(self.style, connection, only_django=True, + reset_sequences=reset_sequences, + allow_cascade=allow_cascade) + + if interactive: + confirm = input("""You have requested a flush of the database. +This will IRREVERSIBLY DESTROY all data currently in the %r database, +and return each table to an empty state. +Are you sure you want to do this? 
+ + Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME']) + else: + confirm = 'yes' + + if confirm == 'yes': + try: + connection.ops.execute_sql_flush(database, sql_list) + except Exception as exc: + raise CommandError( + "Database %s couldn't be flushed. Possible reasons:\n" + " * The database isn't running or isn't configured correctly.\n" + " * At least one of the expected database tables doesn't exist.\n" + " * The SQL was invalid.\n" + "Hint: Look at the output of 'django-admin sqlflush'. " + "That's the SQL this command wasn't able to run.\n" % ( + connection.settings_dict['NAME'], + ) + ) from exc + + # Empty sql_list may signify an empty database and post_migrate would then crash + if sql_list and not inhibit_post_migrate: + # Emit the post migrate signal. This allows individual applications to + # respond as if the database had been migrated from scratch. + emit_post_migrate_signal(verbosity, interactive, database) + else: + self.stdout.write("Flush cancelled.\n") diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/inspectdb.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/inspectdb.py new file mode 100644 index 0000000000000000000000000000000000000000..dbcb5d1f383318e454cbe54082b9eb97d6dc7fe0 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/inspectdb.py @@ -0,0 +1,282 @@ +import keyword +import re +from collections import OrderedDict + +from django.core.management.base import BaseCommand, CommandError +from django.db import DEFAULT_DB_ALIAS, connections +from django.db.models.constants import LOOKUP_SEP + + +class Command(BaseCommand): + help = "Introspects the database tables in the given database and outputs a Django model module." 
+ requires_system_checks = False + stealth_options = ('table_name_filter', ) + db_module = 'django.db' + + def add_arguments(self, parser): + parser.add_argument( + 'table', action='store', nargs='*', type=str, + help='Selects what tables or views should be introspected.', + ) + parser.add_argument( + '--database', action='store', dest='database', default=DEFAULT_DB_ALIAS, + help='Nominates a database to introspect. Defaults to using the "default" database.', + ) + + def handle(self, **options): + try: + for line in self.handle_inspection(options): + self.stdout.write("%s\n" % line) + except NotImplementedError: + raise CommandError("Database inspection isn't supported for the currently selected database backend.") + + def handle_inspection(self, options): + connection = connections[options['database']] + # 'table_name_filter' is a stealth option + table_name_filter = options.get('table_name_filter') + + def table2model(table_name): + return re.sub(r'[^a-zA-Z0-9]', '', table_name.title()) + + def strip_prefix(s): + return s[1:] if s.startswith("u'") else s + + with connection.cursor() as cursor: + yield "# This is an auto-generated Django model module." + yield "# You'll have to do the following manually to clean this up:" + yield "# * Rearrange models' order" + yield "# * Make sure each model has one field with primary_key=True" + yield "# * Make sure each ForeignKey has `on_delete` set to the desired behavior." + yield ( + "# * Remove `managed = False` lines if you wish to allow " + "Django to create, modify, and delete the table" + ) + yield "# Feel free to rename the models, but don't rename db_table values or field names." 
+ yield 'from %s import models' % self.db_module + known_models = [] + tables_to_introspect = options['table'] or connection.introspection.table_names(cursor) + + for table_name in tables_to_introspect: + if table_name_filter is not None and callable(table_name_filter): + if not table_name_filter(table_name): + continue + try: + try: + relations = connection.introspection.get_relations(cursor, table_name) + except NotImplementedError: + relations = {} + try: + constraints = connection.introspection.get_constraints(cursor, table_name) + except NotImplementedError: + constraints = {} + primary_key_column = connection.introspection.get_primary_key_column(cursor, table_name) + unique_columns = [ + c['columns'][0] for c in constraints.values() + if c['unique'] and len(c['columns']) == 1 + ] + table_description = connection.introspection.get_table_description(cursor, table_name) + except Exception as e: + yield "# Unable to inspect table '%s'" % table_name + yield "# The error was: %s" % e + continue + + yield '' + yield '' + yield 'class %s(models.Model):' % table2model(table_name) + known_models.append(table2model(table_name)) + used_column_names = [] # Holds column names used in the table so far + column_to_field_name = {} # Maps column names to names of model fields + for row in table_description: + comment_notes = [] # Holds Field notes, to be displayed in a Python comment. + extra_params = OrderedDict() # Holds Field parameters such as 'db_column'. + column_name = row[0] + is_relation = column_name in relations + + att_name, params, notes = self.normalize_col_name( + column_name, used_column_names, is_relation) + extra_params.update(params) + comment_notes.extend(notes) + + used_column_names.append(att_name) + column_to_field_name[column_name] = att_name + + # Add primary_key and unique, if necessary. 
+ if column_name == primary_key_column: + extra_params['primary_key'] = True + elif column_name in unique_columns: + extra_params['unique'] = True + + if is_relation: + rel_to = ( + "self" if relations[column_name][1] == table_name + else table2model(relations[column_name][1]) + ) + if rel_to in known_models: + field_type = 'ForeignKey(%s' % rel_to + else: + field_type = "ForeignKey('%s'" % rel_to + else: + # Calling `get_field_type` to get the field type string and any + # additional parameters and notes. + field_type, field_params, field_notes = self.get_field_type(connection, table_name, row) + extra_params.update(field_params) + comment_notes.extend(field_notes) + + field_type += '(' + + # Don't output 'id = meta.AutoField(primary_key=True)', because + # that's assumed if it doesn't exist. + if att_name == 'id' and extra_params == {'primary_key': True}: + if field_type == 'AutoField(': + continue + elif field_type == 'IntegerField(' and not connection.features.can_introspect_autofield: + comment_notes.append('AutoField?') + + # Add 'null' and 'blank', if the 'null_ok' flag was present in the + # table description. + if row[6]: # If it's NULL... + if field_type == 'BooleanField(': + field_type = 'NullBooleanField(' + else: + extra_params['blank'] = True + extra_params['null'] = True + + field_desc = '%s = %s%s' % ( + att_name, + # Custom fields will have a dotted path + '' if '.' 
in field_type else 'models.', + field_type, + ) + if field_type.startswith('ForeignKey('): + field_desc += ', models.DO_NOTHING' + + if extra_params: + if not field_desc.endswith('('): + field_desc += ', ' + field_desc += ', '.join( + '%s=%s' % (k, strip_prefix(repr(v))) + for k, v in extra_params.items()) + field_desc += ')' + if comment_notes: + field_desc += ' # ' + ' '.join(comment_notes) + yield ' %s' % field_desc + for meta_line in self.get_meta(table_name, constraints, column_to_field_name): + yield meta_line + + def normalize_col_name(self, col_name, used_column_names, is_relation): + """ + Modify the column name to make it Python-compatible as a field name + """ + field_params = {} + field_notes = [] + + new_name = col_name.lower() + if new_name != col_name: + field_notes.append('Field name made lowercase.') + + if is_relation: + if new_name.endswith('_id'): + new_name = new_name[:-3] + else: + field_params['db_column'] = col_name + + new_name, num_repl = re.subn(r'\W', '_', new_name) + if num_repl > 0: + field_notes.append('Field renamed to remove unsuitable characters.') + + if new_name.find(LOOKUP_SEP) >= 0: + while new_name.find(LOOKUP_SEP) >= 0: + new_name = new_name.replace(LOOKUP_SEP, '_') + if col_name.lower().find(LOOKUP_SEP) >= 0: + # Only add the comment if the double underscore was in the original name + field_notes.append("Field renamed because it contained more than one '_' in a row.") + + if new_name.startswith('_'): + new_name = 'field%s' % new_name + field_notes.append("Field renamed because it started with '_'.") + + if new_name.endswith('_'): + new_name = '%sfield' % new_name + field_notes.append("Field renamed because it ended with '_'.") + + if keyword.iskeyword(new_name): + new_name += '_field' + field_notes.append('Field renamed because it was a Python reserved word.') + + if new_name[0].isdigit(): + new_name = 'number_%s' % new_name + field_notes.append("Field renamed because it wasn't a valid Python identifier.") + + if new_name in 
used_column_names: + num = 0 + while '%s_%d' % (new_name, num) in used_column_names: + num += 1 + new_name = '%s_%d' % (new_name, num) + field_notes.append('Field renamed because of name conflict.') + + if col_name != new_name and field_notes: + field_params['db_column'] = col_name + + return new_name, field_params, field_notes + + def get_field_type(self, connection, table_name, row): + """ + Given the database connection, the table name, and the cursor row + description, this routine will return the given field type name, as + well as any additional keyword parameters and notes for the field. + """ + field_params = OrderedDict() + field_notes = [] + + try: + field_type = connection.introspection.get_field_type(row[1], row) + except KeyError: + field_type = 'TextField' + field_notes.append('This field type is a guess.') + + # This is a hook for data_types_reverse to return a tuple of + # (field_type, field_params_dict). + if type(field_type) is tuple: + field_type, new_params = field_type + field_params.update(new_params) + + # Add max_length for all CharFields. + if field_type == 'CharField' and row[3]: + field_params['max_length'] = int(row[3]) + + if field_type == 'DecimalField': + if row[4] is None or row[5] is None: + field_notes.append( + 'max_digits and decimal_places have been guessed, as this ' + 'database handles decimal fields as float') + field_params['max_digits'] = row[4] if row[4] is not None else 10 + field_params['decimal_places'] = row[5] if row[5] is not None else 5 + else: + field_params['max_digits'] = row[4] + field_params['decimal_places'] = row[5] + + return field_type, field_params, field_notes + + def get_meta(self, table_name, constraints, column_to_field_name): + """ + Return a sequence comprising the lines of code necessary + to construct the inner Meta class for the model corresponding + to the given database table name. 
+ """ + unique_together = [] + for index, params in constraints.items(): + if params['unique']: + columns = params['columns'] + if len(columns) > 1: + # we do not want to include the u"" or u'' prefix + # so we build the string rather than interpolate the tuple + tup = '(' + ', '.join("'%s'" % column_to_field_name[c] for c in columns) + ')' + unique_together.append(tup) + meta = ["", + " class Meta:", + " managed = False", + " db_table = '%s'" % table_name] + if unique_together: + tup = '(' + ', '.join(unique_together) + ',)' + meta += [" unique_together = %s" % tup] + return meta diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/loaddata.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/loaddata.py new file mode 100644 index 0000000000000000000000000000000000000000..bed6be14e43e01be8524b43b31ee69c5ed338192 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/loaddata.py @@ -0,0 +1,344 @@ +import functools +import glob +import gzip +import os +import sys +import warnings +import zipfile +from itertools import product + +from django.apps import apps +from django.conf import settings +from django.core import serializers +from django.core.exceptions import ImproperlyConfigured +from django.core.management.base import BaseCommand, CommandError +from django.core.management.color import no_style +from django.core.management.utils import parse_apps_and_model_labels +from django.db import ( + DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connections, router, + transaction, +) +from django.utils.functional import cached_property + +try: + import bz2 + has_bz2 = True +except ImportError: + has_bz2 = False + +READ_STDIN = '-' + + +class Command(BaseCommand): + help = 'Installs the named fixture(s) in the database.' + missing_args_message = ( + "No database fixture specified. 
Please provide the path of at least " + "one fixture in the command line." + ) + + def add_arguments(self, parser): + parser.add_argument('args', metavar='fixture', nargs='+', help='Fixture labels.') + parser.add_argument( + '--database', action='store', dest='database', default=DEFAULT_DB_ALIAS, + help='Nominates a specific database to load fixtures into. Defaults to the "default" database.', + ) + parser.add_argument( + '--app', action='store', dest='app_label', default=None, + help='Only look for fixtures in the specified app.', + ) + parser.add_argument( + '--ignorenonexistent', '-i', action='store_true', dest='ignore', + help='Ignores entries in the serialized data for fields that do not ' + 'currently exist on the model.', + ) + parser.add_argument( + '-e', '--exclude', dest='exclude', action='append', default=[], + help='An app_label or app_label.ModelName to exclude. Can be used multiple times.', + ) + parser.add_argument( + '--format', action='store', dest='format', default=None, + help='Format of serialized data when reading from stdin.', + ) + + def handle(self, *fixture_labels, **options): + self.ignore = options['ignore'] + self.using = options['database'] + self.app_label = options['app_label'] + self.verbosity = options['verbosity'] + self.excluded_models, self.excluded_apps = parse_apps_and_model_labels(options['exclude']) + self.format = options['format'] + + with transaction.atomic(using=self.using): + self.loaddata(fixture_labels) + + # Close the DB connection -- unless we're still in a transaction. This + # is required as a workaround for an edge case in MySQL: if the same + # connection is used to create tables, load data, and query, the query + # can return incorrect results. See Django #7572, MySQL #37735. 
    def loaddata(self, fixture_labels):
        """
        Load every fixture matching ``fixture_labels`` into the ``self.using``
        database.

        Populates the bookkeeping attributes (``fixture_count``,
        ``loaded_object_count``, ``fixture_object_count``, ``models``), loads
        each label with constraint checks disabled, then re-validates
        constraints and resets database sequences if anything was installed.
        """
        connection = connections[self.using]

        # Keep a count of the installed objects and fixtures
        self.fixture_count = 0
        self.loaded_object_count = 0
        self.fixture_object_count = 0
        self.models = set()

        self.serialization_formats = serializers.get_public_serializer_formats()
        # Forcing binary mode may be revisited after dropping Python 2 support (see #22399)
        # Maps a compression suffix to the (opener, mode) used to open fixtures.
        self.compression_formats = {
            None: (open, 'rb'),
            'gz': (gzip.GzipFile, 'rb'),
            'zip': (SingleZipReader, 'r'),
            'stdin': (lambda *args: sys.stdin, None),
        }
        if has_bz2:
            self.compression_formats['bz2'] = (bz2.BZ2File, 'r')

        # Django's test suite repeatedly tries to load initial_data fixtures
        # from apps that don't have any fixtures. Because disabling constraint
        # checks can be expensive on some database (especially MSSQL), bail
        # out early if no fixtures are found.
        for fixture_label in fixture_labels:
            if self.find_fixtures(fixture_label):
                break
        else:
            return

        with connection.constraint_checks_disabled():
            for fixture_label in fixture_labels:
                self.load_label(fixture_label)

        # Since we disabled constraint checks, we must manually check for
        # any invalid keys that might have been added
        table_names = [model._meta.db_table for model in self.models]
        try:
            connection.check_constraints(table_names=table_names)
        except Exception as e:
            # Re-raise with context so the user knows fixture loading failed.
            e.args = ("Problem installing fixtures: %s" % e,)
            raise

        # If we found even one object in a fixture, we need to reset the
        # database sequences.
        if self.loaded_object_count > 0:
            sequence_sql = connection.ops.sequence_reset_sql(no_style(), self.models)
            if sequence_sql:
                if self.verbosity >= 2:
                    self.stdout.write("Resetting sequences\n")
                with connection.cursor() as cursor:
                    for line in sequence_sql:
                        cursor.execute(line)

        if self.verbosity >= 1:
            if self.fixture_object_count == self.loaded_object_count:
                self.stdout.write(
                    "Installed %d object(s) from %d fixture(s)"
                    % (self.loaded_object_count, self.fixture_count)
                )
            else:
                # Some deserialized objects were skipped (excluded app/model
                # or disallowed by the database router).
                self.stdout.write(
                    "Installed %d object(s) (of %d) from %d fixture(s)"
                    % (self.loaded_object_count, self.fixture_object_count, self.fixture_count)
                )
% loaded_objects_in_fixture, + ending='' + ) + except (DatabaseError, IntegrityError) as e: + e.args = ("Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % { + 'app_label': obj.object._meta.app_label, + 'object_name': obj.object._meta.object_name, + 'pk': obj.object.pk, + 'error_msg': e, + },) + raise + if objects and show_progress: + self.stdout.write('') # add a newline after progress indicator + self.loaded_object_count += loaded_objects_in_fixture + self.fixture_object_count += objects_in_fixture + except Exception as e: + if not isinstance(e, CommandError): + e.args = ("Problem installing fixture '%s': %s" % (fixture_file, e),) + raise + finally: + fixture.close() + + # Warn if the fixture we loaded contains 0 objects. + if objects_in_fixture == 0: + warnings.warn( + "No fixture data found for '%s'. (File format may be " + "invalid.)" % fixture_name, + RuntimeWarning + ) + + @functools.lru_cache(maxsize=None) + def find_fixtures(self, fixture_label): + """Find fixture files for a given label.""" + if fixture_label == READ_STDIN: + return [(READ_STDIN, None, READ_STDIN)] + + fixture_name, ser_fmt, cmp_fmt = self.parse_name(fixture_label) + databases = [self.using, None] + cmp_fmts = list(self.compression_formats) if cmp_fmt is None else [cmp_fmt] + ser_fmts = serializers.get_public_serializer_formats() if ser_fmt is None else [ser_fmt] + + if self.verbosity >= 2: + self.stdout.write("Loading '%s' fixtures..." 
% fixture_name) + + if os.path.isabs(fixture_name): + fixture_dirs = [os.path.dirname(fixture_name)] + fixture_name = os.path.basename(fixture_name) + else: + fixture_dirs = self.fixture_dirs + if os.path.sep in os.path.normpath(fixture_name): + fixture_dirs = [os.path.join(dir_, os.path.dirname(fixture_name)) + for dir_ in fixture_dirs] + fixture_name = os.path.basename(fixture_name) + + suffixes = ( + '.'.join(ext for ext in combo if ext) + for combo in product(databases, ser_fmts, cmp_fmts) + ) + targets = {'.'.join((fixture_name, suffix)) for suffix in suffixes} + + fixture_files = [] + for fixture_dir in fixture_dirs: + if self.verbosity >= 2: + self.stdout.write("Checking %s for fixtures..." % humanize(fixture_dir)) + fixture_files_in_dir = [] + path = os.path.join(fixture_dir, fixture_name) + for candidate in glob.iglob(glob.escape(path) + '*'): + if os.path.basename(candidate) in targets: + # Save the fixture_dir and fixture_name for future error messages. + fixture_files_in_dir.append((candidate, fixture_dir, fixture_name)) + + if self.verbosity >= 2 and not fixture_files_in_dir: + self.stdout.write("No fixture '%s' in %s." % + (fixture_name, humanize(fixture_dir))) + + # Check kept for backwards-compatibility; it isn't clear why + # duplicates are only allowed in different directories. + if len(fixture_files_in_dir) > 1: + raise CommandError( + "Multiple fixtures named '%s' in %s. Aborting." % + (fixture_name, humanize(fixture_dir))) + fixture_files.extend(fixture_files_in_dir) + + if not fixture_files: + raise CommandError("No fixture named '%s' found." % fixture_name) + + return fixture_files + + @cached_property + def fixture_dirs(self): + """ + Return a list of fixture directories. + + The list contains the 'fixtures' subdirectory of each installed + application, if it exists, the directories in FIXTURE_DIRS, and the + current directory. 
+ """ + dirs = [] + fixture_dirs = settings.FIXTURE_DIRS + if len(fixture_dirs) != len(set(fixture_dirs)): + raise ImproperlyConfigured("settings.FIXTURE_DIRS contains duplicates.") + for app_config in apps.get_app_configs(): + app_label = app_config.label + app_dir = os.path.join(app_config.path, 'fixtures') + if app_dir in fixture_dirs: + raise ImproperlyConfigured( + "'%s' is a default fixture directory for the '%s' app " + "and cannot be listed in settings.FIXTURE_DIRS." % (app_dir, app_label) + ) + + if self.app_label and app_label != self.app_label: + continue + if os.path.isdir(app_dir): + dirs.append(app_dir) + dirs.extend(list(fixture_dirs)) + dirs.append('') + dirs = [os.path.abspath(os.path.realpath(d)) for d in dirs] + return dirs + + def parse_name(self, fixture_name): + """ + Split fixture name in name, serialization format, compression format. + """ + if fixture_name == READ_STDIN: + if not self.format: + raise CommandError('--format must be specified when reading from stdin.') + return READ_STDIN, self.format, 'stdin' + + parts = fixture_name.rsplit('.', 2) + + if len(parts) > 1 and parts[-1] in self.compression_formats: + cmp_fmt = parts[-1] + parts = parts[:-1] + else: + cmp_fmt = None + + if len(parts) > 1: + if parts[-1] in self.serialization_formats: + ser_fmt = parts[-1] + parts = parts[:-1] + else: + raise CommandError( + "Problem installing fixture '%s': %s is not a known " + "serialization format." 
class SingleZipReader(zipfile.ZipFile):
    """Reader for a zip-compressed fixture that must hold exactly one member."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # A fixture archive with several members would be ambiguous.
        if len(self.namelist()) != 1:
            raise ValueError("Zip-compressed fixtures must contain one file.")

    def read(self):
        """Return the decompressed bytes of the archive's single member."""
        return super().read(self.namelist()[0])


def humanize(dirname):
    """Return the quoted directory name, or 'absolute path' when empty."""
    if dirname:
        return "'%s'" % dirname
    return 'absolute path'
@total_ordering
class TranslatableFile:
    """A source file discovered for translation; ordered by its full path."""

    def __init__(self, dirpath, file_name, locale_dir):
        # Attribute names are part of the public surface: callers read
        # .file, .dirpath and .locale_dir directly.
        self.file = file_name
        self.dirpath = dirpath
        self.locale_dir = locale_dir

    @property
    def path(self):
        """Full path of the file (dirpath joined with the file name)."""
        return os.path.join(self.dirpath, self.file)

    def __eq__(self, other):
        return self.path == other.path

    def __lt__(self, other):
        # total_ordering derives <=, > and >= from __eq__ plus __lt__.
        return self.path < other.path

    def __repr__(self):
        return "<%s: %s>" % (type(self).__name__, os.sep.join([self.dirpath, self.file]))
+ """ + if not self.is_templatized: + return + + encoding = settings.FILE_CHARSET if self.command.settings_available else 'utf-8' + with open(self.path, 'r', encoding=encoding) as fp: + src_data = fp.read() + + if self.domain == 'djangojs': + content = prepare_js_for_gettext(src_data) + elif self.domain == 'django': + content = templatize(src_data, origin=self.path[2:]) + + with open(self.work_path, 'w', encoding='utf-8') as fp: + fp.write(content) + + def postprocess_messages(self, msgs): + """ + Postprocess messages generated by xgettext GNU gettext utility. + + Transform paths as if these messages were generated from original + translatable files rather than from preprocessed versions. + """ + if not self.is_templatized: + return msgs + + # Remove '.py' suffix + if os.name == 'nt': + # Preserve '.\' prefix on Windows to respect gettext behavior + old_path = self.work_path + new_path = self.path + else: + old_path = self.work_path[2:] + new_path = self.path[2:] + + return re.sub( + r'^(#: .*)(' + re.escape(old_path) + r')', + lambda match: match.group().replace(old_path, new_path), + msgs, + flags=re.MULTILINE + ) + + def cleanup(self): + """ + Remove a preprocessed copy of a translatable file (if any). + """ + if self.is_templatized: + # This check is needed for the case of a symlinked file and its + # source being processed inside a single group (locale dir); + # removing either of those two removes both. + if os.path.exists(self.work_path): + os.unlink(self.work_path) + + +def normalize_eols(raw_contents): + """ + Take a block of raw text that will be passed through str.splitlines() to + get universal newlines treatment. + + Return the resulting block of text with normalized `\n` EOL sequences ready + to be written to disk using current platform's native EOLs. 
+ """ + lines_list = raw_contents.splitlines() + # Ensure last line has its EOL + if lines_list and lines_list[-1]: + lines_list.append('') + return '\n'.join(lines_list) + + +def write_pot_file(potfile, msgs): + """ + Write the `potfile` with the `msgs` contents, making sure its format is + valid. + """ + pot_lines = msgs.splitlines() + if os.path.exists(potfile): + # Strip the header + lines = dropwhile(len, pot_lines) + else: + lines = [] + found, header_read = False, False + for line in pot_lines: + if not found and not header_read: + found = True + line = line.replace('charset=CHARSET', 'charset=UTF-8') + if not line and not found: + header_read = True + lines.append(line) + msgs = '\n'.join(lines) + # Force newlines of POT files to '\n' to work around + # https://savannah.gnu.org/bugs/index.php?52395 + with open(potfile, 'a', encoding='utf-8', newline='\n') as fp: + fp.write(msgs) + + +class Command(BaseCommand): + help = ( + "Runs over the entire source tree of the current directory and " + "pulls out all strings marked for translation. It creates (or updates) a message " + "file in the conf/locale (in the django tree) or locale (for projects and " + "applications) directory.\n\nYou must run this command with one of either the " + "--locale, --exclude, or --all options." + ) + + translatable_file_class = TranslatableFile + build_file_class = BuildFile + + requires_system_checks = False + leave_locale_alone = True + + msgmerge_options = ['-q', '--previous'] + msguniq_options = ['--to-code=utf-8'] + msgattrib_options = ['--no-obsolete'] + xgettext_options = ['--from-code=UTF-8', '--add-comments=Translators'] + + def add_arguments(self, parser): + parser.add_argument( + '--locale', '-l', default=[], dest='locale', action='append', + help='Creates or updates the message files for the given locale(s) (e.g. pt_BR). 
' + 'Can be used multiple times.', + ) + parser.add_argument( + '--exclude', '-x', default=[], dest='exclude', action='append', + help='Locales to exclude. Default is none. Can be used multiple times.', + ) + parser.add_argument( + '--domain', '-d', default='django', dest='domain', + help='The domain of the message files (default: "django").', + ) + parser.add_argument( + '--all', '-a', action='store_true', dest='all', + help='Updates the message files for all existing locales.', + ) + parser.add_argument( + '--extension', '-e', dest='extensions', action='append', + help='The file extension(s) to examine (default: "html,txt,py", or "js" ' + 'if the domain is "djangojs"). Separate multiple extensions with ' + 'commas, or use -e multiple times.', + ) + parser.add_argument( + '--symlinks', '-s', action='store_true', dest='symlinks', + help='Follows symlinks to directories when examining source code ' + 'and templates for translation strings.', + ) + parser.add_argument( + '--ignore', '-i', action='append', dest='ignore_patterns', + default=[], metavar='PATTERN', + help='Ignore files or directories matching this glob-style pattern. ' + 'Use multiple times to ignore more.', + ) + parser.add_argument( + '--no-default-ignore', action='store_false', dest='use_default_ignore_patterns', + help="Don't ignore the common glob-style patterns 'CVS', '.*', '*~' and '*.pyc'.", + ) + parser.add_argument( + '--no-wrap', action='store_true', dest='no_wrap', + help="Don't break long message lines into several lines.", + ) + parser.add_argument( + '--no-location', action='store_true', dest='no_location', + help="Don't write '#: filename:line' lines.", + ) + parser.add_argument( + '--add-location', dest='add_location', + choices=('full', 'file', 'never'), const='full', nargs='?', + help=( + "Controls '#: filename:line' lines. If the option is 'full' " + "(the default if not given), the lines include both file name " + "and line number. If it's 'file', the line number is omitted. 
If " + "it's 'never', the lines are suppressed (same as --no-location). " + "--add-location requires gettext 0.19 or newer." + ), + ) + parser.add_argument( + '--no-obsolete', action='store_true', dest='no_obsolete', + help="Remove obsolete message strings.", + ) + parser.add_argument( + '--keep-pot', action='store_true', dest='keep_pot', + help="Keep .pot file after making messages. Useful when debugging.", + ) + + def handle(self, *args, **options): + locale = options['locale'] + exclude = options['exclude'] + self.domain = options['domain'] + self.verbosity = options['verbosity'] + process_all = options['all'] + extensions = options['extensions'] + self.symlinks = options['symlinks'] + + ignore_patterns = options['ignore_patterns'] + if options['use_default_ignore_patterns']: + ignore_patterns += ['CVS', '.*', '*~', '*.pyc'] + self.ignore_patterns = list(set(ignore_patterns)) + + # Avoid messing with mutable class variables + if options['no_wrap']: + self.msgmerge_options = self.msgmerge_options[:] + ['--no-wrap'] + self.msguniq_options = self.msguniq_options[:] + ['--no-wrap'] + self.msgattrib_options = self.msgattrib_options[:] + ['--no-wrap'] + self.xgettext_options = self.xgettext_options[:] + ['--no-wrap'] + if options['no_location']: + self.msgmerge_options = self.msgmerge_options[:] + ['--no-location'] + self.msguniq_options = self.msguniq_options[:] + ['--no-location'] + self.msgattrib_options = self.msgattrib_options[:] + ['--no-location'] + self.xgettext_options = self.xgettext_options[:] + ['--no-location'] + if options['add_location']: + if self.gettext_version < (0, 19): + raise CommandError( + "The --add-location option requires gettext 0.19 or later. " + "You have %s." 
% '.'.join(str(x) for x in self.gettext_version) + ) + arg_add_location = "--add-location=%s" % options['add_location'] + self.msgmerge_options = self.msgmerge_options[:] + [arg_add_location] + self.msguniq_options = self.msguniq_options[:] + [arg_add_location] + self.msgattrib_options = self.msgattrib_options[:] + [arg_add_location] + self.xgettext_options = self.xgettext_options[:] + [arg_add_location] + + self.no_obsolete = options['no_obsolete'] + self.keep_pot = options['keep_pot'] + + if self.domain not in ('django', 'djangojs'): + raise CommandError("currently makemessages only supports domains " + "'django' and 'djangojs'") + if self.domain == 'djangojs': + exts = extensions if extensions else ['js'] + else: + exts = extensions if extensions else ['html', 'txt', 'py'] + self.extensions = handle_extensions(exts) + + if (locale is None and not exclude and not process_all) or self.domain is None: + raise CommandError( + "Type '%s help %s' for usage information." + % (os.path.basename(sys.argv[0]), sys.argv[1]) + ) + + if self.verbosity > 1: + self.stdout.write( + 'examining files with the extensions: %s\n' + % get_text_list(list(self.extensions), 'and') + ) + + self.invoked_for_django = False + self.locale_paths = [] + self.default_locale_path = None + if os.path.isdir(os.path.join('conf', 'locale')): + self.locale_paths = [os.path.abspath(os.path.join('conf', 'locale'))] + self.default_locale_path = self.locale_paths[0] + self.invoked_for_django = True + else: + if self.settings_available: + self.locale_paths.extend(settings.LOCALE_PATHS) + # Allow to run makemessages inside an app dir + if os.path.isdir('locale'): + self.locale_paths.append(os.path.abspath('locale')) + if self.locale_paths: + self.default_locale_path = self.locale_paths[0] + if not os.path.exists(self.default_locale_path): + os.makedirs(self.default_locale_path) + + # Build locale list + looks_like_locale = re.compile(r'[a-z]{2}') + locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % 
self.default_locale_path)) + all_locales = [ + lang_code for lang_code in map(os.path.basename, locale_dirs) + if looks_like_locale.match(lang_code) + ] + + # Account for excluded locales + if process_all: + locales = all_locales + else: + locales = locale or all_locales + locales = set(locales).difference(exclude) + + if locales: + check_programs('msguniq', 'msgmerge', 'msgattrib') + + check_programs('xgettext') + + try: + potfiles = self.build_potfiles() + + # Build po files for each selected locale + for locale in locales: + if self.verbosity > 0: + self.stdout.write("processing locale %s\n" % locale) + for potfile in potfiles: + self.write_po_file(potfile, locale) + finally: + if not self.keep_pot: + self.remove_potfiles() + + @cached_property + def gettext_version(self): + # Gettext tools will output system-encoded bytestrings instead of UTF-8, + # when looking up the version. It's especially a problem on Windows. + out, err, status = popen_wrapper( + ['xgettext', '--version'], + stdout_encoding=DEFAULT_LOCALE_ENCODING, + ) + m = re.search(r'(\d+)\.(\d+)\.?(\d+)?', out) + if m: + return tuple(int(d) for d in m.groups() if d is not None) + else: + raise CommandError("Unable to get gettext version. Is it installed?") + + @cached_property + def settings_available(self): + try: + settings.LOCALE_PATHS + except ImproperlyConfigured: + if self.verbosity > 1: + self.stderr.write("Running without configured settings.") + return False + return True + + def build_potfiles(self): + """ + Build pot files and apply msguniq to them. 
+ """ + file_list = self.find_files(".") + self.remove_potfiles() + self.process_files(file_list) + potfiles = [] + for path in self.locale_paths: + potfile = os.path.join(path, '%s.pot' % self.domain) + if not os.path.exists(potfile): + continue + args = ['msguniq'] + self.msguniq_options + [potfile] + msgs, errors, status = popen_wrapper(args) + if errors: + if status != STATUS_OK: + raise CommandError( + "errors happened while running msguniq\n%s" % errors) + elif self.verbosity > 0: + self.stdout.write(errors) + msgs = normalize_eols(msgs) + with open(potfile, 'w', encoding='utf-8') as fp: + fp.write(msgs) + potfiles.append(potfile) + return potfiles + + def remove_potfiles(self): + for path in self.locale_paths: + pot_path = os.path.join(path, '%s.pot' % self.domain) + if os.path.exists(pot_path): + os.unlink(pot_path) + + def find_files(self, root): + """ + Get all files in the given root. Also check that there is a matching + locale dir for each file. + """ + def is_ignored(path, ignore_patterns): + """ + Check if the given path should be ignored or not. 
+ """ + filename = os.path.basename(path) + + def ignore(pattern): + return fnmatch.fnmatchcase(filename, pattern) or fnmatch.fnmatchcase(path, pattern) + + return any(ignore(pattern) for pattern in ignore_patterns) + + ignore_patterns = [os.path.normcase(p) for p in self.ignore_patterns] + dir_suffixes = {'%s*' % path_sep for path_sep in {'/', os.sep}} + norm_patterns = [] + for p in ignore_patterns: + for dir_suffix in dir_suffixes: + if p.endswith(dir_suffix): + norm_patterns.append(p[:-len(dir_suffix)]) + break + else: + norm_patterns.append(p) + + all_files = [] + ignored_roots = [] + if self.settings_available: + ignored_roots = [os.path.normpath(p) for p in (settings.MEDIA_ROOT, settings.STATIC_ROOT) if p] + for dirpath, dirnames, filenames in os.walk(root, topdown=True, followlinks=self.symlinks): + for dirname in dirnames[:]: + if (is_ignored(os.path.normpath(os.path.join(dirpath, dirname)), norm_patterns) or + os.path.join(os.path.abspath(dirpath), dirname) in ignored_roots): + dirnames.remove(dirname) + if self.verbosity > 1: + self.stdout.write('ignoring directory %s\n' % dirname) + elif dirname == 'locale': + dirnames.remove(dirname) + self.locale_paths.insert(0, os.path.join(os.path.abspath(dirpath), dirname)) + for filename in filenames: + file_path = os.path.normpath(os.path.join(dirpath, filename)) + file_ext = os.path.splitext(filename)[1] + if file_ext not in self.extensions or is_ignored(file_path, self.ignore_patterns): + if self.verbosity > 1: + self.stdout.write('ignoring file %s in %s\n' % (filename, dirpath)) + else: + locale_dir = None + for path in self.locale_paths: + if os.path.abspath(dirpath).startswith(os.path.dirname(path)): + locale_dir = path + break + if not locale_dir: + locale_dir = self.default_locale_path + if not locale_dir: + locale_dir = NO_LOCALE_DIR + all_files.append(self.translatable_file_class(dirpath, filename, locale_dir)) + return sorted(all_files) + + def process_files(self, file_list): + """ + Group translatable 
files by locale directory and run pot file build + process for each group. + """ + file_groups = {} + for translatable in file_list: + file_group = file_groups.setdefault(translatable.locale_dir, []) + file_group.append(translatable) + for locale_dir, files in file_groups.items(): + self.process_locale_dir(locale_dir, files) + + def process_locale_dir(self, locale_dir, files): + """ + Extract translatable literals from the specified files, creating or + updating the POT file for a given locale directory. + + Use the xgettext GNU gettext utility. + """ + build_files = [] + for translatable in files: + if self.verbosity > 1: + self.stdout.write('processing file %s in %s\n' % ( + translatable.file, translatable.dirpath + )) + if self.domain not in ('djangojs', 'django'): + continue + build_file = self.build_file_class(self, self.domain, translatable) + try: + build_file.preprocess() + except UnicodeDecodeError as e: + self.stdout.write( + 'UnicodeDecodeError: skipped file %s in %s (reason: %s)' % ( + translatable.file, translatable.dirpath, e, + ) + ) + continue + build_files.append(build_file) + + if self.domain == 'djangojs': + is_templatized = build_file.is_templatized + args = [ + 'xgettext', + '-d', self.domain, + '--language=%s' % ('C' if is_templatized else 'JavaScript',), + '--keyword=gettext_noop', + '--keyword=gettext_lazy', + '--keyword=ngettext_lazy:1,2', + '--keyword=pgettext:1c,2', + '--keyword=npgettext:1c,2,3', + '--output=-', + ] + elif self.domain == 'django': + args = [ + 'xgettext', + '-d', self.domain, + '--language=Python', + '--keyword=gettext_noop', + '--keyword=gettext_lazy', + '--keyword=ngettext_lazy:1,2', + '--keyword=ugettext_noop', + '--keyword=ugettext_lazy', + '--keyword=ungettext_lazy:1,2', + '--keyword=pgettext:1c,2', + '--keyword=npgettext:1c,2,3', + '--keyword=pgettext_lazy:1c,2', + '--keyword=npgettext_lazy:1c,2,3', + '--output=-', + ] + else: + return + + input_files = [bf.work_path for bf in build_files] + with 
NamedTemporaryFile(mode='w+') as input_files_list: + input_files_list.write(('\n'.join(input_files))) + input_files_list.flush() + args.extend(['--files-from', input_files_list.name]) + args.extend(self.xgettext_options) + msgs, errors, status = popen_wrapper(args) + + if errors: + if status != STATUS_OK: + for build_file in build_files: + build_file.cleanup() + raise CommandError( + 'errors happened while running xgettext on %s\n%s' % + ('\n'.join(input_files), errors) + ) + elif self.verbosity > 0: + # Print warnings + self.stdout.write(errors) + + if msgs: + if locale_dir is NO_LOCALE_DIR: + file_path = os.path.normpath(build_files[0].path) + raise CommandError( + 'Unable to find a locale path to store translations for ' + 'file %s' % file_path + ) + for build_file in build_files: + msgs = build_file.postprocess_messages(msgs) + potfile = os.path.join(locale_dir, '%s.pot' % self.domain) + write_pot_file(potfile, msgs) + + for build_file in build_files: + build_file.cleanup() + + def write_po_file(self, potfile, locale): + """ + Create or update the PO file for self.domain and `locale`. + Use contents of the existing `potfile`. + + Use msgmerge and msgattrib GNU gettext utilities. + """ + basedir = os.path.join(os.path.dirname(potfile), locale, 'LC_MESSAGES') + if not os.path.isdir(basedir): + os.makedirs(basedir) + pofile = os.path.join(basedir, '%s.po' % self.domain) + + if os.path.exists(pofile): + args = ['msgmerge'] + self.msgmerge_options + [pofile, potfile] + msgs, errors, status = popen_wrapper(args) + if errors: + if status != STATUS_OK: + raise CommandError( + "errors happened while running msgmerge\n%s" % errors) + elif self.verbosity > 0: + self.stdout.write(errors) + else: + with open(potfile, 'r', encoding='utf-8') as fp: + msgs = fp.read() + if not self.invoked_for_django: + msgs = self.copy_plural_forms(msgs, locale) + msgs = normalize_eols(msgs) + msgs = msgs.replace( + "#. 
#-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % self.domain, "") + with open(pofile, 'w', encoding='utf-8') as fp: + fp.write(msgs) + + if self.no_obsolete: + args = ['msgattrib'] + self.msgattrib_options + ['-o', pofile, pofile] + msgs, errors, status = popen_wrapper(args) + if errors: + if status != STATUS_OK: + raise CommandError( + "errors happened while running msgattrib\n%s" % errors) + elif self.verbosity > 0: + self.stdout.write(errors) + + def copy_plural_forms(self, msgs, locale): + """ + Copy plural forms header contents from a Django catalog of locale to + the msgs string, inserting it at the right place. msgs should be the + contents of a newly created .po file. + """ + django_dir = os.path.normpath(os.path.join(os.path.dirname(django.__file__))) + if self.domain == 'djangojs': + domains = ('djangojs', 'django') + else: + domains = ('django',) + for domain in domains: + django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain) + if os.path.exists(django_po): + with open(django_po, 'r', encoding='utf-8') as fp: + m = plural_forms_re.search(fp.read()) + if m: + plural_form_line = m.group('value') + if self.verbosity > 1: + self.stdout.write("copying plural forms: %s\n" % plural_form_line) + lines = [] + found = False + for line in msgs.splitlines(): + if not found and (not line or plural_forms_re.search(line)): + line = plural_form_line + found = True + lines.append(line) + msgs = '\n'.join(lines) + break + return msgs diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/makemigrations.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/makemigrations.py new file mode 100644 index 0000000000000000000000000000000000000000..45fce19b0f9d75b145f8cd8358286fd935474647 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/makemigrations.py 
@@ -0,0 +1,301 @@ +import os +import sys +from itertools import takewhile + +from django.apps import apps +from django.conf import settings +from django.core.management.base import BaseCommand, CommandError +from django.db import DEFAULT_DB_ALIAS, connections, router +from django.db.migrations import Migration +from django.db.migrations.autodetector import MigrationAutodetector +from django.db.migrations.loader import MigrationLoader +from django.db.migrations.questioner import ( + InteractiveMigrationQuestioner, MigrationQuestioner, + NonInteractiveMigrationQuestioner, +) +from django.db.migrations.state import ProjectState +from django.db.migrations.utils import get_migration_name_timestamp +from django.db.migrations.writer import MigrationWriter + + +class Command(BaseCommand): + help = "Creates new migration(s) for apps." + + def add_arguments(self, parser): + parser.add_argument( + 'args', metavar='app_label', nargs='*', + help='Specify the app label(s) to create migrations for.', + ) + parser.add_argument( + '--dry-run', action='store_true', dest='dry_run', + help="Just show what migrations would be made; don't actually write them.", + ) + parser.add_argument( + '--merge', action='store_true', dest='merge', + help="Enable fixing of migration conflicts.", + ) + parser.add_argument( + '--empty', action='store_true', dest='empty', + help="Create an empty migration.", + ) + parser.add_argument( + '--noinput', '--no-input', action='store_false', dest='interactive', + help='Tells Django to NOT prompt the user for input of any kind.', + ) + parser.add_argument( + '-n', '--name', action='store', dest='name', default=None, + help="Use this name for migration file(s).", + ) + parser.add_argument( + '--check', action='store_true', dest='check_changes', + help='Exit with a non-zero status if model changes are missing migrations.', + ) + + def handle(self, *app_labels, **options): + self.verbosity = options['verbosity'] + self.interactive = options['interactive'] + 
self.dry_run = options['dry_run'] + self.merge = options['merge'] + self.empty = options['empty'] + self.migration_name = options['name'] + check_changes = options['check_changes'] + + # Make sure the app they asked for exists + app_labels = set(app_labels) + bad_app_labels = set() + for app_label in app_labels: + try: + apps.get_app_config(app_label) + except LookupError: + bad_app_labels.add(app_label) + if bad_app_labels: + for app_label in bad_app_labels: + self.stderr.write("App '%s' could not be found. Is it in INSTALLED_APPS?" % app_label) + sys.exit(2) + + # Load the current graph state. Pass in None for the connection so + # the loader doesn't try to resolve replaced migrations from DB. + loader = MigrationLoader(None, ignore_no_migrations=True) + + # Raise an error if any migrations are applied before their dependencies. + consistency_check_labels = {config.label for config in apps.get_app_configs()} + # Non-default databases are only checked if database routers used. + aliases_to_check = connections if settings.DATABASE_ROUTERS else [DEFAULT_DB_ALIAS] + for alias in sorted(aliases_to_check): + connection = connections[alias] + if (connection.settings_dict['ENGINE'] != 'django.db.backends.dummy' and any( + # At least one model must be migrated to the database. 
+ router.allow_migrate(connection.alias, app_label, model_name=model._meta.object_name) + for app_label in consistency_check_labels + for model in apps.get_app_config(app_label).get_models() + )): + loader.check_consistent_history(connection) + + # Before anything else, see if there's conflicting apps and drop out + # hard if there are any and they don't want to merge + conflicts = loader.detect_conflicts() + + # If app_labels is specified, filter out conflicting migrations for unspecified apps + if app_labels: + conflicts = { + app_label: conflict for app_label, conflict in conflicts.items() + if app_label in app_labels + } + + if conflicts and not self.merge: + name_str = "; ".join( + "%s in %s" % (", ".join(names), app) + for app, names in conflicts.items() + ) + raise CommandError( + "Conflicting migrations detected; multiple leaf nodes in the " + "migration graph: (%s).\nTo fix them run " + "'python manage.py makemigrations --merge'" % name_str + ) + + # If they want to merge and there's nothing to merge, then politely exit + if self.merge and not conflicts: + self.stdout.write("No conflicts detected to merge.") + return + + # If they want to merge and there is something to merge, then + # divert into the merge code + if self.merge and conflicts: + return self.handle_merge(loader, conflicts) + + if self.interactive: + questioner = InteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run) + else: + questioner = NonInteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run) + # Set up autodetector + autodetector = MigrationAutodetector( + loader.project_state(), + ProjectState.from_apps(apps), + questioner, + ) + + # If they want to make an empty migration, make one for each app + if self.empty: + if not app_labels: + raise CommandError("You must supply at least one app label when using --empty.") + # Make a fake changes() result we can pass to arrange_for_graph + changes = { + app: [Migration("custom", app)] + for app 
in app_labels + } + changes = autodetector.arrange_for_graph( + changes=changes, + graph=loader.graph, + migration_name=self.migration_name, + ) + self.write_migration_files(changes) + return + + # Detect changes + changes = autodetector.changes( + graph=loader.graph, + trim_to_apps=app_labels or None, + convert_apps=app_labels or None, + migration_name=self.migration_name, + ) + + if not changes: + # No changes? Tell them. + if self.verbosity >= 1: + if len(app_labels) == 1: + self.stdout.write("No changes detected in app '%s'" % app_labels.pop()) + elif len(app_labels) > 1: + self.stdout.write("No changes detected in apps '%s'" % ("', '".join(app_labels))) + else: + self.stdout.write("No changes detected") + else: + self.write_migration_files(changes) + if check_changes: + sys.exit(1) + + def write_migration_files(self, changes): + """ + Take a changes dict and write them out as migration files. + """ + directory_created = {} + for app_label, app_migrations in changes.items(): + if self.verbosity >= 1: + self.stdout.write(self.style.MIGRATE_HEADING("Migrations for '%s':" % app_label) + "\n") + for migration in app_migrations: + # Describe the migration + writer = MigrationWriter(migration) + if self.verbosity >= 1: + # Display a relative path if it's below the current working + # directory, or an absolute path otherwise. + try: + migration_string = os.path.relpath(writer.path) + except ValueError: + migration_string = writer.path + if migration_string.startswith('..'): + migration_string = writer.path + self.stdout.write(" %s\n" % (self.style.MIGRATE_LABEL(migration_string),)) + for operation in migration.operations: + self.stdout.write(" - %s\n" % operation.describe()) + if not self.dry_run: + # Write the migrations file to the disk. 
+ migrations_directory = os.path.dirname(writer.path) + if not directory_created.get(app_label): + if not os.path.isdir(migrations_directory): + os.mkdir(migrations_directory) + init_path = os.path.join(migrations_directory, "__init__.py") + if not os.path.isfile(init_path): + open(init_path, "w").close() + # We just do this once per app + directory_created[app_label] = True + migration_string = writer.as_string() + with open(writer.path, "w", encoding='utf-8') as fh: + fh.write(migration_string) + elif self.verbosity == 3: + # Alternatively, makemigrations --dry-run --verbosity 3 + # will output the migrations to stdout rather than saving + # the file to the disk. + self.stdout.write(self.style.MIGRATE_HEADING( + "Full migrations file '%s':" % writer.filename) + "\n" + ) + self.stdout.write("%s\n" % writer.as_string()) + + def handle_merge(self, loader, conflicts): + """ + Handles merging together conflicted migrations interactively, + if it's safe; otherwise, advises on how to fix it. + """ + if self.interactive: + questioner = InteractiveMigrationQuestioner() + else: + questioner = MigrationQuestioner(defaults={'ask_merge': True}) + + for app_label, migration_names in conflicts.items(): + # Grab out the migrations in question, and work out their + # common ancestor. 
+ merge_migrations = [] + for migration_name in migration_names: + migration = loader.get_migration(app_label, migration_name) + migration.ancestry = [ + mig for mig in loader.graph.forwards_plan((app_label, migration_name)) + if mig[0] == migration.app_label + ] + merge_migrations.append(migration) + + def all_items_equal(seq): + return all(item == seq[0] for item in seq[1:]) + + merge_migrations_generations = zip(*(m.ancestry for m in merge_migrations)) + common_ancestor_count = sum(1 for common_ancestor_generation + in takewhile(all_items_equal, merge_migrations_generations)) + if not common_ancestor_count: + raise ValueError("Could not find common ancestor of %s" % migration_names) + # Now work out the operations along each divergent branch + for migration in merge_migrations: + migration.branch = migration.ancestry[common_ancestor_count:] + migrations_ops = (loader.get_migration(node_app, node_name).operations + for node_app, node_name in migration.branch) + migration.merged_operations = sum(migrations_ops, []) + # In future, this could use some of the Optimizer code + # (can_optimize_through) to automatically see if they're + # mergeable. For now, we always just prompt the user. + if self.verbosity > 0: + self.stdout.write(self.style.MIGRATE_HEADING("Merging %s" % app_label)) + for migration in merge_migrations: + self.stdout.write(self.style.MIGRATE_LABEL(" Branch %s" % migration.name)) + for operation in migration.merged_operations: + self.stdout.write(" - %s\n" % operation.describe()) + if questioner.ask_merge(app_label): + # If they still want to merge it, then write out an empty + # file depending on the migrations needing merging. 
+ numbers = [ + MigrationAutodetector.parse_number(migration.name) + for migration in merge_migrations + ] + try: + biggest_number = max(x for x in numbers if x is not None) + except ValueError: + biggest_number = 1 + subclass = type("Migration", (Migration, ), { + "dependencies": [(app_label, migration.name) for migration in merge_migrations], + }) + migration_name = "%04i_%s" % ( + biggest_number + 1, + self.migration_name or ("merge_%s" % get_migration_name_timestamp()) + ) + new_migration = subclass(migration_name, app_label) + writer = MigrationWriter(new_migration) + + if not self.dry_run: + # Write the merge migrations file to the disk + with open(writer.path, "w", encoding='utf-8') as fh: + fh.write(writer.as_string()) + if self.verbosity > 0: + self.stdout.write("\nCreated new merge migration %s" % writer.path) + elif self.verbosity == 3: + # Alternatively, makemigrations --merge --dry-run --verbosity 3 + # will output the merge migrations to stdout rather than saving + # the file to the disk. 
+ self.stdout.write(self.style.MIGRATE_HEADING( + "Full merge migrations file '%s':" % writer.filename) + "\n" + ) + self.stdout.write("%s\n" % writer.as_string()) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/migrate.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/migrate.py new file mode 100644 index 0000000000000000000000000000000000000000..e549b7200bd947c8fca066a11217d5720861a8c2 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/migrate.py @@ -0,0 +1,307 @@ +import time +from collections import OrderedDict +from importlib import import_module + +from django.apps import apps +from django.core.checks import Tags, run_checks +from django.core.management.base import BaseCommand, CommandError +from django.core.management.sql import ( + emit_post_migrate_signal, emit_pre_migrate_signal, +) +from django.db import DEFAULT_DB_ALIAS, connections, router +from django.db.migrations.autodetector import MigrationAutodetector +from django.db.migrations.executor import MigrationExecutor +from django.db.migrations.loader import AmbiguityError +from django.db.migrations.state import ModelState, ProjectState +from django.utils.module_loading import module_has_submodule + + +class Command(BaseCommand): + help = "Updates database schema. Manages both apps with migrations and those without." + + def add_arguments(self, parser): + parser.add_argument( + 'app_label', nargs='?', + help='App label of an application to synchronize the state.', + ) + parser.add_argument( + 'migration_name', nargs='?', + help='Database state will be brought to the state after that ' + 'migration. 
Use the name "zero" to unapply all migrations.', + ) + parser.add_argument( + '--noinput', '--no-input', action='store_false', dest='interactive', + help='Tells Django to NOT prompt the user for input of any kind.', + ) + parser.add_argument( + '--database', action='store', dest='database', + default=DEFAULT_DB_ALIAS, + help='Nominates a database to synchronize. Defaults to the "default" database.', + ) + parser.add_argument( + '--fake', action='store_true', dest='fake', + help='Mark migrations as run without actually running them.', + ) + parser.add_argument( + '--fake-initial', action='store_true', dest='fake_initial', + help='Detect if tables already exist and fake-apply initial migrations if so. Make sure ' + 'that the current database schema matches your initial migration before using this ' + 'flag. Django will only check for an existing table name.', + ) + parser.add_argument( + '--run-syncdb', action='store_true', dest='run_syncdb', + help='Creates tables for apps without migrations.', + ) + + def _run_checks(self, **kwargs): + issues = run_checks(tags=[Tags.database]) + issues.extend(super()._run_checks(**kwargs)) + return issues + + def handle(self, *args, **options): + + self.verbosity = options['verbosity'] + self.interactive = options['interactive'] + + # Import the 'management' module within each installed app, to register + # dispatcher events. + for app_config in apps.get_app_configs(): + if module_has_submodule(app_config.module, "management"): + import_module('.management', app_config.name) + + # Get the database we're operating from + db = options['database'] + connection = connections[db] + + # Hook for backends needing any database preparation + connection.prepare_database() + # Work out which apps have migrations and which do not + executor = MigrationExecutor(connection, self.migration_progress_callback) + + # Raise an error if any migrations are applied before their dependencies. 
+ executor.loader.check_consistent_history(connection) + + # Before anything else, see if there's conflicting apps and drop out + # hard if there are any + conflicts = executor.loader.detect_conflicts() + if conflicts: + name_str = "; ".join( + "%s in %s" % (", ".join(names), app) + for app, names in conflicts.items() + ) + raise CommandError( + "Conflicting migrations detected; multiple leaf nodes in the " + "migration graph: (%s).\nTo fix them run " + "'python manage.py makemigrations --merge'" % name_str + ) + + # If they supplied command line arguments, work out what they mean. + target_app_labels_only = True + if options['app_label'] and options['migration_name']: + app_label, migration_name = options['app_label'], options['migration_name'] + if app_label not in executor.loader.migrated_apps: + raise CommandError( + "App '%s' does not have migrations." % app_label + ) + if migration_name == "zero": + targets = [(app_label, None)] + else: + try: + migration = executor.loader.get_migration_by_prefix(app_label, migration_name) + except AmbiguityError: + raise CommandError( + "More than one migration matches '%s' in app '%s'. " + "Please be more specific." % + (migration_name, app_label) + ) + except KeyError: + raise CommandError("Cannot find a migration matching '%s' from app '%s'." % ( + migration_name, app_label)) + targets = [(app_label, migration.name)] + target_app_labels_only = False + elif options['app_label']: + app_label = options['app_label'] + if app_label not in executor.loader.migrated_apps: + raise CommandError( + "App '%s' does not have migrations." 
% app_label + ) + targets = [key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label] + else: + targets = executor.loader.graph.leaf_nodes() + + plan = executor.migration_plan(targets) + run_syncdb = options['run_syncdb'] and executor.loader.unmigrated_apps + + # Print some useful info + if self.verbosity >= 1: + self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:")) + if run_syncdb: + self.stdout.write( + self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ") + + (", ".join(sorted(executor.loader.unmigrated_apps))) + ) + if target_app_labels_only: + self.stdout.write( + self.style.MIGRATE_LABEL(" Apply all migrations: ") + + (", ".join(sorted({a for a, n in targets})) or "(none)") + ) + else: + if targets[0][1] is None: + self.stdout.write(self.style.MIGRATE_LABEL( + " Unapply all migrations: ") + "%s" % (targets[0][0], ) + ) + else: + self.stdout.write(self.style.MIGRATE_LABEL( + " Target specific migration: ") + "%s, from %s" + % (targets[0][1], targets[0][0]) + ) + + pre_migrate_state = executor._create_project_state(with_applied_migrations=True) + pre_migrate_apps = pre_migrate_state.apps + emit_pre_migrate_signal( + self.verbosity, self.interactive, connection.alias, apps=pre_migrate_apps, plan=plan, + ) + + # Run the syncdb phase. + if run_syncdb: + if self.verbosity >= 1: + self.stdout.write(self.style.MIGRATE_HEADING("Synchronizing apps without migrations:")) + self.sync_apps(connection, executor.loader.unmigrated_apps) + + # Migrate! + if self.verbosity >= 1: + self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:")) + if not plan: + if self.verbosity >= 1: + self.stdout.write(" No migrations to apply.") + # If there's changes that aren't in migrations yet, tell them how to fix it. 
+ autodetector = MigrationAutodetector( + executor.loader.project_state(), + ProjectState.from_apps(apps), + ) + changes = autodetector.changes(graph=executor.loader.graph) + if changes: + self.stdout.write(self.style.NOTICE( + " Your models have changes that are not yet reflected " + "in a migration, and so won't be applied." + )) + self.stdout.write(self.style.NOTICE( + " Run 'manage.py makemigrations' to make new " + "migrations, and then re-run 'manage.py migrate' to " + "apply them." + )) + fake = False + fake_initial = False + else: + fake = options['fake'] + fake_initial = options['fake_initial'] + post_migrate_state = executor.migrate( + targets, plan=plan, state=pre_migrate_state.clone(), fake=fake, + fake_initial=fake_initial, + ) + # post_migrate signals have access to all models. Ensure that all models + # are reloaded in case any are delayed. + post_migrate_state.clear_delayed_apps_cache() + post_migrate_apps = post_migrate_state.apps + + # Re-render models of real apps to include relationships now that + # we've got a final state. This wouldn't be necessary if real apps + # models were rendered with relationships in the first place. + with post_migrate_apps.bulk_update(): + model_keys = [] + for model_state in post_migrate_apps.real_models: + model_key = model_state.app_label, model_state.name_lower + model_keys.append(model_key) + post_migrate_apps.unregister_model(*model_key) + post_migrate_apps.render_multiple([ + ModelState.from_model(apps.get_model(*model)) for model in model_keys + ]) + + # Send the post_migrate signal, so individual apps can do whatever they need + # to do at this point. 
+ emit_post_migrate_signal( + self.verbosity, self.interactive, connection.alias, apps=post_migrate_apps, plan=plan, + ) + + def migration_progress_callback(self, action, migration=None, fake=False): + if self.verbosity >= 1: + compute_time = self.verbosity > 1 + if action == "apply_start": + if compute_time: + self.start = time.time() + self.stdout.write(" Applying %s..." % migration, ending="") + self.stdout.flush() + elif action == "apply_success": + elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else "" + if fake: + self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed)) + else: + self.stdout.write(self.style.SUCCESS(" OK" + elapsed)) + elif action == "unapply_start": + if compute_time: + self.start = time.time() + self.stdout.write(" Unapplying %s..." % migration, ending="") + self.stdout.flush() + elif action == "unapply_success": + elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else "" + if fake: + self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed)) + else: + self.stdout.write(self.style.SUCCESS(" OK" + elapsed)) + elif action == "render_start": + if compute_time: + self.start = time.time() + self.stdout.write(" Rendering model states...", ending="") + self.stdout.flush() + elif action == "render_success": + elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else "" + self.stdout.write(self.style.SUCCESS(" DONE" + elapsed)) + + def sync_apps(self, connection, app_labels): + """Run the old syncdb-style operation on a list of app_labels.""" + with connection.cursor() as cursor: + tables = connection.introspection.table_names(cursor) + + # Build the manifest of apps and models that are to be synchronized. 
+ all_models = [ + ( + app_config.label, + router.get_migratable_models(app_config, connection.alias, include_auto_created=False), + ) + for app_config in apps.get_app_configs() + if app_config.models_module is not None and app_config.label in app_labels + ] + + def model_installed(model): + opts = model._meta + converter = connection.introspection.table_name_converter + return not ( + (converter(opts.db_table) in tables) or + (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables) + ) + + manifest = OrderedDict( + (app_name, list(filter(model_installed, model_list))) + for app_name, model_list in all_models + ) + + # Create the tables for each model + if self.verbosity >= 1: + self.stdout.write(" Creating tables...\n") + with connection.schema_editor() as editor: + for app_name, model_list in manifest.items(): + for model in model_list: + # Never install unmanaged models, etc. + if not model._meta.can_migrate(connection): + continue + if self.verbosity >= 3: + self.stdout.write( + " Processing %s.%s model\n" % (app_name, model._meta.object_name) + ) + if self.verbosity >= 1: + self.stdout.write(" Creating table %s\n" % model._meta.db_table) + editor.create_model(model) + + # Deferred SQL is executed when exiting the editor's context. 
+ if self.verbosity >= 1: + self.stdout.write(" Running deferred SQL...\n") diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/runserver.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/runserver.py new file mode 100644 index 0000000000000000000000000000000000000000..033099923d99c0666a4dbf5b99473e7bf69849bb --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/runserver.py @@ -0,0 +1,164 @@ +import errno +import os +import re +import socket +import sys +from datetime import datetime + +from django.conf import settings +from django.core.management.base import BaseCommand, CommandError +from django.core.servers.basehttp import ( + WSGIServer, get_internal_wsgi_application, run, +) +from django.utils import autoreload + +naiveip_re = re.compile(r"""^(?: +(?P + (?P\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address + (?P\[[a-fA-F0-9:]+\]) | # IPv6 address + (?P[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN +):)?(?P\d+)$""", re.X) + + +class Command(BaseCommand): + help = "Starts a lightweight Web server for development." + + # Validation is called explicitly each time the server is reloaded. 
+ requires_system_checks = False + leave_locale_alone = True + stealth_options = ('shutdown_message',) + + default_addr = '127.0.0.1' + default_addr_ipv6 = '::1' + default_port = '8000' + protocol = 'http' + server_cls = WSGIServer + + def add_arguments(self, parser): + parser.add_argument( + 'addrport', nargs='?', + help='Optional port number, or ipaddr:port' + ) + parser.add_argument( + '--ipv6', '-6', action='store_true', dest='use_ipv6', + help='Tells Django to use an IPv6 address.', + ) + parser.add_argument( + '--nothreading', action='store_false', dest='use_threading', + help='Tells Django to NOT use threading.', + ) + parser.add_argument( + '--noreload', action='store_false', dest='use_reloader', + help='Tells Django to NOT use the auto-reloader.', + ) + + def execute(self, *args, **options): + if options['no_color']: + # We rely on the environment because it's currently the only + # way to reach WSGIRequestHandler. This seems an acceptable + # compromise considering `runserver` runs indefinitely. + os.environ["DJANGO_COLORS"] = "nocolor" + super().execute(*args, **options) + + def get_handler(self, *args, **options): + """Return the default WSGI handler for the runner.""" + return get_internal_wsgi_application() + + def handle(self, *args, **options): + from django.conf import settings + + if not settings.DEBUG and not settings.ALLOWED_HOSTS: + raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.') + + self.use_ipv6 = options['use_ipv6'] + if self.use_ipv6 and not socket.has_ipv6: + raise CommandError('Your Python does not support IPv6.') + self._raw_ipv6 = False + if not options['addrport']: + self.addr = '' + self.port = self.default_port + else: + m = re.match(naiveip_re, options['addrport']) + if m is None: + raise CommandError('"%s" is not a valid port number ' + 'or address:port pair.' 
% options['addrport']) + self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups() + if not self.port.isdigit(): + raise CommandError("%r is not a valid port number." % self.port) + if self.addr: + if _ipv6: + self.addr = self.addr[1:-1] + self.use_ipv6 = True + self._raw_ipv6 = True + elif self.use_ipv6 and not _fqdn: + raise CommandError('"%s" is not a valid IPv6 address.' % self.addr) + if not self.addr: + self.addr = self.default_addr_ipv6 if self.use_ipv6 else self.default_addr + self._raw_ipv6 = self.use_ipv6 + self.run(**options) + + def run(self, **options): + """Run the server, using the autoreloader if needed.""" + use_reloader = options['use_reloader'] + + if use_reloader: + autoreload.main(self.inner_run, None, options) + else: + self.inner_run(None, **options) + + def inner_run(self, *args, **options): + # If an exception was silenced in ManagementUtility.execute in order + # to be raised in the child process, raise it now. + autoreload.raise_last_exception() + + threading = options['use_threading'] + # 'shutdown_message' is a stealth option. + shutdown_message = options.get('shutdown_message', '') + quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C' + + self.stdout.write("Performing system checks...\n\n") + self.check(display_num_errors=True) + # Need to check migrations here, so can't use the + # requires_migrations_check attribute. 
+ self.check_migrations() + now = datetime.now().strftime('%B %d, %Y - %X') + self.stdout.write(now) + self.stdout.write(( + "Django version %(version)s, using settings %(settings)r\n" + "Starting development server at %(protocol)s://%(addr)s:%(port)s/\n" + "Quit the server with %(quit_command)s.\n" + ) % { + "version": self.get_version(), + "settings": settings.SETTINGS_MODULE, + "protocol": self.protocol, + "addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr, + "port": self.port, + "quit_command": quit_command, + }) + + try: + handler = self.get_handler(*args, **options) + run(self.addr, int(self.port), handler, + ipv6=self.use_ipv6, threading=threading, server_cls=self.server_cls) + except socket.error as e: + # Use helpful error messages instead of ugly tracebacks. + ERRORS = { + errno.EACCES: "You don't have permission to access that port.", + errno.EADDRINUSE: "That port is already in use.", + errno.EADDRNOTAVAIL: "That IP address can't be assigned to.", + } + try: + error_text = ERRORS[e.errno] + except KeyError: + error_text = e + self.stderr.write("Error: %s" % error_text) + # Need to use an OS exit because sys.exit doesn't work in a thread + os._exit(1) + except KeyboardInterrupt: + if shutdown_message: + self.stdout.write(shutdown_message) + sys.exit(0) + + +# Kept for backward compatibility +BaseRunserverCommand = Command diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/sendtestemail.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/sendtestemail.py new file mode 100644 index 0000000000000000000000000000000000000000..1be789d3dd2308d0d556bd90e3f1b1dfc52b2105 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/sendtestemail.py @@ -0,0 +1,40 @@ +import socket + +from django.core.mail import mail_admins, mail_managers, send_mail +from 
django.core.management.base import BaseCommand +from django.utils import timezone + + +class Command(BaseCommand): + help = "Sends a test email to the email addresses specified as arguments." + missing_args_message = "You must specify some email recipients, or pass the --managers or --admin options." + + def add_arguments(self, parser): + parser.add_argument( + 'email', nargs='*', + help='One or more email addresses to send a test email to.', + ) + parser.add_argument( + '--managers', action='store_true', dest='managers', + help='Send a test email to the addresses specified in settings.MANAGERS.', + ) + parser.add_argument( + '--admins', action='store_true', dest='admins', + help='Send a test email to the addresses specified in settings.ADMINS.', + ) + + def handle(self, *args, **kwargs): + subject = 'Test email from %s on %s' % (socket.gethostname(), timezone.now()) + + send_mail( + subject=subject, + message="If you\'re reading this, it was successful.", + from_email=None, + recipient_list=kwargs['email'], + ) + + if kwargs['managers']: + mail_managers(subject, "This email was sent to the site managers.") + + if kwargs['admins']: + mail_admins(subject, "This email was sent to the site admins.") diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/shell.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/shell.py new file mode 100644 index 0000000000000000000000000000000000000000..7c9a43434a0ffc88f80f3c3a355f3256e4f36499 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/shell.py @@ -0,0 +1,102 @@ +import os +import select +import sys +import traceback + +from django.core.management import BaseCommand, CommandError +from django.utils.datastructures import OrderedSet + + +class Command(BaseCommand): + help = ( + "Runs a Python interactive interpreter. 
Tries to use IPython or " + "bpython, if one of them is available. Any standard input is executed " + "as code." + ) + + requires_system_checks = False + shells = ['ipython', 'bpython', 'python'] + + def add_arguments(self, parser): + parser.add_argument( + '--no-startup', action='store_true', dest='no_startup', + help='When using plain Python, ignore the PYTHONSTARTUP environment variable and ~/.pythonrc.py script.', + ) + parser.add_argument( + '-i', '--interface', choices=self.shells, dest='interface', + help='Specify an interactive interpreter interface. Available options: "ipython", "bpython", and "python"', + ) + parser.add_argument( + '-c', '--command', dest='command', + help='Instead of opening an interactive shell, run a command as Django and exit.', + ) + + def ipython(self, options): + from IPython import start_ipython + start_ipython(argv=[]) + + def bpython(self, options): + import bpython + bpython.embed() + + def python(self, options): + import code + # Set up a dictionary to serve as the environment for the shell, so + # that tab completion works on objects that are imported at runtime. + imported_objects = {} + try: # Try activating rlcompleter, because it's handy. + import readline + except ImportError: + pass + else: + # We don't have to wrap the following import in a 'try', because + # we already know 'readline' was imported successfully. + import rlcompleter + readline.set_completer(rlcompleter.Completer(imported_objects).complete) + # Enable tab completion on systems using libedit (e.g. macOS). + # These lines are copied from Python's Lib/site.py. + readline_doc = getattr(readline, '__doc__', '') + if readline_doc is not None and 'libedit' in readline_doc: + readline.parse_and_bind("bind ^I rl_complete") + else: + readline.parse_and_bind("tab:complete") + + # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system + # conventions and get $PYTHONSTARTUP first then .pythonrc.py. 
+ if not options['no_startup']: + for pythonrc in OrderedSet([os.environ.get("PYTHONSTARTUP"), os.path.expanduser('~/.pythonrc.py')]): + if not pythonrc: + continue + if not os.path.isfile(pythonrc): + continue + with open(pythonrc) as handle: + pythonrc_code = handle.read() + # Match the behavior of the cpython shell where an error in + # PYTHONSTARTUP prints an exception and continues. + try: + exec(compile(pythonrc_code, pythonrc, 'exec'), imported_objects) + except Exception: + traceback.print_exc() + + code.interact(local=imported_objects) + + def handle(self, **options): + # Execute the command and exit. + if options['command']: + exec(options['command']) + return + + # Execute stdin if it has anything to read and exit. + # Not supported on Windows due to select.select() limitations. + if sys.platform != 'win32' and select.select([sys.stdin], [], [], 0)[0]: + exec(sys.stdin.read()) + return + + available_shells = [options['interface']] if options['interface'] else self.shells + + for shell in available_shells: + try: + return getattr(self, shell)(options) + except ImportError: + pass + raise CommandError("Couldn't import {} interface.".format(shell)) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/showmigrations.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/showmigrations.py new file mode 100644 index 0000000000000000000000000000000000000000..4130d4438998b7d531c2e0fabfbd03ad07470c1a --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/showmigrations.py @@ -0,0 +1,131 @@ +from django.core.management.base import BaseCommand, CommandError +from django.db import DEFAULT_DB_ALIAS, connections +from django.db.migrations.loader import MigrationLoader + + +class Command(BaseCommand): + help = "Shows all available migrations for the current project" + + def 
add_arguments(self, parser): + parser.add_argument( + 'app_label', nargs='*', + help='App labels of applications to limit the output to.', + ) + parser.add_argument( + '--database', action='store', dest='database', default=DEFAULT_DB_ALIAS, + help='Nominates a database to synchronize. Defaults to the "default" database.', + ) + + formats = parser.add_mutually_exclusive_group() + formats.add_argument( + '--list', '-l', action='store_const', dest='format', const='list', + help='Shows a list of all migrations and which are applied.', + ) + formats.add_argument( + '--plan', '-p', action='store_const', dest='format', const='plan', + help=( + 'Shows all migrations in the order they will be applied. ' + 'With a verbosity level of 2 or above all direct migration dependencies ' + 'and reverse dependencies (run_before) will be included.' + ) + ) + + parser.set_defaults(format='list') + + def handle(self, *args, **options): + self.verbosity = options['verbosity'] + + # Get the database we're operating from + db = options['database'] + connection = connections[db] + + if options['format'] == "plan": + return self.show_plan(connection, options['app_label']) + else: + return self.show_list(connection, options['app_label']) + + def _validate_app_names(self, loader, app_names): + invalid_apps = [] + for app_name in app_names: + if app_name not in loader.migrated_apps: + invalid_apps.append(app_name) + if invalid_apps: + raise CommandError('No migrations present for: %s' % (', '.join(sorted(invalid_apps)))) + + def show_list(self, connection, app_names=None): + """ + Show a list of all migrations on the system, or only those of + some named apps. 
+ """ + # Load migrations from disk/DB + loader = MigrationLoader(connection, ignore_no_migrations=True) + graph = loader.graph + # If we were passed a list of apps, validate it + if app_names: + self._validate_app_names(loader, app_names) + # Otherwise, show all apps in alphabetic order + else: + app_names = sorted(loader.migrated_apps) + # For each app, print its migrations in order from oldest (roots) to + # newest (leaves). + for app_name in app_names: + self.stdout.write(app_name, self.style.MIGRATE_LABEL) + shown = set() + for node in graph.leaf_nodes(app_name): + for plan_node in graph.forwards_plan(node): + if plan_node not in shown and plan_node[0] == app_name: + # Give it a nice title if it's a squashed one + title = plan_node[1] + if graph.nodes[plan_node].replaces: + title += " (%s squashed migrations)" % len(graph.nodes[plan_node].replaces) + # Mark it as applied/unapplied + if plan_node in loader.applied_migrations: + self.stdout.write(" [X] %s" % title) + else: + self.stdout.write(" [ ] %s" % title) + shown.add(plan_node) + # If we didn't print anything, then a small message + if not shown: + self.stdout.write(" (no migrations)", self.style.ERROR) + + def show_plan(self, connection, app_names=None): + """ + Show all known migrations (or only those of the specified app_names) + in the order they will be applied. + """ + # Load migrations from disk/DB + loader = MigrationLoader(connection) + graph = loader.graph + if app_names: + self._validate_app_names(loader, app_names) + targets = [key for key in graph.leaf_nodes() if key[0] in app_names] + else: + targets = graph.leaf_nodes() + plan = [] + seen = set() + + # Generate the plan + for target in targets: + for migration in graph.forwards_plan(target): + if migration not in seen: + node = graph.node_map[migration] + plan.append(node) + seen.add(migration) + + # Output + def print_deps(node): + out = [] + for parent in sorted(node.parents): + out.append("%s.%s" % parent.key) + if out: + return " ... 
(%s)" % ", ".join(out) + return "" + + for node in plan: + deps = "" + if self.verbosity >= 2: + deps = print_deps(node) + if node.key in loader.applied_migrations: + self.stdout.write("[X] %s.%s%s" % (node.key[0], node.key[1], deps)) + else: + self.stdout.write("[ ] %s.%s%s" % (node.key[0], node.key[1], deps)) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/sqlflush.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/sqlflush.py new file mode 100644 index 0000000000000000000000000000000000000000..60e69e67bf505e41f906e4c0166ccb668b764a5b --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/sqlflush.py @@ -0,0 +1,22 @@ +from django.core.management.base import BaseCommand +from django.core.management.sql import sql_flush +from django.db import DEFAULT_DB_ALIAS, connections + + +class Command(BaseCommand): + help = ( + "Returns a list of the SQL statements required to return all tables in " + "the database to the state they were in just after they were installed." + ) + + output_transaction = True + + def add_arguments(self, parser): + super().add_arguments(parser) + parser.add_argument( + '--database', default=DEFAULT_DB_ALIAS, + help='Nominates a database to print the SQL for. 
Defaults to the "default" database.', + ) + + def handle(self, **options): + return '\n'.join(sql_flush(self.style, connections[options['database']], only_django=True)) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/sqlmigrate.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/sqlmigrate.py new file mode 100644 index 0000000000000000000000000000000000000000..4d0b08b175621cf2f27ea5fa296f8d528e3e39e2 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/sqlmigrate.py @@ -0,0 +1,59 @@ +from django.core.management.base import BaseCommand, CommandError +from django.db import DEFAULT_DB_ALIAS, connections +from django.db.migrations.executor import MigrationExecutor +from django.db.migrations.loader import AmbiguityError + + +class Command(BaseCommand): + help = "Prints the SQL statements for the named migration." + + output_transaction = True + + def add_arguments(self, parser): + parser.add_argument('app_label', help='App label of the application containing the migration.') + parser.add_argument('migration_name', help='Migration name to print the SQL for.') + parser.add_argument( + '--database', default=DEFAULT_DB_ALIAS, + help='Nominates a database to create SQL for. Defaults to the "default" database.', + ) + parser.add_argument( + '--backwards', action='store_true', dest='backwards', + help='Creates SQL to unapply the migration, rather than to apply it', + ) + + def execute(self, *args, **options): + # sqlmigrate doesn't support coloring its output but we need to force + # no_color=True so that the BEGIN/COMMIT statements added by + # output_transaction don't get colored either. 
+ options['no_color'] = True + return super().execute(*args, **options) + + def handle(self, *args, **options): + # Get the database we're operating from + connection = connections[options['database']] + + # Load up an executor to get all the migration data + executor = MigrationExecutor(connection) + + # Resolve command-line arguments into a migration + app_label, migration_name = options['app_label'], options['migration_name'] + if app_label not in executor.loader.migrated_apps: + raise CommandError("App '%s' does not have migrations" % app_label) + try: + migration = executor.loader.get_migration_by_prefix(app_label, migration_name) + except AmbiguityError: + raise CommandError("More than one migration matches '%s' in app '%s'. Please be more specific." % ( + migration_name, app_label)) + except KeyError: + raise CommandError("Cannot find a migration matching '%s' from app '%s'. Is it in INSTALLED_APPS?" % ( + migration_name, app_label)) + targets = [(app_label, migration.name)] + + # Show begin/end around output only for atomic migrations + self.output_transaction = migration.atomic + + # Make a plan that represents just the requested migrations and show SQL + # for it + plan = [(executor.loader.graph.nodes[targets[0]], options['backwards'])] + sql_statements = executor.collect_sql(plan) + return '\n'.join(sql_statements) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/sqlsequencereset.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/sqlsequencereset.py new file mode 100644 index 0000000000000000000000000000000000000000..d23f89ce1f1e819aebb01b35115f1c32d26acce0 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/sqlsequencereset.py @@ -0,0 +1,23 @@ +from django.core.management.base import AppCommand +from django.db import DEFAULT_DB_ALIAS, connections + + +class 
Command(AppCommand): + help = 'Prints the SQL statements for resetting sequences for the given app name(s).' + + output_transaction = True + + def add_arguments(self, parser): + super().add_arguments(parser) + parser.add_argument( + '--database', default=DEFAULT_DB_ALIAS, + help='Nominates a database to print the SQL for. Defaults to the "default" database.', + ) + + def handle_app_config(self, app_config, **options): + if app_config.models_module is None: + return + connection = connections[options['database']] + models = app_config.get_models(include_auto_created=True) + statements = connection.ops.sequence_reset_sql(self.style, models) + return '\n'.join(statements) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/squashmigrations.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/squashmigrations.py new file mode 100644 index 0000000000000000000000000000000000000000..60faf6cc1f2ff516eee71e34f9de0682cc220228 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/squashmigrations.py @@ -0,0 +1,204 @@ +from django.conf import settings +from django.core.management.base import BaseCommand, CommandError +from django.db import DEFAULT_DB_ALIAS, connections, migrations +from django.db.migrations.loader import AmbiguityError, MigrationLoader +from django.db.migrations.migration import SwappableTuple +from django.db.migrations.optimizer import MigrationOptimizer +from django.db.migrations.writer import MigrationWriter +from django.utils.version import get_docs_version + + +class Command(BaseCommand): + help = "Squashes an existing set of migrations (from first until specified) into a single new one." 
+ + def add_arguments(self, parser): + parser.add_argument( + 'app_label', + help='App label of the application to squash migrations for.', + ) + parser.add_argument( + 'start_migration_name', default=None, nargs='?', + help='Migrations will be squashed starting from and including this migration.', + ) + parser.add_argument( + 'migration_name', + help='Migrations will be squashed until and including this migration.', + ) + parser.add_argument( + '--no-optimize', action='store_true', dest='no_optimize', + help='Do not try to optimize the squashed operations.', + ) + parser.add_argument( + '--noinput', '--no-input', action='store_false', dest='interactive', + help='Tells Django to NOT prompt the user for input of any kind.', + ) + parser.add_argument( + '--squashed-name', dest='squashed_name', + help='Sets the name of the new squashed migration.', + ) + + def handle(self, **options): + + self.verbosity = options['verbosity'] + self.interactive = options['interactive'] + app_label = options['app_label'] + start_migration_name = options['start_migration_name'] + migration_name = options['migration_name'] + no_optimize = options['no_optimize'] + squashed_name = options['squashed_name'] + + # Load the current graph state, check the app and migration they asked for exists + loader = MigrationLoader(connections[DEFAULT_DB_ALIAS]) + if app_label not in loader.migrated_apps: + raise CommandError( + "App '%s' does not have migrations (so squashmigrations on " + "it makes no sense)" % app_label + ) + + migration = self.find_migration(loader, app_label, migration_name) + + # Work out the list of predecessor migrations + migrations_to_squash = [ + loader.get_migration(al, mn) + for al, mn in loader.graph.forwards_plan((migration.app_label, migration.name)) + if al == migration.app_label + ] + + if start_migration_name: + start_migration = self.find_migration(loader, app_label, start_migration_name) + start = loader.get_migration(start_migration.app_label, start_migration.name) + 
try: + start_index = migrations_to_squash.index(start) + migrations_to_squash = migrations_to_squash[start_index:] + except ValueError: + raise CommandError( + "The migration '%s' cannot be found. Maybe it comes after " + "the migration '%s'?\n" + "Have a look at:\n" + " python manage.py showmigrations %s\n" + "to debug this issue." % (start_migration, migration, app_label) + ) + + # Tell them what we're doing and optionally ask if we should proceed + if self.verbosity > 0 or self.interactive: + self.stdout.write(self.style.MIGRATE_HEADING("Will squash the following migrations:")) + for migration in migrations_to_squash: + self.stdout.write(" - %s" % migration.name) + + if self.interactive: + answer = None + while not answer or answer not in "yn": + answer = input("Do you wish to proceed? [yN] ") + if not answer: + answer = "n" + break + else: + answer = answer[0].lower() + if answer != "y": + return + + # Load the operations from all those migrations and concat together, + # along with collecting external dependencies and detecting + # double-squashing + operations = [] + dependencies = set() + # We need to take all dependencies from the first migration in the list + # as it may be 0002 depending on 0001 + first_migration = True + for smigration in migrations_to_squash: + if smigration.replaces: + raise CommandError( + "You cannot squash squashed migrations! 
Please transition " + "it to a normal migration first: " + "https://docs.djangoproject.com/en/%s/topics/migrations/#squashing-migrations" % get_docs_version() + ) + operations.extend(smigration.operations) + for dependency in smigration.dependencies: + if isinstance(dependency, SwappableTuple): + if settings.AUTH_USER_MODEL == dependency.setting: + dependencies.add(("__setting__", "AUTH_USER_MODEL")) + else: + dependencies.add(dependency) + elif dependency[0] != smigration.app_label or first_migration: + dependencies.add(dependency) + first_migration = False + + if no_optimize: + if self.verbosity > 0: + self.stdout.write(self.style.MIGRATE_HEADING("(Skipping optimization.)")) + new_operations = operations + else: + if self.verbosity > 0: + self.stdout.write(self.style.MIGRATE_HEADING("Optimizing...")) + + optimizer = MigrationOptimizer() + new_operations = optimizer.optimize(operations, migration.app_label) + + if self.verbosity > 0: + if len(new_operations) == len(operations): + self.stdout.write(" No optimizations possible.") + else: + self.stdout.write( + " Optimized from %s operations to %s operations." % + (len(operations), len(new_operations)) + ) + + # Work out the value of replaces (any squashed ones we're re-squashing) + # need to feed their replaces into ours + replaces = [] + for migration in migrations_to_squash: + if migration.replaces: + replaces.extend(migration.replaces) + else: + replaces.append((migration.app_label, migration.name)) + + # Make a new migration with those operations + subclass = type("Migration", (migrations.Migration, ), { + "dependencies": dependencies, + "operations": new_operations, + "replaces": replaces, + }) + if start_migration_name: + if squashed_name: + # Use the name from --squashed-name. + prefix, _ = start_migration.name.split('_', 1) + name = '%s_%s' % (prefix, squashed_name) + else: + # Generate a name. 
+ name = '%s_squashed_%s' % (start_migration.name, migration.name) + new_migration = subclass(name, app_label) + else: + name = '0001_%s' % (squashed_name or 'squashed_%s' % migration.name) + new_migration = subclass(name, app_label) + new_migration.initial = True + + # Write out the new migration file + writer = MigrationWriter(new_migration) + with open(writer.path, "w", encoding='utf-8') as fh: + fh.write(writer.as_string()) + + if self.verbosity > 0: + self.stdout.write(self.style.MIGRATE_HEADING("Created new squashed migration %s" % writer.path)) + self.stdout.write(" You should commit this migration but leave the old ones in place;") + self.stdout.write(" the new migration will be used for new installs. Once you are sure") + self.stdout.write(" all instances of the codebase have applied the migrations you squashed,") + self.stdout.write(" you can delete them.") + if writer.needs_manual_porting: + self.stdout.write(self.style.MIGRATE_HEADING("Manual porting required")) + self.stdout.write(" Your migrations contained functions that must be manually copied over,") + self.stdout.write(" as we could not safely copy their implementation.") + self.stdout.write(" See the comment at the top of the squashed migration for details.") + + def find_migration(self, loader, app_label, name): + try: + return loader.get_migration_by_prefix(app_label, name) + except AmbiguityError: + raise CommandError( + "More than one migration matches '%s' in app '%s'. Please be " + "more specific." % (name, app_label) + ) + except KeyError: + raise CommandError( + "Cannot find a migration matching '%s' from app '%s'." 
% + (name, app_label) + ) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/startapp.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/startapp.py new file mode 100644 index 0000000000000000000000000000000000000000..bba9f3dee0986578755e99e294991a44ce5ebfa5 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/startapp.py @@ -0,0 +1,14 @@ +from django.core.management.templates import TemplateCommand + + +class Command(TemplateCommand): + help = ( + "Creates a Django app directory structure for the given app name in " + "the current directory or optionally in the given directory." + ) + missing_args_message = "You must provide an application name." + + def handle(self, **options): + app_name = options.pop('name') + target = options.pop('directory') + super().handle('app', app_name, target, **options) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/startproject.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/startproject.py new file mode 100644 index 0000000000000000000000000000000000000000..7e09a25e914b600c30236e6e0468ee3b6439b679 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/startproject.py @@ -0,0 +1,20 @@ +from django.core.management.templates import TemplateCommand + +from ..utils import get_random_secret_key + + +class Command(TemplateCommand): + help = ( + "Creates a Django project directory structure for the given project " + "name in the current directory or optionally in the given directory." + ) + missing_args_message = "You must provide a project name." 
+ + def handle(self, **options): + project_name = options.pop('name') + target = options.pop('directory') + + # Create a random SECRET_KEY to put it in the main settings. + options['secret_key'] = get_random_secret_key() + + super().handle('project', project_name, target, **options) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/test.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/test.py new file mode 100644 index 0000000000000000000000000000000000000000..4e4016ed068650887bc0c6622da2718c90750dce --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/test.py @@ -0,0 +1,62 @@ +import sys + +from django.conf import settings +from django.core.management.base import BaseCommand +from django.test.utils import get_runner + + +class Command(BaseCommand): + help = 'Discover and run tests in the specified modules or the current directory.' + + # DiscoverRunner runs the checks after databases are set up. + requires_system_checks = False + test_runner = None + + def run_from_argv(self, argv): + """ + Pre-parse the command line to extract the value of the --testrunner + option. This allows a test runner to define additional command line + arguments. 
+ """ + option = '--testrunner=' + for arg in argv[2:]: + if arg.startswith(option): + self.test_runner = arg[len(option):] + break + super().run_from_argv(argv) + + def add_arguments(self, parser): + parser.add_argument( + 'args', metavar='test_label', nargs='*', + help='Module paths to test; can be modulename, modulename.TestCase or modulename.TestCase.test_method' + ) + parser.add_argument( + '--noinput', '--no-input', action='store_false', dest='interactive', + help='Tells Django to NOT prompt the user for input of any kind.', + ) + parser.add_argument( + '--failfast', action='store_true', dest='failfast', + help='Tells Django to stop running the test suite after first failed test.', + ) + parser.add_argument( + '--testrunner', action='store', dest='testrunner', + help='Tells Django to use specified test runner class instead of ' + 'the one specified by the TEST_RUNNER setting.', + ) + + test_runner_class = get_runner(settings, self.test_runner) + + if hasattr(test_runner_class, 'add_arguments'): + test_runner_class.add_arguments(parser) + + def handle(self, *test_labels, **options): + from django.conf import settings + from django.test.utils import get_runner + + TestRunner = get_runner(settings, options['testrunner']) + + test_runner = TestRunner(**options) + failures = test_runner.run_tests(test_labels) + + if failures: + sys.exit(1) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/testserver.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/testserver.py new file mode 100644 index 0000000000000000000000000000000000000000..03814e5ea31a2e43f5262d5acf6703949fab410c --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/commands/testserver.py @@ -0,0 +1,54 @@ +from django.core.management import call_command +from django.core.management.base import BaseCommand +from django.db import 
connection + + +class Command(BaseCommand): + help = 'Runs a development server with data from the given fixture(s).' + + requires_system_checks = False + + def add_arguments(self, parser): + parser.add_argument( + 'args', metavar='fixture', nargs='*', + help='Path(s) to fixtures to load before running the server.', + ) + parser.add_argument( + '--noinput', '--no-input', action='store_false', dest='interactive', + help='Tells Django to NOT prompt the user for input of any kind.', + ) + parser.add_argument( + '--addrport', default='', + help='Port number or ipaddr:port to run the server on.', + ) + parser.add_argument( + '--ipv6', '-6', action='store_true', dest='use_ipv6', + help='Tells Django to use an IPv6 address.', + ) + + def handle(self, *fixture_labels, **options): + verbosity = options['verbosity'] + interactive = options['interactive'] + + # Create a test database. + db_name = connection.creation.create_test_db(verbosity=verbosity, autoclobber=not interactive, serialize=False) + + # Import the fixture data into the test database. + call_command('loaddata', *fixture_labels, **{'verbosity': verbosity}) + + # Run the development server. Turn off auto-reloading because it causes + # a strange error -- it causes this handle() method to be called + # multiple times. + shutdown_message = ( + '\nServer stopped.\nNote that the test database, %r, has not been ' + 'deleted. You can explore it on your own.' 
% db_name + ) + use_threading = connection.features.test_db_allows_multiple_connections + call_command( + 'runserver', + addrport=options['addrport'], + shutdown_message=shutdown_message, + use_reloader=False, + use_ipv6=options['use_ipv6'], + use_threading=use_threading + ) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/sql.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/sql.py new file mode 100644 index 0000000000000000000000000000000000000000..44b57b386a5371f54a8bd5599babc3cbf3e38870 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/sql.py @@ -0,0 +1,52 @@ +from django.apps import apps +from django.db import models + + +def sql_flush(style, connection, only_django=False, reset_sequences=True, allow_cascade=False): + """ + Return a list of the SQL statements used to flush the database. + + If only_django is True, only include the table names that have associated + Django models and are in INSTALLED_APPS . + """ + if only_django: + tables = connection.introspection.django_table_names(only_existing=True, include_views=False) + else: + tables = connection.introspection.table_names(include_views=False) + seqs = connection.introspection.sequence_list() if reset_sequences else () + statements = connection.ops.sql_flush(style, tables, seqs, allow_cascade) + return statements + + +def emit_pre_migrate_signal(verbosity, interactive, db, **kwargs): + # Emit the pre_migrate signal for every application. 
+ for app_config in apps.get_app_configs(): + if app_config.models_module is None: + continue + if verbosity >= 2: + print("Running pre-migrate handlers for application %s" % app_config.label) + models.signals.pre_migrate.send( + sender=app_config, + app_config=app_config, + verbosity=verbosity, + interactive=interactive, + using=db, + **kwargs + ) + + +def emit_post_migrate_signal(verbosity, interactive, db, **kwargs): + # Emit the post_migrate signal for every application. + for app_config in apps.get_app_configs(): + if app_config.models_module is None: + continue + if verbosity >= 2: + print("Running post-migrate handlers for application %s" % app_config.label) + models.signals.post_migrate.send( + sender=app_config, + app_config=app_config, + verbosity=verbosity, + interactive=interactive, + using=db, + **kwargs + ) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/templates.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/templates.py new file mode 100644 index 0000000000000000000000000000000000000000..3fcd25f951c460fd385d39639d41ab0a7d85b6c1 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/templates.py @@ -0,0 +1,340 @@ +import cgi +import mimetypes +import os +import posixpath +import re +import shutil +import stat +import tempfile +from importlib import import_module +from os import path +from urllib.request import urlretrieve + +import django +from django.conf import settings +from django.core.management.base import BaseCommand, CommandError +from django.core.management.utils import handle_extensions +from django.template import Context, Engine +from django.utils import archive +from django.utils.version import get_docs_version + +_drive_re = re.compile('^([a-z]):', re.I) +_url_drive_re = re.compile('^([a-z])[:|]', re.I) + + +class TemplateCommand(BaseCommand): + """ + Copy either a Django 
application layout template or a Django project + layout template into the specified directory. + + :param style: A color style object (see django.core.management.color). + :param app_or_project: The string 'app' or 'project'. + :param name: The name of the application or project. + :param directory: The directory to which the template should be copied. + :param options: The additional variables passed to project or app templates + """ + requires_system_checks = False + # The supported URL schemes + url_schemes = ['http', 'https', 'ftp'] + # Can't perform any active locale changes during this command, because + # setting might not be available at all. + leave_locale_alone = True + # Rewrite the following suffixes when determining the target filename. + rewrite_template_suffixes = ( + # Allow shipping invalid .py files without byte-compilation. + ('.py-tpl', '.py'), + ) + + def add_arguments(self, parser): + parser.add_argument('name', help='Name of the application or project.') + parser.add_argument('directory', nargs='?', help='Optional destination directory') + parser.add_argument('--template', help='The path or URL to load the template from.') + parser.add_argument( + '--extension', '-e', dest='extensions', + action='append', default=['py'], + help='The file extension(s) to render (default: "py"). ' + 'Separate multiple extensions with commas, or use ' + '-e multiple times.' + ) + parser.add_argument( + '--name', '-n', dest='files', + action='append', default=[], + help='The file name(s) to render. Separate multiple file names ' + 'with commas, or use -n multiple times.' 
+ ) + + def handle(self, app_or_project, name, target=None, **options): + self.app_or_project = app_or_project + self.paths_to_remove = [] + self.verbosity = options['verbosity'] + + self.validate_name(name, app_or_project) + + # if some directory is given, make sure it's nicely expanded + if target is None: + top_dir = path.join(os.getcwd(), name) + try: + os.makedirs(top_dir) + except FileExistsError: + raise CommandError("'%s' already exists" % top_dir) + except OSError as e: + raise CommandError(e) + else: + top_dir = os.path.abspath(path.expanduser(target)) + if not os.path.exists(top_dir): + raise CommandError("Destination directory '%s' does not " + "exist, please create it first." % top_dir) + + extensions = tuple(handle_extensions(options['extensions'])) + extra_files = [] + for file in options['files']: + extra_files.extend(map(lambda x: x.strip(), file.split(','))) + if self.verbosity >= 2: + self.stdout.write("Rendering %s template files with " + "extensions: %s\n" % + (app_or_project, ', '.join(extensions))) + self.stdout.write("Rendering %s template files with " + "filenames: %s\n" % + (app_or_project, ', '.join(extra_files))) + + base_name = '%s_name' % app_or_project + base_subdir = '%s_template' % app_or_project + base_directory = '%s_directory' % app_or_project + camel_case_name = 'camel_case_%s_name' % app_or_project + camel_case_value = ''.join(x for x in name.title() if x != '_') + + context = Context(dict(options, **{ + base_name: name, + base_directory: top_dir, + camel_case_name: camel_case_value, + 'docs_version': get_docs_version(), + 'django_version': django.__version__, + }), autoescape=False) + + # Setup a stub settings environment for template rendering + if not settings.configured: + settings.configure() + django.setup() + + template_dir = self.handle_template(options['template'], + base_subdir) + prefix_length = len(template_dir) + 1 + + for root, dirs, files in os.walk(template_dir): + + path_rest = root[prefix_length:] + 
relative_dir = path_rest.replace(base_name, name) + if relative_dir: + target_dir = path.join(top_dir, relative_dir) + if not path.exists(target_dir): + os.mkdir(target_dir) + + for dirname in dirs[:]: + if dirname.startswith('.') or dirname == '__pycache__': + dirs.remove(dirname) + + for filename in files: + if filename.endswith(('.pyo', '.pyc', '.py.class')): + # Ignore some files as they cause various breakages. + continue + old_path = path.join(root, filename) + new_path = path.join(top_dir, relative_dir, + filename.replace(base_name, name)) + for old_suffix, new_suffix in self.rewrite_template_suffixes: + if new_path.endswith(old_suffix): + new_path = new_path[:-len(old_suffix)] + new_suffix + break # Only rewrite once + + if path.exists(new_path): + raise CommandError("%s already exists, overlaying a " + "project or app into an existing " + "directory won't replace conflicting " + "files" % new_path) + + # Only render the Python files, as we don't want to + # accidentally render Django templates files + if new_path.endswith(extensions) or filename in extra_files: + with open(old_path, 'r', encoding='utf-8') as template_file: + content = template_file.read() + template = Engine().from_string(content) + content = template.render(context) + with open(new_path, 'w', encoding='utf-8') as new_file: + new_file.write(content) + else: + shutil.copyfile(old_path, new_path) + + if self.verbosity >= 2: + self.stdout.write("Creating %s\n" % new_path) + try: + shutil.copymode(old_path, new_path) + self.make_writeable(new_path) + except OSError: + self.stderr.write( + "Notice: Couldn't set permission bits on %s. You're " + "probably using an uncommon filesystem setup. No " + "problem." 
% new_path, self.style.NOTICE) + + if self.paths_to_remove: + if self.verbosity >= 2: + self.stdout.write("Cleaning up temporary files.\n") + for path_to_remove in self.paths_to_remove: + if path.isfile(path_to_remove): + os.remove(path_to_remove) + else: + shutil.rmtree(path_to_remove) + + def handle_template(self, template, subdir): + """ + Determine where the app or project templates are. + Use django.__path__[0] as the default because the Django install + directory isn't known. + """ + if template is None: + return path.join(django.__path__[0], 'conf', subdir) + else: + if template.startswith('file://'): + template = template[7:] + expanded_template = path.expanduser(template) + expanded_template = path.normpath(expanded_template) + if path.isdir(expanded_template): + return expanded_template + if self.is_url(template): + # downloads the file and returns the path + absolute_path = self.download(template) + else: + absolute_path = path.abspath(expanded_template) + if path.exists(absolute_path): + return self.extract(absolute_path) + + raise CommandError("couldn't handle %s template %s." % + (self.app_or_project, template)) + + def validate_name(self, name, app_or_project): + a_or_an = 'an' if app_or_project == 'app' else 'a' + if name is None: + raise CommandError('you must provide {an} {app} name'.format( + an=a_or_an, + app=app_or_project, + )) + # Check it's a valid directory name. + if not name.isidentifier(): + raise CommandError( + "'{name}' is not a valid {app} name. Please make sure the " + "name is a valid identifier.".format( + name=name, + app=app_or_project, + ) + ) + # Check it cannot be imported. + try: + import_module(name) + except ImportError: + pass + else: + raise CommandError( + "'{name}' conflicts with the name of an existing Python " + "module and cannot be used as {an} {app} name. 
Please try " + "another name.".format( + name=name, + an=a_or_an, + app=app_or_project, + ) + ) + + def download(self, url): + """ + Download the given URL and return the file name. + """ + def cleanup_url(url): + tmp = url.rstrip('/') + filename = tmp.split('/')[-1] + if url.endswith('/'): + display_url = tmp + '/' + else: + display_url = url + return filename, display_url + + prefix = 'django_%s_template_' % self.app_or_project + tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download') + self.paths_to_remove.append(tempdir) + filename, display_url = cleanup_url(url) + + if self.verbosity >= 2: + self.stdout.write("Downloading %s\n" % display_url) + try: + the_path, info = urlretrieve(url, path.join(tempdir, filename)) + except IOError as e: + raise CommandError("couldn't download URL %s to %s: %s" % + (url, filename, e)) + + used_name = the_path.split('/')[-1] + + # Trying to get better name from response headers + content_disposition = info.get('content-disposition') + if content_disposition: + _, params = cgi.parse_header(content_disposition) + guessed_filename = params.get('filename') or used_name + else: + guessed_filename = used_name + + # Falling back to content type guessing + ext = self.splitext(guessed_filename)[1] + content_type = info.get('content-type') + if not ext and content_type: + ext = mimetypes.guess_extension(content_type) + if ext: + guessed_filename += ext + + # Move the temporary file to a filename that has better + # chances of being recognized by the archive utils + if used_name != guessed_filename: + guessed_path = path.join(tempdir, guessed_filename) + shutil.move(the_path, guessed_path) + return guessed_path + + # Giving up + return the_path + + def splitext(self, the_path): + """ + Like os.path.splitext, but takes off .tar, too + """ + base, ext = posixpath.splitext(the_path) + if base.lower().endswith('.tar'): + ext = base[-4:] + ext + base = base[:-4] + return base, ext + + def extract(self, filename): + """ + Extract the given 
file to a temporarily and return + the path of the directory with the extracted content. + """ + prefix = 'django_%s_template_' % self.app_or_project + tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract') + self.paths_to_remove.append(tempdir) + if self.verbosity >= 2: + self.stdout.write("Extracting %s\n" % filename) + try: + archive.extract(filename, tempdir) + return tempdir + except (archive.ArchiveException, IOError) as e: + raise CommandError("couldn't extract file %s to %s: %s" % + (filename, tempdir, e)) + + def is_url(self, template): + """Return True if the name looks like a URL.""" + if ':' not in template: + return False + scheme = template.split(':', 1)[0].lower() + return scheme in self.url_schemes + + def make_writeable(self, filename): + """ + Make sure that the file is writeable. + Useful if our source is read-only. + """ + if not os.access(filename, os.W_OK): + st = os.stat(filename) + new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR + os.chmod(filename, new_permissions) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/utils.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a4ac6fad581410b9020ec8014683417c42a32224 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/management/utils.py @@ -0,0 +1,108 @@ +import os +from subprocess import PIPE, Popen + +from django.apps import apps as installed_apps +from django.utils.crypto import get_random_string +from django.utils.encoding import DEFAULT_LOCALE_ENCODING, force_text + +from .base import CommandError + + +def popen_wrapper(args, os_err_exc_type=CommandError, stdout_encoding='utf-8'): + """ + Friendly wrapper around Popen. + + Return stdout output, stderr output, and OS status code. 
+ """ + try: + p = Popen(args, shell=False, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt') + except OSError as err: + raise os_err_exc_type('Error executing %s' % args[0]) from err + output, errors = p.communicate() + return ( + force_text(output, stdout_encoding, strings_only=True, errors='strict'), + force_text(errors, DEFAULT_LOCALE_ENCODING, strings_only=True, errors='replace'), + p.returncode + ) + + +def handle_extensions(extensions): + """ + Organize multiple extensions that are separated with commas or passed by + using --extension/-e multiple times. + + For example: running 'django-admin makemessages -e js,txt -e xhtml -a' + would result in an extension list: ['.js', '.txt', '.xhtml'] + + >>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py']) + {'.html', '.js', '.py'} + >>> handle_extensions(['.html, txt,.tpl']) + {'.html', '.tpl', '.txt'} + """ + ext_list = [] + for ext in extensions: + ext_list.extend(ext.replace(' ', '').split(',')) + for i, ext in enumerate(ext_list): + if not ext.startswith('.'): + ext_list[i] = '.%s' % ext_list[i] + return set(ext_list) + + +def find_command(cmd, path=None, pathext=None): + if path is None: + path = os.environ.get('PATH', '').split(os.pathsep) + if isinstance(path, str): + path = [path] + # check if there are funny path extensions for executables, e.g. Windows + if pathext is None: + pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD').split(os.pathsep) + # don't use extensions if the command ends with one of them + for ext in pathext: + if cmd.endswith(ext): + pathext = [''] + break + # check if we find the command on PATH + for p in path: + f = os.path.join(p, cmd) + if os.path.isfile(f): + return f + for ext in pathext: + fext = f + ext + if os.path.isfile(fext): + return fext + return None + + +def get_random_secret_key(): + """ + Return a 50 character random string usable as a SECRET_KEY setting value. 
+ """ + chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)' + return get_random_string(50, chars) + + +def parse_apps_and_model_labels(labels): + """ + Parse a list of "app_label.ModelName" or "app_label" strings into actual + objects and return a two-element tuple: + (set of model classes, set of app_configs). + Raise a CommandError if some specified models or apps don't exist. + """ + apps = set() + models = set() + + for label in labels: + if '.' in label: + try: + model = installed_apps.get_model(label) + except LookupError: + raise CommandError('Unknown model: %s' % label) + models.add(model) + else: + try: + app_config = installed_apps.get_app_config(label) + except LookupError as e: + raise CommandError(str(e)) + apps.add(app_config) + + return models, apps diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/paginator.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/paginator.py new file mode 100644 index 0000000000000000000000000000000000000000..b07be513d39def3c7132aed0de241192caaf1f83 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/paginator.py @@ -0,0 +1,185 @@ +import collections +import warnings +from math import ceil + +from django.utils.functional import cached_property +from django.utils.translation import gettext_lazy as _ + + +class UnorderedObjectListWarning(RuntimeWarning): + pass + + +class InvalidPage(Exception): + pass + + +class PageNotAnInteger(InvalidPage): + pass + + +class EmptyPage(InvalidPage): + pass + + +class Paginator: + + def __init__(self, object_list, per_page, orphans=0, + allow_empty_first_page=True): + self.object_list = object_list + self._check_object_list_is_ordered() + self.per_page = int(per_page) + self.orphans = int(orphans) + self.allow_empty_first_page = allow_empty_first_page + + def validate_number(self, number): + """Validate the given 1-based page number.""" + try: + 
number = int(number) + except (TypeError, ValueError): + raise PageNotAnInteger(_('That page number is not an integer')) + if number < 1: + raise EmptyPage(_('That page number is less than 1')) + if number > self.num_pages: + if number == 1 and self.allow_empty_first_page: + pass + else: + raise EmptyPage(_('That page contains no results')) + return number + + def get_page(self, number): + """ + Return a valid page, even if the page argument isn't a number or isn't + in range. + """ + try: + number = self.validate_number(number) + except PageNotAnInteger: + number = 1 + except EmptyPage: + number = self.num_pages + return self.page(number) + + def page(self, number): + """Return a Page object for the given 1-based page number.""" + number = self.validate_number(number) + bottom = (number - 1) * self.per_page + top = bottom + self.per_page + if top + self.orphans >= self.count: + top = self.count + return self._get_page(self.object_list[bottom:top], number, self) + + def _get_page(self, *args, **kwargs): + """ + Return an instance of a single page. + + This hook can be used by subclasses to use an alternative to the + standard :cls:`Page` object. + """ + return Page(*args, **kwargs) + + @cached_property + def count(self): + """Return the total number of objects, across all pages.""" + try: + return self.object_list.count() + except (AttributeError, TypeError): + # AttributeError if object_list has no count() method. + # TypeError if object_list.count() requires arguments + # (i.e. is of type list). + return len(self.object_list) + + @cached_property + def num_pages(self): + """Return the total number of pages.""" + if self.count == 0 and not self.allow_empty_first_page: + return 0 + hits = max(1, self.count - self.orphans) + return int(ceil(hits / float(self.per_page))) + + @property + def page_range(self): + """ + Return a 1-based range of pages for iterating through within + a template for loop. 
+ """ + return range(1, self.num_pages + 1) + + def _check_object_list_is_ordered(self): + """ + Warn if self.object_list is unordered (typically a QuerySet). + """ + ordered = getattr(self.object_list, 'ordered', None) + if ordered is not None and not ordered: + obj_list_repr = ( + '{} {}'.format(self.object_list.model, self.object_list.__class__.__name__) + if hasattr(self.object_list, 'model') + else '{!r}'.format(self.object_list) + ) + warnings.warn( + 'Pagination may yield inconsistent results with an unordered ' + 'object_list: {}.'.format(obj_list_repr), + UnorderedObjectListWarning, + stacklevel=3 + ) + + +QuerySetPaginator = Paginator # For backwards-compatibility. + + +class Page(collections.Sequence): + + def __init__(self, object_list, number, paginator): + self.object_list = object_list + self.number = number + self.paginator = paginator + + def __repr__(self): + return '' % (self.number, self.paginator.num_pages) + + def __len__(self): + return len(self.object_list) + + def __getitem__(self, index): + if not isinstance(index, (int, slice)): + raise TypeError + # The object_list is converted to a list so that if it was a QuerySet + # it won't be a database hit per __getitem__. + if not isinstance(self.object_list, list): + self.object_list = list(self.object_list) + return self.object_list[index] + + def has_next(self): + return self.number < self.paginator.num_pages + + def has_previous(self): + return self.number > 1 + + def has_other_pages(self): + return self.has_previous() or self.has_next() + + def next_page_number(self): + return self.paginator.validate_number(self.number + 1) + + def previous_page_number(self): + return self.paginator.validate_number(self.number - 1) + + def start_index(self): + """ + Return the 1-based index of the first object on this page, + relative to total objects in the paginator. + """ + # Special case, return zero if no items. 
+ if self.paginator.count == 0: + return 0 + return (self.paginator.per_page * (self.number - 1)) + 1 + + def end_index(self): + """ + Return the 1-based index of the last object on this page, + relative to total objects found (hits). + """ + # Special case for the last page because there can be orphans. + if self.number == self.paginator.num_pages: + return self.paginator.count + return self.number * self.paginator.per_page diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/serializers/__init__.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/serializers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..666d1bb297687b6a9810103004dc67deb876f434 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/serializers/__init__.py @@ -0,0 +1,238 @@ +""" +Interfaces for serializing Django objects. + +Usage:: + + from django.core import serializers + json = serializers.serialize("json", some_queryset) + objects = list(serializers.deserialize("json", json)) + +To add your own serializers, use the SERIALIZATION_MODULES setting:: + + SERIALIZATION_MODULES = { + "csv": "path.to.csv.serializer", + "txt": "path.to.txt.serializer", + } + +""" + +import importlib + +from django.apps import apps +from django.conf import settings +from django.core.serializers.base import SerializerDoesNotExist + +# Built-in serializers +BUILTIN_SERIALIZERS = { + "xml": "django.core.serializers.xml_serializer", + "python": "django.core.serializers.python", + "json": "django.core.serializers.json", + "yaml": "django.core.serializers.pyyaml", +} + +_serializers = {} + + +class BadSerializer: + """ + Stub serializer to hold exception raised during registration + + This allows the serializer registration to cache serializers and if there + is an error raised in the process of creating a serializer it will be + raised and passed along to the caller 
when the serializer is used. + """ + internal_use_only = False + + def __init__(self, exception): + self.exception = exception + + def __call__(self, *args, **kwargs): + raise self.exception + + +def register_serializer(format, serializer_module, serializers=None): + """Register a new serializer. + + ``serializer_module`` should be the fully qualified module name + for the serializer. + + If ``serializers`` is provided, the registration will be added + to the provided dictionary. + + If ``serializers`` is not provided, the registration will be made + directly into the global register of serializers. Adding serializers + directly is not a thread-safe operation. + """ + if serializers is None and not _serializers: + _load_serializers() + + try: + module = importlib.import_module(serializer_module) + except ImportError as exc: + bad_serializer = BadSerializer(exc) + + module = type('BadSerializerModule', (), { + 'Deserializer': bad_serializer, + 'Serializer': bad_serializer, + }) + + if serializers is None: + _serializers[format] = module + else: + serializers[format] = module + + +def unregister_serializer(format): + "Unregister a given serializer. This is not a thread-safe operation." 
+ if not _serializers: + _load_serializers() + if format not in _serializers: + raise SerializerDoesNotExist(format) + del _serializers[format] + + +def get_serializer(format): + if not _serializers: + _load_serializers() + if format not in _serializers: + raise SerializerDoesNotExist(format) + return _serializers[format].Serializer + + +def get_serializer_formats(): + if not _serializers: + _load_serializers() + return list(_serializers) + + +def get_public_serializer_formats(): + if not _serializers: + _load_serializers() + return [k for k, v in _serializers.items() if not v.Serializer.internal_use_only] + + +def get_deserializer(format): + if not _serializers: + _load_serializers() + if format not in _serializers: + raise SerializerDoesNotExist(format) + return _serializers[format].Deserializer + + +def serialize(format, queryset, **options): + """ + Serialize a queryset (or any iterator that returns database objects) using + a certain serializer. + """ + s = get_serializer(format)() + s.serialize(queryset, **options) + return s.getvalue() + + +def deserialize(format, stream_or_string, **options): + """ + Deserialize a stream or a string. Return an iterator that yields ``(obj, + m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* -- + object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name : + list_of_related_objects}``. + """ + d = get_deserializer(format) + return d(stream_or_string, **options) + + +def _load_serializers(): + """ + Register built-in and settings-defined serializers. This is done lazily so + that user code has a chance to (e.g.) set up custom settings without + needing to be careful of import order. 
+ """ + global _serializers + serializers = {} + for format in BUILTIN_SERIALIZERS: + register_serializer(format, BUILTIN_SERIALIZERS[format], serializers) + if hasattr(settings, "SERIALIZATION_MODULES"): + for format in settings.SERIALIZATION_MODULES: + register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers) + _serializers = serializers + + +def sort_dependencies(app_list): + """Sort a list of (app_config, models) pairs into a single list of models. + + The single list of models is sorted so that any model with a natural key + is serialized before a normal model, and any model with a natural key + dependency has it's dependencies serialized first. + """ + # Process the list of models, and get the list of dependencies + model_dependencies = [] + models = set() + for app_config, model_list in app_list: + if model_list is None: + model_list = app_config.get_models() + + for model in model_list: + models.add(model) + # Add any explicitly defined dependencies + if hasattr(model, 'natural_key'): + deps = getattr(model.natural_key, 'dependencies', []) + if deps: + deps = [apps.get_model(dep) for dep in deps] + else: + deps = [] + + # Now add a dependency for any FK relation with a model that + # defines a natural key + for field in model._meta.fields: + if field.remote_field: + rel_model = field.remote_field.model + if hasattr(rel_model, 'natural_key') and rel_model != model: + deps.append(rel_model) + # Also add a dependency for any simple M2M relation with a model + # that defines a natural key. M2M relations with explicit through + # models don't count as dependencies. + for field in model._meta.many_to_many: + if field.remote_field.through._meta.auto_created: + rel_model = field.remote_field.model + if hasattr(rel_model, 'natural_key') and rel_model != model: + deps.append(rel_model) + model_dependencies.append((model, deps)) + + model_dependencies.reverse() + # Now sort the models to ensure that dependencies are met. 
This + # is done by repeatedly iterating over the input list of models. + # If all the dependencies of a given model are in the final list, + # that model is promoted to the end of the final list. This process + # continues until the input list is empty, or we do a full iteration + # over the input models without promoting a model to the final list. + # If we do a full iteration without a promotion, that means there are + # circular dependencies in the list. + model_list = [] + while model_dependencies: + skipped = [] + changed = False + while model_dependencies: + model, deps = model_dependencies.pop() + + # If all of the models in the dependency list are either already + # on the final model list, or not on the original serialization list, + # then we've found another model with all it's dependencies satisfied. + found = True + for candidate in ((d not in models or d in model_list) for d in deps): + if not candidate: + found = False + if found: + model_list.append(model) + changed = True + else: + skipped.append((model, deps)) + if not changed: + raise RuntimeError( + "Can't resolve dependencies for %s in serialized app list." % + ', '.join( + '%s.%s' % (model._meta.app_label, model._meta.object_name) + for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__) + ) + ) + model_dependencies = skipped + + return model_list diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/serializers/base.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/serializers/base.py new file mode 100644 index 0000000000000000000000000000000000000000..b29f96d3ecc91940346046f47a2c7ad02786f559 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/serializers/base.py @@ -0,0 +1,230 @@ +""" +Module for abstract serializer/unserializer base classes. 
+""" +from io import StringIO + +from django.db import models + + +class SerializerDoesNotExist(KeyError): + """The requested serializer was not found.""" + pass + + +class SerializationError(Exception): + """Something bad happened during serialization.""" + pass + + +class DeserializationError(Exception): + """Something bad happened during deserialization.""" + + @classmethod + def WithData(cls, original_exc, model, fk, field_value): + """ + Factory method for creating a deserialization error which has a more + explanatory message. + """ + return cls("%s: (%s:pk=%s) field_value was '%s'" % (original_exc, model, fk, field_value)) + + +class ProgressBar: + progress_width = 75 + + def __init__(self, output, total_count): + self.output = output + self.total_count = total_count + self.prev_done = 0 + + def update(self, count): + if not self.output: + return + perc = count * 100 // self.total_count + done = perc * self.progress_width // 100 + if self.prev_done >= done: + return + self.prev_done = done + cr = '' if self.total_count == 1 else '\r' + self.output.write(cr + '[' + '.' * done + ' ' * (self.progress_width - done) + ']') + if done == self.progress_width: + self.output.write('\n') + self.output.flush() + + +class Serializer: + """ + Abstract serializer base class. + """ + + # Indicates if the implemented serializer is only available for + # internal Django use. + internal_use_only = False + progress_class = ProgressBar + stream_class = StringIO + + def serialize(self, queryset, *, stream=None, fields=None, use_natural_foreign_keys=False, + use_natural_primary_keys=False, progress_output=None, object_count=0, **options): + """ + Serialize a queryset. 
+ """ + self.options = options + + self.stream = stream if stream is not None else self.stream_class() + self.selected_fields = fields + self.use_natural_foreign_keys = use_natural_foreign_keys + self.use_natural_primary_keys = use_natural_primary_keys + progress_bar = self.progress_class(progress_output, object_count) + + self.start_serialization() + self.first = True + for count, obj in enumerate(queryset, start=1): + self.start_object(obj) + # Use the concrete parent class' _meta instead of the object's _meta + # This is to avoid local_fields problems for proxy models. Refs #17717. + concrete_model = obj._meta.concrete_model + for field in concrete_model._meta.local_fields: + if field.serialize: + if field.remote_field is None: + if self.selected_fields is None or field.attname in self.selected_fields: + self.handle_field(obj, field) + else: + if self.selected_fields is None or field.attname[:-3] in self.selected_fields: + self.handle_fk_field(obj, field) + for field in concrete_model._meta.many_to_many: + if field.serialize: + if self.selected_fields is None or field.attname in self.selected_fields: + self.handle_m2m_field(obj, field) + self.end_object(obj) + progress_bar.update(count) + if self.first: + self.first = False + self.end_serialization() + return self.getvalue() + + def start_serialization(self): + """ + Called when serializing of the queryset starts. + """ + raise NotImplementedError('subclasses of Serializer must provide a start_serialization() method') + + def end_serialization(self): + """ + Called when serializing of the queryset ends. + """ + pass + + def start_object(self, obj): + """ + Called when serializing of an object starts. + """ + raise NotImplementedError('subclasses of Serializer must provide a start_object() method') + + def end_object(self, obj): + """ + Called when serializing of an object ends. + """ + pass + + def handle_field(self, obj, field): + """ + Called to handle each individual (non-relational) field on an object. 
+ """ + raise NotImplementedError('subclasses of Serializer must provide an handle_field() method') + + def handle_fk_field(self, obj, field): + """ + Called to handle a ForeignKey field. + """ + raise NotImplementedError('subclasses of Serializer must provide an handle_fk_field() method') + + def handle_m2m_field(self, obj, field): + """ + Called to handle a ManyToManyField. + """ + raise NotImplementedError('subclasses of Serializer must provide an handle_m2m_field() method') + + def getvalue(self): + """ + Return the fully serialized queryset (or None if the output stream is + not seekable). + """ + if callable(getattr(self.stream, 'getvalue', None)): + return self.stream.getvalue() + + +class Deserializer: + """ + Abstract base deserializer class. + """ + + def __init__(self, stream_or_string, **options): + """ + Init this serializer given a stream or a string + """ + self.options = options + if isinstance(stream_or_string, str): + self.stream = StringIO(stream_or_string) + else: + self.stream = stream_or_string + + def __iter__(self): + return self + + def __next__(self): + """Iteration iterface -- return the next item in the stream""" + raise NotImplementedError('subclasses of Deserializer must provide a __next__() method') + + +class DeserializedObject: + """ + A deserialized model. + + Basically a container for holding the pre-saved deserialized data along + with the many-to-many data saved with the object. + + Call ``save()`` to save the object (with the many-to-many data) to the + database; call ``save(save_m2m=False)`` to save just the object fields + (and not touch the many-to-many stuff.) + """ + + def __init__(self, obj, m2m_data=None): + self.object = obj + self.m2m_data = m2m_data + + def __repr__(self): + return "<%s: %s(pk=%s)>" % ( + self.__class__.__name__, + self.object._meta.label, + self.object.pk, + ) + + def save(self, save_m2m=True, using=None, **kwargs): + # Call save on the Model baseclass directly. 
def build_instance(Model, data, db):
    """Build a model instance from ``data``.

    If the instance has no primary key and the model supports natural keys,
    try to recover the pk of an existing row in ``db`` via the natural key.
    """
    obj = Model(**data)
    supports_natural_key = (
        hasattr(Model, 'natural_key') and
        hasattr(Model._default_manager, 'get_by_natural_key')
    )
    if obj.pk is None and supports_natural_key:
        natural_key = obj.natural_key()
        manager = Model._default_manager.db_manager(db)
        try:
            obj.pk = manager.get_by_natural_key(*natural_key).pk
        except Model.DoesNotExist:
            # No matching row in the database; leave the pk unset.
            pass
    return obj
class DjangoJSONEncoder(json.JSONEncoder):
    """
    JSONEncoder subclass that knows how to encode date/time, decimal types,
    and UUIDs.
    """
    def default(self, o):
        # Date/times follow the "Date Time String Format" from the ECMA-262
        # specification. NOTE: datetime must be tested before date because
        # datetime is a subclass of date.
        if isinstance(o, datetime.datetime):
            rep = o.isoformat()
            if o.microsecond:
                # Truncate microseconds to millisecond precision.
                rep = rep[:23] + rep[26:]
            if rep.endswith('+00:00'):
                rep = rep[:-6] + 'Z'
            return rep
        if isinstance(o, datetime.date):
            return o.isoformat()
        if isinstance(o, datetime.time):
            if is_aware(o):
                raise ValueError("JSON can't represent timezone-aware times.")
            rep = o.isoformat()
            if o.microsecond:
                # Truncate microseconds to millisecond precision.
                rep = rep[:12]
            return rep
        if isinstance(o, datetime.timedelta):
            return duration_iso_string(o)
        if isinstance(o, (decimal.Decimal, uuid.UUID, Promise)):
            return str(o)
        return super().default(o)
+ """ + + internal_use_only = True + + def start_serialization(self): + self._current = None + self.objects = [] + + def end_serialization(self): + pass + + def start_object(self, obj): + self._current = OrderedDict() + + def end_object(self, obj): + self.objects.append(self.get_dump_object(obj)) + self._current = None + + def get_dump_object(self, obj): + data = OrderedDict([('model', str(obj._meta))]) + if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'): + data["pk"] = self._value_from_field(obj, obj._meta.pk) + data['fields'] = self._current + return data + + def _value_from_field(self, obj, field): + value = field.value_from_object(obj) + # Protected types (i.e., primitives like None, numbers, dates, + # and Decimals) are passed through as is. All other values are + # converted to string first. + return value if is_protected_type(value) else field.value_to_string(obj) + + def handle_field(self, obj, field): + self._current[field.name] = self._value_from_field(obj, field) + + def handle_fk_field(self, obj, field): + if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'): + related = getattr(obj, field.name) + if related: + value = related.natural_key() + else: + value = None + else: + value = self._value_from_field(obj, field) + self._current[field.name] = value + + def handle_m2m_field(self, obj, field): + if field.remote_field.through._meta.auto_created: + if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'): + def m2m_value(value): + return value.natural_key() + else: + def m2m_value(value): + return self._value_from_field(value, value._meta.pk) + self._current[field.name] = [ + m2m_value(related) for related in getattr(obj, field.name).iterator() + ] + + def getvalue(self): + return self.objects + + +def Deserializer(object_list, *, using=DEFAULT_DB_ALIAS, ignorenonexistent=False, **options): + """ + Deserialize simple Python objects back into Django ORM instances. 
def _get_model(model_identifier):
    """Resolve an "app_label.model_name" string to a model class."""
    try:
        model = apps.get_model(model_identifier)
    except (LookupError, TypeError):
        raise base.DeserializationError("Invalid model identifier: '%s'" % model_identifier)
    return model
+""" + +import collections +import decimal +from io import StringIO + +import yaml + +from django.core.serializers.base import DeserializationError +from django.core.serializers.python import ( + Deserializer as PythonDeserializer, Serializer as PythonSerializer, +) +from django.db import models + +# Use the C (faster) implementation if possible +try: + from yaml import CSafeLoader as SafeLoader + from yaml import CSafeDumper as SafeDumper +except ImportError: + from yaml import SafeLoader, SafeDumper + + +class DjangoSafeDumper(SafeDumper): + def represent_decimal(self, data): + return self.represent_scalar('tag:yaml.org,2002:str', str(data)) + + def represent_ordered_dict(self, data): + return self.represent_mapping('tag:yaml.org,2002:map', data.items()) + + +DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal) +DjangoSafeDumper.add_representer(collections.OrderedDict, DjangoSafeDumper.represent_ordered_dict) + + +class Serializer(PythonSerializer): + """Convert a queryset to YAML.""" + + internal_use_only = False + + def handle_field(self, obj, field): + # A nasty special case: base YAML doesn't support serialization of time + # types (as opposed to dates or datetimes, which it does support). Since + # we want to use the "safe" serializer for better interoperability, we + # need to do something with those pesky times. Converting 'em to strings + # isn't perfect, but it's better than a "!!python/time" type which would + # halt deserialization under any other language. 
def Deserializer(stream_or_string, **options):
    """Deserialize a stream or string of YAML data."""
    data = stream_or_string
    if isinstance(data, bytes):
        data = data.decode()
    stream = StringIO(data) if isinstance(data, str) else data
    try:
        yield from PythonDeserializer(yaml.load(stream, Loader=SafeLoader), **options)
    except (GeneratorExit, DeserializationError):
        # Let generator shutdown and already-typed errors pass through.
        raise
    except Exception as exc:
        # Re-wrap anything else so callers see a uniform error type.
        raise DeserializationError() from exc
+""" + +from xml.dom import pulldom +from xml.sax import handler +from xml.sax.expatreader import ExpatParser as _ExpatParser + +from django.apps import apps +from django.conf import settings +from django.core.serializers import base +from django.db import DEFAULT_DB_ALIAS, models +from django.utils.xmlutils import ( + SimplerXMLGenerator, UnserializableContentError, +) + + +class Serializer(base.Serializer): + """Serialize a QuerySet to XML.""" + + def indent(self, level): + if self.options.get('indent') is not None: + self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent') * level) + + def start_serialization(self): + """ + Start serialization -- open the XML document and the root element. + """ + self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET)) + self.xml.startDocument() + self.xml.startElement("django-objects", {"version": "1.0"}) + + def end_serialization(self): + """ + End serialization -- end the document. + """ + self.indent(0) + self.xml.endElement("django-objects") + self.xml.endDocument() + + def start_object(self, obj): + """ + Called as each object is handled. + """ + if not hasattr(obj, "_meta"): + raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj)) + + self.indent(1) + attrs = {'model': str(obj._meta)} + if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'): + obj_pk = obj.pk + if obj_pk is not None: + attrs['pk'] = str(obj_pk) + + self.xml.startElement("object", attrs) + + def end_object(self, obj): + """ + Called after handling all fields for an object. + """ + self.indent(1) + self.xml.endElement("object") + + def handle_field(self, obj, field): + """ + Handle each field on an object (except for ForeignKeys and + ManyToManyFields). + """ + self.indent(2) + self.xml.startElement('field', { + 'name': field.name, + 'type': field.get_internal_type(), + }) + + # Get a "string version" of the object's data. 
+ if getattr(obj, field.name) is not None: + try: + self.xml.characters(field.value_to_string(obj)) + except UnserializableContentError: + raise ValueError("%s.%s (pk:%s) contains unserializable characters" % ( + obj.__class__.__name__, field.name, obj.pk)) + else: + self.xml.addQuickElement("None") + + self.xml.endElement("field") + + def handle_fk_field(self, obj, field): + """ + Handle a ForeignKey (they need to be treated slightly + differently from regular fields). + """ + self._start_relational_field(field) + related_att = getattr(obj, field.get_attname()) + if related_att is not None: + if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'): + related = getattr(obj, field.name) + # If related object has a natural key, use it + related = related.natural_key() + # Iterable natural keys are rolled out as subelements + for key_value in related: + self.xml.startElement("natural", {}) + self.xml.characters(str(key_value)) + self.xml.endElement("natural") + else: + self.xml.characters(str(related_att)) + else: + self.xml.addQuickElement("None") + self.xml.endElement("field") + + def handle_m2m_field(self, obj, field): + """ + Handle a ManyToManyField. Related objects are only serialized as + references to the object's PK (i.e. the related *data* is not dumped, + just the relation). 
+ """ + if field.remote_field.through._meta.auto_created: + self._start_relational_field(field) + if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'): + # If the objects in the m2m have a natural key, use it + def handle_m2m(value): + natural = value.natural_key() + # Iterable natural keys are rolled out as subelements + self.xml.startElement("object", {}) + for key_value in natural: + self.xml.startElement("natural", {}) + self.xml.characters(str(key_value)) + self.xml.endElement("natural") + self.xml.endElement("object") + else: + def handle_m2m(value): + self.xml.addQuickElement("object", attrs={ + 'pk': str(value.pk) + }) + for relobj in getattr(obj, field.name).iterator(): + handle_m2m(relobj) + + self.xml.endElement("field") + + def _start_relational_field(self, field): + """Output the element for relational fields.""" + self.indent(2) + self.xml.startElement('field', { + 'name': field.name, + 'rel': field.remote_field.__class__.__name__, + 'to': str(field.remote_field.model._meta), + }) + + +class Deserializer(base.Deserializer): + """Deserialize XML.""" + + def __init__(self, stream_or_string, *, using=DEFAULT_DB_ALIAS, ignorenonexistent=False, **options): + super().__init__(stream_or_string, **options) + self.event_stream = pulldom.parse(self.stream, self._make_parser()) + self.db = using + self.ignore = ignorenonexistent + + def _make_parser(self): + """Create a hardened XML parser (no custom/external entities).""" + return DefusedExpatParser() + + def __next__(self): + for event, node in self.event_stream: + if event == "START_ELEMENT" and node.nodeName == "object": + self.event_stream.expandNode(node) + return self._handle_object(node) + raise StopIteration + + def _handle_object(self, node): + """Convert an node to a DeserializedObject.""" + # Look up the model using the model loading mechanism. If this fails, + # bail. 
+ Model = self._get_model_from_node(node, "model") + + # Start building a data dictionary from the object. + data = {} + if node.hasAttribute('pk'): + data[Model._meta.pk.attname] = Model._meta.pk.to_python( + node.getAttribute('pk')) + + # Also start building a dict of m2m data (this is saved as + # {m2m_accessor_attribute : [list_of_related_objects]}) + m2m_data = {} + + field_names = {f.name for f in Model._meta.get_fields()} + # Deserialize each field. + for field_node in node.getElementsByTagName("field"): + # If the field is missing the name attribute, bail (are you + # sensing a pattern here?) + field_name = field_node.getAttribute("name") + if not field_name: + raise base.DeserializationError(" node is missing the 'name' attribute") + + # Get the field from the Model. This will raise a + # FieldDoesNotExist if, well, the field doesn't exist, which will + # be propagated correctly unless ignorenonexistent=True is used. + if self.ignore and field_name not in field_names: + continue + field = Model._meta.get_field(field_name) + + # As is usually the case, relation fields get the special treatment. + if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel): + m2m_data[field.name] = self._handle_m2m_field_node(field_node, field) + elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel): + data[field.attname] = self._handle_fk_field_node(field_node, field) + else: + if field_node.getElementsByTagName('None'): + value = None + else: + value = field.to_python(getInnerText(field_node).strip()) + data[field.name] = value + + obj = base.build_instance(Model, data, self.db) + + # Return a DeserializedObject so that the m2m data has a place to live. + return base.DeserializedObject(obj, m2m_data) + + def _handle_fk_field_node(self, node, field): + """ + Handle a node for a ForeignKey + """ + # Check if there is a child node named 'None', returning None if so. 
+ if node.getElementsByTagName('None'): + return None + else: + model = field.remote_field.model + if hasattr(model._default_manager, 'get_by_natural_key'): + keys = node.getElementsByTagName('natural') + if keys: + # If there are 'natural' subelements, it must be a natural key + field_value = [getInnerText(k).strip() for k in keys] + obj = model._default_manager.db_manager(self.db).get_by_natural_key(*field_value) + obj_pk = getattr(obj, field.remote_field.field_name) + # If this is a natural foreign key to an object that + # has a FK/O2O as the foreign key, use the FK value + if field.remote_field.model._meta.pk.remote_field: + obj_pk = obj_pk.pk + else: + # Otherwise, treat like a normal PK + field_value = getInnerText(node).strip() + obj_pk = model._meta.get_field(field.remote_field.field_name).to_python(field_value) + return obj_pk + else: + field_value = getInnerText(node).strip() + return model._meta.get_field(field.remote_field.field_name).to_python(field_value) + + def _handle_m2m_field_node(self, node, field): + """ + Handle a node for a ManyToManyField. + """ + model = field.remote_field.model + default_manager = model._default_manager + if hasattr(default_manager, 'get_by_natural_key'): + def m2m_convert(n): + keys = n.getElementsByTagName('natural') + if keys: + # If there are 'natural' subelements, it must be a natural key + field_value = [getInnerText(k).strip() for k in keys] + obj_pk = default_manager.db_manager(self.db).get_by_natural_key(*field_value).pk + else: + # Otherwise, treat like a normal PK value. + obj_pk = model._meta.pk.to_python(n.getAttribute('pk')) + return obj_pk + else: + def m2m_convert(n): + return model._meta.pk.to_python(n.getAttribute('pk')) + return [m2m_convert(c) for c in node.getElementsByTagName("object")] + + def _get_model_from_node(self, node, attr): + """ + Look up a model from a or a + node. 
+ """ + model_identifier = node.getAttribute(attr) + if not model_identifier: + raise base.DeserializationError( + "<%s> node is missing the required '%s' attribute" + % (node.nodeName, attr)) + try: + return apps.get_model(model_identifier) + except (LookupError, TypeError): + raise base.DeserializationError( + "<%s> node has invalid model identifier: '%s'" + % (node.nodeName, model_identifier)) + + +def getInnerText(node): + """Get all the inner text of a DOM node (recursively).""" + # inspired by http://mail.python.org/pipermail/xml-sig/2005-March/011022.html + inner_text = [] + for child in node.childNodes: + if child.nodeType == child.TEXT_NODE or child.nodeType == child.CDATA_SECTION_NODE: + inner_text.append(child.data) + elif child.nodeType == child.ELEMENT_NODE: + inner_text.extend(getInnerText(child)) + else: + pass + return "".join(inner_text) + + +# Below code based on Christian Heimes' defusedxml + + +class DefusedExpatParser(_ExpatParser): + """ + An expat parser hardened against XML bomb attacks. 
class DefusedXmlException(ValueError):
    """Base exception for the hardened XML parser."""
    def __repr__(self):
        return str(self)


class DTDForbidden(DefusedXmlException):
    """Raised when the document declares a DTD, which is forbidden."""
    def __init__(self, name, sysid, pubid):
        super().__init__()
        self.name = name
        self.sysid = sysid
        self.pubid = pubid

    def __str__(self):
        return "DTDForbidden(name='{}', system_id={!r}, public_id={!r})".format(
            self.name, self.sysid, self.pubid
        )
system_id={!r}, public_id={!r})" + return tpl.format(self.name, self.sysid, self.pubid) + + +class ExternalReferenceForbidden(DefusedXmlException): + """Resolving an external reference is forbidden.""" + def __init__(self, context, base, sysid, pubid): + super().__init__() + self.context = context + self.base = base + self.sysid = sysid + self.pubid = pubid + + def __str__(self): + tpl = "ExternalReferenceForbidden(system_id='{}', public_id={})" + return tpl.format(self.sysid, self.pubid) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/servers/basehttp.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/servers/basehttp.py new file mode 100644 index 0000000000000000000000000000000000000000..f36317e21641c8de02af0ab054ec015b0f85dfde --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/servers/basehttp.py @@ -0,0 +1,173 @@ +""" +HTTP server that implements the Python WSGI protocol (PEP 333, rev 1.21). + +Based on wsgiref.simple_server which is part of the standard library since 2.5. + +This is a simple server for use in testing or debugging Django apps. It hasn't +been reviewed for security issues. DON'T USE IT FOR PRODUCTION USE! +""" + +import logging +import socket +import socketserver +import sys +from wsgiref import simple_server + +from django.core.exceptions import ImproperlyConfigured +from django.core.wsgi import get_wsgi_application +from django.utils.module_loading import import_string + +__all__ = ('WSGIServer', 'WSGIRequestHandler') + +logger = logging.getLogger('django.server') + + +def get_internal_wsgi_application(): + """ + Load and return the WSGI application as configured by the user in + ``settings.WSGI_APPLICATION``. With the default ``startproject`` layout, + this will be the ``application`` object in ``projectname/wsgi.py``. 
def is_broken_pipe_error():
    """
    Return True if the exception currently being handled means the client
    disconnected (broken pipe).
    """
    exc_type = sys.exc_info()[0]
    # Checking BrokenPipeError instead of `socket.error and args[0] == 32`
    # fixes three defects of the old test: an IndexError when the OSError
    # carries no args, a TypeError when no exception is active (exc_type is
    # None), and reliance on the magic errno 32, which is not portable.
    return exc_type is not None and issubclass(exc_type, BrokenPipeError)


class WSGIServer(simple_server.WSGIServer):
    """BaseHTTPServer that implements the Python WSGI protocol."""

    request_queue_size = 10

    def __init__(self, *args, ipv6=False, allow_reuse_address=True, **kwargs):
        if ipv6:
            self.address_family = socket.AF_INET6
        self.allow_reuse_address = allow_reuse_address
        super().__init__(*args, **kwargs)

    def handle_error(self, request, client_address):
        # A client vanishing mid-response is routine; log quietly instead of
        # printing the default traceback.
        if is_broken_pipe_error():
            logger.info("- Broken pipe from %s\n", client_address)
        else:
            super().handle_error(request, client_address)


class ThreadedWSGIServer(socketserver.ThreadingMixIn, WSGIServer):
    """A threaded version of the WSGIServer."""
    pass


class ServerHandler(simple_server.ServerHandler):
    http_version = '1.1'

    def handle_error(self):
        # Ignore broken pipe errors, otherwise pass on.
        if not is_broken_pipe_error():
            super().handle_error()
+ extra = { + 'request': self.request, + 'server_time': self.log_date_time_string(), + } + if args[1][0] == '4': + # 0x16 = Handshake, 0x03 = SSL 3.0 or TLS 1.x + if args[0].startswith('\x16\x03'): + extra['status_code'] = 500 + logger.error( + "You're accessing the development server over HTTPS, but " + "it only supports HTTP.\n", extra=extra, + ) + return + + if args[1].isdigit() and len(args[1]) == 3: + status_code = int(args[1]) + extra['status_code'] = status_code + + if status_code >= 500: + level = logger.error + elif status_code >= 400: + level = logger.warning + else: + level = logger.info + else: + level = logger.info + + level(format, *args, extra=extra) + + def get_environ(self): + # Strip all headers with underscores in the name before constructing + # the WSGI environ. This prevents header-spoofing based on ambiguity + # between underscores and dashes both normalized to underscores in WSGI + # env vars. Nginx and Apache 2.4+ both do this as well. + for k, v in self.headers.items(): + if '_' in k: + del self.headers[k] + + return super().get_environ() + + def handle(self): + """Copy of WSGIRequestHandler.handle() but with different ServerHandler""" + self.raw_requestline = self.rfile.readline(65537) + if len(self.raw_requestline) > 65536: + self.requestline = '' + self.request_version = '' + self.command = '' + self.send_error(414) + return + + if not self.parse_request(): # An error code has been sent, just exit + return + + handler = ServerHandler( + self.rfile, self.wfile, self.get_stderr(), self.get_environ() + ) + handler.request_handler = self # backpointer for logging + handler.run(self.server.get_app()) + + +def run(addr, port, wsgi_handler, ipv6=False, threading=False, server_cls=WSGIServer): + server_address = (addr, port) + if threading: + httpd_cls = type('WSGIServer', (socketserver.ThreadingMixIn, server_cls), {}) + else: + httpd_cls = server_cls + httpd = httpd_cls(server_address, WSGIRequestHandler, ipv6=ipv6) + if threading: + # 
ThreadingMixIn.daemon_threads indicates how threads will behave on an + # abrupt shutdown; like quitting the server by the user or restarting + # by the auto-reloader. True means the server will not wait for thread + # termination before it quits. This will make auto-reloader faster + # and will prevent the need to kill the server manually if a thread + # isn't terminating correctly. + httpd.daemon_threads = True + httpd.set_app(wsgi_handler) + httpd.serve_forever() diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/signals.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/signals.py new file mode 100644 index 0000000000000000000000000000000000000000..5d9618dd0c58475264b2d1408974d7e6c9c7122f --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/signals.py @@ -0,0 +1,6 @@ +from django.dispatch import Signal + +request_started = Signal(providing_args=["environ"]) +request_finished = Signal() +got_request_exception = Signal(providing_args=["request"]) +setting_changed = Signal(providing_args=["setting", "value", "enter"]) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/signing.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/signing.py new file mode 100644 index 0000000000000000000000000000000000000000..c6ed5fe36e52a0cf8b9e666f3e79216a7e5438ed --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/core/signing.py @@ -0,0 +1,199 @@ +""" +Functions for creating and restoring url-safe signed JSON objects. + +The format used looks like this: + +>>> signing.dumps("hello") +'ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk' + +There are two components here, separated by a ':'. The first component is a +URLsafe base64 encoded JSON of the object passed to dumps(). 
The second +component is a base64 encoded hmac/SHA1 hash of "$first_component:$secret" + +signing.loads(s) checks the signature and returns the deserialized object. +If the signature fails, a BadSignature exception is raised. + +>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk") +'hello' +>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified") +... +BadSignature: Signature failed: ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified + +You can optionally compress the JSON prior to base64 encoding it to save +space, using the compress=True argument. This checks if compression actually +helps and only applies compression if the result is a shorter string: + +>>> signing.dumps(range(1, 20), compress=True) +'.eJwFwcERACAIwLCF-rCiILN47r-GyZVJsNgkxaFxoDgxcOHGxMKD_T7vhAml:1QaUaL:BA0thEZrp4FQVXIXuOvYJtLJSrQ' + +The fact that the string is compressed is signalled by the prefixed '.' at the +start of the base64 JSON. + +There are 65 url-safe characters: the 64 used by url-safe base64 and the ':'. +These functions make use of all of them. 
+""" + +import base64 +import datetime +import json +import re +import time +import zlib + +from django.conf import settings +from django.utils import baseconv +from django.utils.crypto import constant_time_compare, salted_hmac +from django.utils.encoding import force_bytes +from django.utils.module_loading import import_string + +_SEP_UNSAFE = re.compile(r'^[A-z0-9-_=]*$') + + +class BadSignature(Exception): + """Signature does not match.""" + pass + + +class SignatureExpired(BadSignature): + """Signature timestamp is older than required max_age.""" + pass + + +def b64_encode(s): + return base64.urlsafe_b64encode(s).strip(b'=') + + +def b64_decode(s): + pad = b'=' * (-len(s) % 4) + return base64.urlsafe_b64decode(s + pad) + + +def base64_hmac(salt, value, key): + return b64_encode(salted_hmac(salt, value, key).digest()).decode() + + +def get_cookie_signer(salt='django.core.signing.get_cookie_signer'): + Signer = import_string(settings.SIGNING_BACKEND) + key = force_bytes(settings.SECRET_KEY) + return Signer(b'django.http.cookies' + key, salt=salt) + + +class JSONSerializer: + """ + Simple wrapper around json to be used in signing.dumps and + signing.loads. + """ + def dumps(self, obj): + return json.dumps(obj, separators=(',', ':')).encode('latin-1') + + def loads(self, data): + return json.loads(data.decode('latin-1')) + + +def dumps(obj, key=None, salt='django.core.signing', serializer=JSONSerializer, compress=False): + """ + Return URL-safe, hmac/SHA1 signed base64 compressed JSON string. If key is + None, use settings.SECRET_KEY instead. + + If compress is True (not the default), check if compressing using zlib can + save some space. Prepend a '.' to signify compression. This is included + in the signature, to protect against zip bombs. + + Salt can be used to namespace the hash, so that a signed string is + only valid for a given namespace. 
Leaving this at the default + value or re-using a salt value across different parts of your + application without good cause is a security risk. + + The serializer is expected to return a bytestring. + """ + data = serializer().dumps(obj) + + # Flag for if it's been compressed or not + is_compressed = False + + if compress: + # Avoid zlib dependency unless compress is being used + compressed = zlib.compress(data) + if len(compressed) < (len(data) - 1): + data = compressed + is_compressed = True + base64d = b64_encode(data).decode() + if is_compressed: + base64d = '.' + base64d + return TimestampSigner(key, salt=salt).sign(base64d) + + +def loads(s, key=None, salt='django.core.signing', serializer=JSONSerializer, max_age=None): + """ + Reverse of dumps(), raise BadSignature if signature fails. + + The serializer is expected to accept a bytestring. + """ + # TimestampSigner.unsign() returns str but base64 and zlib compression + # operate on bytes. + base64d = force_bytes(TimestampSigner(key, salt=salt).unsign(s, max_age=max_age)) + decompress = False + if base64d[:1] == b'.': + # It's compressed; uncompress it first + base64d = base64d[1:] + decompress = True + data = b64_decode(base64d) + if decompress: + data = zlib.decompress(data) + return serializer().loads(data) + + +class Signer: + + def __init__(self, key=None, sep=':', salt=None): + # Use of native strings in all versions of Python + self.key = key or settings.SECRET_KEY + self.sep = sep + if _SEP_UNSAFE.match(self.sep): + raise ValueError( + 'Unsafe Signer separator: %r (cannot be empty or consist of ' + 'only A-z0-9-_=)' % sep, + ) + self.salt = salt or '%s.%s' % (self.__class__.__module__, self.__class__.__name__) + + def signature(self, value): + return base64_hmac(self.salt + 'signer', value, self.key) + + def sign(self, value): + return '%s%s%s' % (value, self.sep, self.signature(value)) + + def unsign(self, signed_value): + if self.sep not in signed_value: + raise BadSignature('No "%s" found in value' 
class TimestampSigner(Signer):
    """Signer that also embeds a signing time, enforced via unsign(max_age)."""

    def timestamp(self):
        # Current Unix time, base62-encoded to keep the token short.
        return baseconv.base62.encode(int(time.time()))

    def sign(self, value):
        return super().sign('%s%s%s' % (value, self.sep, self.timestamp()))

    def unsign(self, value, max_age=None):
        """
        Retrieve the original value, raising SignatureExpired if it was
        signed more than max_age seconds ago.
        """
        result = super().unsign(value)
        value, encoded_ts = result.rsplit(self.sep, 1)
        timestamp = baseconv.base62.decode(encoded_ts)
        if max_age is not None:
            if isinstance(max_age, datetime.timedelta):
                max_age = max_age.total_seconds()
            # Reject anything older than max_age.
            age = time.time() - timestamp
            if age > max_age:
                raise SignatureExpired(
                    'Signature age %s > %s seconds' % (age, max_age))
        return value
EMPTY_VALUES = (None, '', [], (), {})


def _lazy_re_compile(regex, flags=0):
    """Lazily compile a regex with flags, deferring the work to first use."""
    def _compile():
        # A pattern string is compiled on demand; a pre-compiled regex is
        # passed through, in which case flags must not have been given.
        if isinstance(regex, str):
            return re.compile(regex, flags)
        assert not flags, "flags must be empty if regex is passed pre-compiled"
        return regex
    return SimpleLazyObject(_compile)


@deconstructible
class RegexValidator:
    # Class-level defaults; each may be overridden per instance in __init__.
    regex = ''
    message = _('Enter a valid value.')
    code = 'invalid'
    inverse_match = False
    flags = 0

    def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None):
        if regex is not None:
            self.regex = regex
        if message is not None:
            self.message = message
        if code is not None:
            self.code = code
        if inverse_match is not None:
            self.inverse_match = inverse_match
        if flags is not None:
            self.flags = flags
        if self.flags and not isinstance(self.regex, str):
            raise TypeError("If the flags are set, regex must be a regular expression string.")

        self.regex = _lazy_re_compile(self.regex, self.flags)

    def __call__(self, value):
        """
        Validate that the input contains (or does *not* contain, if
        inverse_match is True) a match for the regular expression.
        """
        found = self.regex.search(str(value)) is not None
        # Invalid exactly when match-state equals inverse_match: matched
        # while inverting, or unmatched while not.
        if found == self.inverse_match:
            raise ValidationError(self.message, code=self.code)

    def __eq__(self, other):
        # Equal when the full validator configuration matches.
        return (
            isinstance(other, RegexValidator) and
            self.regex.pattern == other.regex.pattern and
            self.regex.flags == other.regex.flags and
            (self.message == other.message) and
            (self.code == other.code) and
            (self.inverse_match == other.inverse_match)
        )
+ if len(urlsplit(value).netloc) > 253: + raise ValidationError(self.message, code=self.code) + + +integer_validator = RegexValidator( + _lazy_re_compile(r'^-?\d+\Z'), + message=_('Enter a valid integer.'), + code='invalid', +) + + +def validate_integer(value): + return integer_validator(value) + + +@deconstructible +class EmailValidator: + message = _('Enter a valid email address.') + code = 'invalid' + user_regex = _lazy_re_compile( + r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom + r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string + re.IGNORECASE) + domain_regex = _lazy_re_compile( + # max length for domain name labels is 63 characters per RFC 1034 + r'((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(? b + + +@deconstructible +class MinValueValidator(BaseValidator): + message = _('Ensure this value is greater than or equal to %(limit_value)s.') + code = 'min_value' + + def compare(self, a, b): + return a < b + + +@deconstructible +class MinLengthValidator(BaseValidator): + message = ngettext_lazy( + 'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).', + 'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).', + 'limit_value') + code = 'min_length' + + def compare(self, a, b): + return a < b + + def clean(self, x): + return len(x) + + +@deconstructible +class MaxLengthValidator(BaseValidator): + message = ngettext_lazy( + 'Ensure this value has at most %(limit_value)d character (it has %(show_value)d).', + 'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).', + 'limit_value') + code = 'max_length' + + def compare(self, a, b): + return a > b + + def clean(self, x): + return len(x) + + +@deconstructible +class DecimalValidator: + """ + Validate that the input does not exceed the maximum number of digits + expected, otherwise raise ValidationError. 
+ """ + messages = { + 'max_digits': ngettext_lazy( + 'Ensure that there are no more than %(max)s digit in total.', + 'Ensure that there are no more than %(max)s digits in total.', + 'max' + ), + 'max_decimal_places': ngettext_lazy( + 'Ensure that there are no more than %(max)s decimal place.', + 'Ensure that there are no more than %(max)s decimal places.', + 'max' + ), + 'max_whole_digits': ngettext_lazy( + 'Ensure that there are no more than %(max)s digit before the decimal point.', + 'Ensure that there are no more than %(max)s digits before the decimal point.', + 'max' + ), + } + + def __init__(self, max_digits, decimal_places): + self.max_digits = max_digits + self.decimal_places = decimal_places + + def __call__(self, value): + digit_tuple, exponent = value.as_tuple()[1:] + if exponent >= 0: + # A positive exponent adds that many trailing zeros. + digits = len(digit_tuple) + exponent + decimals = 0 + else: + # If the absolute value of the negative exponent is larger than the + # number of digits, then it's the same as the number of digits, + # because it'll consume all of the digits in digit_tuple and then + # add abs(exponent) - len(digit_tuple) leading zeros after the + # decimal point. 
+ if abs(exponent) > len(digit_tuple): + digits = decimals = abs(exponent) + else: + digits = len(digit_tuple) + decimals = abs(exponent) + whole_digits = digits - decimals + + if self.max_digits is not None and digits > self.max_digits: + raise ValidationError( + self.messages['max_digits'], + code='max_digits', + params={'max': self.max_digits}, + ) + if self.decimal_places is not None and decimals > self.decimal_places: + raise ValidationError( + self.messages['max_decimal_places'], + code='max_decimal_places', + params={'max': self.decimal_places}, + ) + if (self.max_digits is not None and self.decimal_places is not None and + whole_digits > (self.max_digits - self.decimal_places)): + raise ValidationError( + self.messages['max_whole_digits'], + code='max_whole_digits', + params={'max': (self.max_digits - self.decimal_places)}, + ) + + def __eq__(self, other): + return ( + isinstance(other, self.__class__) and + self.max_digits == other.max_digits and + self.decimal_places == other.decimal_places + ) + + +@deconstructible +class FileExtensionValidator: + message = _( + "File extension '%(extension)s' is not allowed. " + "Allowed extensions are: '%(allowed_extensions)s'." 
def get_available_image_extensions():
    """
    Return the lowercase, dot-less file extensions Pillow can handle, or an
    empty list when Pillow is not installed.
    """
    try:
        from PIL import Image
    except ImportError:
        return []
    # Image.init() registers all available format plugins, populating
    # Image.EXTENSION with keys like '.jpg'.
    Image.init()
    return [extension[1:].lower() for extension in Image.EXTENSION]
def get_wsgi_application():
    """
    Public entry point for Django's WSGI support; return a WSGI callable.

    Keeps django.core.handlers.WSGIHandler out of the public API in case the
    internal WSGI implementation changes or moves in the future.
    """
    # Initialize Django (without touching the script prefix) before handing
    # out the callable.
    django.setup(set_prefix=False)
    handler = WSGIHandler()
    return handler
# We load all these up for backwards compatibility, you should use
# connections['default'] instead.
class DefaultConnectionProxy:
    """
    Proxy for accessing the default DatabaseWrapper object's attributes. If you
    need to access the DatabaseWrapper object itself, use
    connections[DEFAULT_DB_ALIAS] instead.
    """
    def __getattr__(self, item):
        return getattr(connections[DEFAULT_DB_ALIAS], item)

    def __setattr__(self, name, value):
        return setattr(connections[DEFAULT_DB_ALIAS], name, value)

    def __delattr__(self, name):
        return delattr(connections[DEFAULT_DB_ALIAS], name)

    def __eq__(self, other):
        return connections[DEFAULT_DB_ALIAS] == other

    def __hash__(self):
        # Defining __eq__ alone sets __hash__ to None, which made the
        # module-level `connection` proxy unhashable (unusable as a dict key
        # or set member). Delegate to the default connection so hashing stays
        # consistent with __eq__.
        return hash(connections[DEFAULT_DB_ALIAS])


connection = DefaultConnectionProxy()


# Register an event to reset saved queries when a Django request is started.
def reset_queries(**kwargs):
    for conn in connections.all():
        conn.queries_log.clear()


signals.request_started.connect(reset_queries)


# Register an event to reset transaction state and close connections past
# their lifetime.
def close_old_connections(**kwargs):
    """Signal handler: close connections that report themselves unusable or obsolete."""
    for connection_wrapper in connections.all():
        connection_wrapper.close_if_unusable_or_obsolete()


# Clean up at both request boundaries.
signals.request_started.connect(close_old_connections)
signals.request_finished.connect(close_old_connections)
+ client_class = None + creation_class = None + features_class = None + introspection_class = None + ops_class = None + validation_class = BaseDatabaseValidation + + queries_limit = 9000 + + def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS, + allow_thread_sharing=False): + # Connection related attributes. + # The underlying database connection. + self.connection = None + # `settings_dict` should be a dictionary containing keys such as + # NAME, USER, etc. It's called `settings_dict` instead of `settings` + # to disambiguate it from Django settings modules. + self.settings_dict = settings_dict + self.alias = alias + # Query logging in debug mode or when explicitly enabled. + self.queries_log = deque(maxlen=self.queries_limit) + self.force_debug_cursor = False + + # Transaction related attributes. + # Tracks if the connection is in autocommit mode. Per PEP 249, by + # default, it isn't. + self.autocommit = False + # Tracks if the connection is in a transaction managed by 'atomic'. + self.in_atomic_block = False + # Increment to generate unique savepoint ids. + self.savepoint_state = 0 + # List of savepoints created by 'atomic'. + self.savepoint_ids = [] + # Tracks if the outermost 'atomic' block should commit on exit, + # ie. if autocommit was active on entry. + self.commit_on_exit = True + # Tracks if the transaction should be rolled back to the next + # available savepoint because of an exception in an inner block. + self.needs_rollback = False + + # Connection termination related attributes. + self.close_at = None + self.closed_in_transaction = False + self.errors_occurred = False + + # Thread-safety related attributes. + self.allow_thread_sharing = allow_thread_sharing + self._thread_ident = _thread.get_ident() + + # A list of no-argument functions to run when the transaction commits. + # Each entry is an (sids, func) tuple, where sids is a set of the + # active savepoint IDs when this function was registered. 
+ self.run_on_commit = [] + + # Should we run the on-commit hooks the next time set_autocommit(True) + # is called? + self.run_commit_hooks_on_set_autocommit_on = False + + # A stack of wrappers to be invoked around execute()/executemany() + # calls. Each entry is a function taking five arguments: execute, sql, + # params, many, and context. It's the function's responsibility to + # call execute(sql, params, many, context). + self.execute_wrappers = [] + + self.client = self.client_class(self) + self.creation = self.creation_class(self) + self.features = self.features_class(self) + self.introspection = self.introspection_class(self) + self.ops = self.ops_class(self) + self.validation = self.validation_class(self) + + def ensure_timezone(self): + """ + Ensure the connection's timezone is set to `self.timezone_name` and + return whether it changed or not. + """ + return False + + @cached_property + def timezone(self): + """ + Time zone for datetimes stored as naive values in the database. + + Return a tzinfo object or None. + + This is only needed when time zone support is enabled and the database + doesn't support time zones. (When the database supports time zones, + the adapter handles aware datetimes so Django doesn't need to.) + """ + if not settings.USE_TZ: + return None + elif self.features.supports_timezones: + return None + elif self.settings_dict['TIME_ZONE'] is None: + return timezone.utc + else: + return pytz.timezone(self.settings_dict['TIME_ZONE']) + + @cached_property + def timezone_name(self): + """ + Name of the time zone of the database connection. 
+ """ + if not settings.USE_TZ: + return settings.TIME_ZONE + elif self.settings_dict['TIME_ZONE'] is None: + return 'UTC' + else: + return self.settings_dict['TIME_ZONE'] + + @property + def queries_logged(self): + return self.force_debug_cursor or settings.DEBUG + + @property + def queries(self): + if len(self.queries_log) == self.queries_log.maxlen: + warnings.warn( + "Limit for query logging exceeded, only the last {} queries " + "will be returned.".format(self.queries_log.maxlen)) + return list(self.queries_log) + + # ##### Backend-specific methods for creating connections and cursors ##### + + def get_connection_params(self): + """Return a dict of parameters suitable for get_new_connection.""" + raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_connection_params() method') + + def get_new_connection(self, conn_params): + """Open a connection to the database.""" + raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_new_connection() method') + + def init_connection_state(self): + """Initialize the database connection settings.""" + raise NotImplementedError('subclasses of BaseDatabaseWrapper may require an init_connection_state() method') + + def create_cursor(self, name=None): + """Create a cursor. Assume that a connection is established.""" + raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method') + + # ##### Backend-specific methods for creating connections ##### + + def connect(self): + """Connect to the database. Assume that the connection is closed.""" + # Check for invalid configurations. 
+ self.check_settings() + # In case the previous connection was closed while in an atomic block + self.in_atomic_block = False + self.savepoint_ids = [] + self.needs_rollback = False + # Reset parameters defining when to close the connection + max_age = self.settings_dict['CONN_MAX_AGE'] + self.close_at = None if max_age is None else time.time() + max_age + self.closed_in_transaction = False + self.errors_occurred = False + # Establish the connection + conn_params = self.get_connection_params() + self.connection = self.get_new_connection(conn_params) + self.set_autocommit(self.settings_dict['AUTOCOMMIT']) + self.init_connection_state() + connection_created.send(sender=self.__class__, connection=self) + + self.run_on_commit = [] + + def check_settings(self): + if self.settings_dict['TIME_ZONE'] is not None: + if not settings.USE_TZ: + raise ImproperlyConfigured( + "Connection '%s' cannot set TIME_ZONE because USE_TZ is " + "False." % self.alias) + elif self.features.supports_timezones: + raise ImproperlyConfigured( + "Connection '%s' cannot set TIME_ZONE because its engine " + "handles time zones conversions natively." % self.alias) + + def ensure_connection(self): + """Guarantee that a connection to the database is established.""" + if self.connection is None: + with self.wrap_database_errors: + self.connect() + + # ##### Backend-specific wrappers for PEP-249 connection methods ##### + + def _prepare_cursor(self, cursor): + """ + Validate the connection is usable and perform database cursor wrapping. 
+ """ + self.validate_thread_sharing() + if self.queries_logged: + wrapped_cursor = self.make_debug_cursor(cursor) + else: + wrapped_cursor = self.make_cursor(cursor) + return wrapped_cursor + + def _cursor(self, name=None): + self.ensure_connection() + with self.wrap_database_errors: + return self._prepare_cursor(self.create_cursor(name)) + + def _commit(self): + if self.connection is not None: + with self.wrap_database_errors: + return self.connection.commit() + + def _rollback(self): + if self.connection is not None: + with self.wrap_database_errors: + return self.connection.rollback() + + def _close(self): + if self.connection is not None: + with self.wrap_database_errors: + return self.connection.close() + + # ##### Generic wrappers for PEP-249 connection methods ##### + + def cursor(self): + """Create a cursor, opening a connection if necessary.""" + return self._cursor() + + def commit(self): + """Commit a transaction and reset the dirty flag.""" + self.validate_thread_sharing() + self.validate_no_atomic_block() + self._commit() + # A successful commit means that the database connection works. + self.errors_occurred = False + self.run_commit_hooks_on_set_autocommit_on = True + + def rollback(self): + """Roll back a transaction and reset the dirty flag.""" + self.validate_thread_sharing() + self.validate_no_atomic_block() + self._rollback() + # A successful rollback means that the database connection works. + self.errors_occurred = False + self.needs_rollback = False + self.run_on_commit = [] + + def close(self): + """Close the connection to the database.""" + self.validate_thread_sharing() + self.run_on_commit = [] + + # Don't call validate_no_atomic_block() to avoid making it difficult + # to get rid of a connection in an invalid state. The next connect() + # will reset the transaction state anyway. 
+ if self.closed_in_transaction or self.connection is None: + return + try: + self._close() + finally: + if self.in_atomic_block: + self.closed_in_transaction = True + self.needs_rollback = True + else: + self.connection = None + + # ##### Backend-specific savepoint management methods ##### + + def _savepoint(self, sid): + with self.cursor() as cursor: + cursor.execute(self.ops.savepoint_create_sql(sid)) + + def _savepoint_rollback(self, sid): + with self.cursor() as cursor: + cursor.execute(self.ops.savepoint_rollback_sql(sid)) + + def _savepoint_commit(self, sid): + with self.cursor() as cursor: + cursor.execute(self.ops.savepoint_commit_sql(sid)) + + def _savepoint_allowed(self): + # Savepoints cannot be created outside a transaction + return self.features.uses_savepoints and not self.get_autocommit() + + # ##### Generic savepoint management methods ##### + + def savepoint(self): + """ + Create a savepoint inside the current transaction. Return an + identifier for the savepoint that will be used for the subsequent + rollback or commit. Do nothing if savepoints are not supported. + """ + if not self._savepoint_allowed(): + return + + thread_ident = _thread.get_ident() + tid = str(thread_ident).replace('-', '') + + self.savepoint_state += 1 + sid = "s%s_x%d" % (tid, self.savepoint_state) + + self.validate_thread_sharing() + self._savepoint(sid) + + return sid + + def savepoint_rollback(self, sid): + """ + Roll back to a savepoint. Do nothing if savepoints are not supported. + """ + if not self._savepoint_allowed(): + return + + self.validate_thread_sharing() + self._savepoint_rollback(sid) + + # Remove any callbacks registered while this savepoint was active. + self.run_on_commit = [ + (sids, func) for (sids, func) in self.run_on_commit if sid not in sids + ] + + def savepoint_commit(self, sid): + """ + Release a savepoint. Do nothing if savepoints are not supported. 
+ """ + if not self._savepoint_allowed(): + return + + self.validate_thread_sharing() + self._savepoint_commit(sid) + + def clean_savepoints(self): + """ + Reset the counter used to generate unique savepoint ids in this thread. + """ + self.savepoint_state = 0 + + # ##### Backend-specific transaction management methods ##### + + def _set_autocommit(self, autocommit): + """ + Backend-specific implementation to enable or disable autocommit. + """ + raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a _set_autocommit() method') + + # ##### Generic transaction management methods ##### + + def get_autocommit(self): + """Get the autocommit state.""" + self.ensure_connection() + return self.autocommit + + def set_autocommit(self, autocommit, force_begin_transaction_with_broken_autocommit=False): + """ + Enable or disable autocommit. + + The usual way to start a transaction is to turn autocommit off. + SQLite does not properly start a transaction when disabling + autocommit. To avoid this buggy behavior and to actually enter a new + transaction, an explcit BEGIN is required. Using + force_begin_transaction_with_broken_autocommit=True will issue an + explicit BEGIN with SQLite. This option will be ignored for other + backends. 
+ """ + self.validate_no_atomic_block() + self.ensure_connection() + + start_transaction_under_autocommit = ( + force_begin_transaction_with_broken_autocommit and not autocommit and + self.features.autocommits_when_autocommit_is_off + ) + + if start_transaction_under_autocommit: + self._start_transaction_under_autocommit() + else: + self._set_autocommit(autocommit) + + self.autocommit = autocommit + + if autocommit and self.run_commit_hooks_on_set_autocommit_on: + self.run_and_clear_commit_hooks() + self.run_commit_hooks_on_set_autocommit_on = False + + def get_rollback(self): + """Get the "needs rollback" flag -- for *advanced use* only.""" + if not self.in_atomic_block: + raise TransactionManagementError( + "The rollback flag doesn't work outside of an 'atomic' block.") + return self.needs_rollback + + def set_rollback(self, rollback): + """ + Set or unset the "needs rollback" flag -- for *advanced use* only. + """ + if not self.in_atomic_block: + raise TransactionManagementError( + "The rollback flag doesn't work outside of an 'atomic' block.") + self.needs_rollback = rollback + + def validate_no_atomic_block(self): + """Raise an error if an atomic block is active.""" + if self.in_atomic_block: + raise TransactionManagementError( + "This is forbidden when an 'atomic' block is active.") + + def validate_no_broken_transaction(self): + if self.needs_rollback: + raise TransactionManagementError( + "An error occurred in the current transaction. You can't " + "execute queries until the end of the 'atomic' block.") + + # ##### Foreign key constraints checks handling ##### + + @contextmanager + def constraint_checks_disabled(self): + """ + Disable foreign key constraint checking. + """ + disabled = self.disable_constraint_checking() + try: + yield + finally: + if disabled: + self.enable_constraint_checking() + + def disable_constraint_checking(self): + """ + Backends can implement as needed to temporarily disable foreign key + constraint checking. 
Should return True if the constraints were + disabled and will need to be reenabled. + """ + return False + + def enable_constraint_checking(self): + """ + Backends can implement as needed to re-enable foreign key constraint + checking. + """ + pass + + def check_constraints(self, table_names=None): + """ + Backends can override this method if they can apply constraint + checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an + IntegrityError if any invalid foreign key references are encountered. + """ + pass + + # ##### Connection termination handling ##### + + def is_usable(self): + """ + Test if the database connection is usable. + + This method may assume that self.connection is not None. + + Actual implementations should take care not to raise exceptions + as that may prevent Django from recycling unusable connections. + """ + raise NotImplementedError( + "subclasses of BaseDatabaseWrapper may require an is_usable() method") + + def close_if_unusable_or_obsolete(self): + """ + Close the current connection if unrecoverable errors have occurred + or if it outlived its maximum age. + """ + if self.connection is not None: + # If the application didn't restore the original autocommit setting, + # don't take chances, drop the connection. + if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']: + self.close() + return + + # If an exception other than DataError or IntegrityError occurred + # since the last commit / rollback, check if the connection works. 
+ if self.errors_occurred: + if self.is_usable(): + self.errors_occurred = False + else: + self.close() + return + + if self.close_at is not None and time.time() >= self.close_at: + self.close() + return + + # ##### Thread safety handling ##### + + def validate_thread_sharing(self): + """ + Validate that the connection isn't accessed by another thread than the + one which originally created it, unless the connection was explicitly + authorized to be shared between threads (via the `allow_thread_sharing` + property). Raise an exception if the validation fails. + """ + if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()): + raise DatabaseError( + "DatabaseWrapper objects created in a " + "thread can only be used in that same thread. The object " + "with alias '%s' was created in thread id %s and this is " + "thread id %s." + % (self.alias, self._thread_ident, _thread.get_ident()) + ) + + # ##### Miscellaneous ##### + + def prepare_database(self): + """ + Hook to do any database check or preparation, generally called before + migrating a project or an app. + """ + pass + + @cached_property + def wrap_database_errors(self): + """ + Context manager and decorator that re-throws backend-specific database + exceptions using Django's common wrappers. + """ + return DatabaseErrorWrapper(self) + + def chunked_cursor(self): + """ + Return a cursor that tries to avoid caching in the database (if + supported by the database), otherwise return a regular cursor. 
+ """ + return self.cursor() + + def make_debug_cursor(self, cursor): + """Create a cursor that logs all queries in self.queries_log.""" + return utils.CursorDebugWrapper(cursor, self) + + def make_cursor(self, cursor): + """Create a cursor without debug logging.""" + return utils.CursorWrapper(cursor, self) + + @contextmanager + def temporary_connection(self): + """ + Context manager that ensures that a connection is established, and + if it opened one, closes it to avoid leaving a dangling connection. + This is useful for operations outside of the request-response cycle. + + Provide a cursor: with self.temporary_connection() as cursor: ... + """ + must_close = self.connection is None + cursor = self.cursor() + try: + yield cursor + finally: + cursor.close() + if must_close: + self.close() + + @property + def _nodb_connection(self): + """ + Return an alternative connection to be used when there is no need to + access the main database, specifically for test db creation/deletion. + This also prevents the production database from being exposed to + potential child threads while (or after) the test database is destroyed. + Refs #10868, #17786, #16969. + """ + settings_dict = self.settings_dict.copy() + settings_dict['NAME'] = None + nodb_connection = self.__class__( + settings_dict, + alias=NO_DB_ALIAS, + allow_thread_sharing=False) + return nodb_connection + + def _start_transaction_under_autocommit(self): + """ + Only required when autocommits_when_autocommit_is_off = True. + """ + raise NotImplementedError( + 'subclasses of BaseDatabaseWrapper may require a ' + '_start_transaction_under_autocommit() method' + ) + + def schema_editor(self, *args, **kwargs): + """ + Return a new instance of this backend's SchemaEditor. 
+ """ + if self.SchemaEditorClass is None: + raise NotImplementedError( + 'The SchemaEditorClass attribute of this database wrapper is still None') + return self.SchemaEditorClass(self, *args, **kwargs) + + def on_commit(self, func): + if self.in_atomic_block: + # Transaction in progress; save for execution on commit. + self.run_on_commit.append((set(self.savepoint_ids), func)) + elif not self.get_autocommit(): + raise TransactionManagementError('on_commit() cannot be used in manual transaction management') + else: + # No transaction in progress and in autocommit mode; execute + # immediately. + func() + + def run_and_clear_commit_hooks(self): + self.validate_no_atomic_block() + current_run_on_commit = self.run_on_commit + self.run_on_commit = [] + while current_run_on_commit: + sids, func = current_run_on_commit.pop(0) + func() + + @contextmanager + def execute_wrapper(self, wrapper): + """ + Return a context manager under which the wrapper is applied to suitable + database query executions. + """ + self.execute_wrappers.append(wrapper) + try: + yield + finally: + self.execute_wrappers.pop() + + def copy(self, alias=None, allow_thread_sharing=None): + """ + Return a copy of this connection. + + For tests that require two connections to the same database. 
class BaseDatabaseClient:
    """Encapsulate backend-specific methods for opening a client shell."""

    # Name of the command-line client binary this backend launches
    # (e.g. "psql" for PostgreSQL). Concrete backends must override this.
    executable_name = None

    def __init__(self, connection):
        # `connection` is the BaseDatabaseWrapper whose settings describe
        # the database the shell should connect to.
        self.connection = connection

    def runshell(self):
        # Backends implement this to spawn an interactive database shell
        # configured from self.connection.settings_dict.
        raise NotImplementedError('subclasses of BaseDatabaseClient must provide a runshell() method')
# Prefix prepended to the production database NAME to derive the default
# test database name (overridable via settings_dict['TEST']['NAME']).
TEST_DATABASE_PREFIX = 'test_'


class BaseDatabaseCreation:
    """
    Encapsulate backend-specific differences pertaining to creation and
    destruction of the test database.
    """
    def __init__(self, connection):
        # `connection` is the BaseDatabaseWrapper this creation helper
        # operates on; all work targets connection.settings_dict.
        self.connection = connection

    @property
    def _nodb_connection(self):
        """
        Used to be defined here, now moved to DatabaseWrapper.

        Kept as a delegating property for backward compatibility with
        backends that still access it through the creation object.
        """
        return self.connection._nodb_connection

    def create_test_db(self, verbosity=1, autoclobber=False, serialize=True, keepdb=False):
        """
        Create a test database, prompting the user for confirmation if the
        database already exists. Return the name of the test database created.

        With keepdb=True an existing test database is reused instead of being
        recreated. With serialize=True a JSON snapshot of the freshly migrated
        database is stored on the connection for later restoration.
        """
        # Don't import django.core.management if it isn't needed.
        from django.core.management import call_command

        test_database_name = self._get_test_db_name()

        if verbosity >= 1:
            action = 'Creating'
            if keepdb:
                action = "Using existing"

            print("%s test database for alias %s..." % (
                action,
                self._get_database_display_str(verbosity, test_database_name),
            ))

        # We could skip this call if keepdb is True, but we instead
        # give it the keepdb param. This is to handle the case
        # where the test DB doesn't exist, in which case we need to
        # create it, then just not destroy it. If we instead skip
        # this, we will get an exception.
        self._create_test_db(verbosity, autoclobber, keepdb)

        # Close the old connection and point both the live settings and this
        # wrapper's settings_dict at the new test database name.
        self.connection.close()
        settings.DATABASES[self.connection.alias]["NAME"] = test_database_name
        self.connection.settings_dict["NAME"] = test_database_name

        # We report migrate messages at one level lower than that requested.
        # This ensures we don't get flooded with messages during testing
        # (unless you really ask to be flooded).
        call_command(
            'migrate',
            verbosity=max(verbosity - 1, 0),
            interactive=False,
            database=self.connection.alias,
            run_syncdb=True,
        )

        # We then serialize the current state of the database into a string
        # and store it on the connection. This slightly horrific process is so people
        # who are testing on databases without transactions or who are using
        # a TransactionTestCase still get a clean database on every test run.
        if serialize:
            self.connection._test_serialized_contents = self.serialize_db_to_string()

        call_command('createcachetable', database=self.connection.alias)

        # Ensure a connection for the side effect of initializing the test database.
        self.connection.ensure_connection()

        return test_database_name

    def set_as_test_mirror(self, primary_settings_dict):
        """
        Set this database up to be used in testing as a mirror of a primary
        database whose settings are given.
        """
        self.connection.settings_dict['NAME'] = primary_settings_dict['NAME']

    def serialize_db_to_string(self):
        """
        Serialize all data in the database into a JSON string.
        Designed only for test runner usage; will not handle large
        amounts of data.
        """
        # Build list of all apps to serialize: only apps with models that
        # are managed by migrations and not excluded by
        # settings.TEST_NON_SERIALIZED_APPS take part.
        from django.db.migrations.loader import MigrationLoader
        loader = MigrationLoader(self.connection)
        app_list = []
        for app_config in apps.get_app_configs():
            if (
                app_config.models_module is not None and
                app_config.label in loader.migrated_apps and
                app_config.name not in settings.TEST_NON_SERIALIZED_APPS
            ):
                app_list.append((app_config, None))

        # Make a function to iteratively return every object
        def get_objects():
            # Models are yielded in FK-dependency order so that
            # deserialization can save them without constraint errors.
            for model in serializers.sort_dependencies(app_list):
                if (model._meta.can_migrate(self.connection) and
                        router.allow_migrate_model(self.connection.alias, model)):
                    queryset = model._default_manager.using(self.connection.alias).order_by(model._meta.pk.name)
                    yield from queryset.iterator()
        # Serialize to a string
        out = StringIO()
        serializers.serialize("json", get_objects(), indent=None, stream=out)
        return out.getvalue()

    def deserialize_db_from_string(self, data):
        """
        Reload the database with data from a string generated by
        the serialize_db_to_string() method.
        """
        data = StringIO(data)
        for obj in serializers.deserialize("json", data, using=self.connection.alias):
            obj.save()

    def _get_database_display_str(self, verbosity, database_name):
        """
        Return display string for a database for use in various actions.

        At verbosity >= 2 the physical database name is appended to the alias.
        """
        return "'%s'%s" % (
            self.connection.alias,
            (" ('%s')" % database_name) if verbosity >= 2 else '',
        )

    def _get_test_db_name(self):
        """
        Internal implementation - return the name of the test DB that will be
        created. Only useful when called from create_test_db() and
        _create_test_db() and when no external munging is done with the 'NAME'
        settings.
        """
        if self.connection.settings_dict['TEST']['NAME']:
            return self.connection.settings_dict['TEST']['NAME']
        return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']

    def _execute_create_test_db(self, cursor, parameters, keepdb=False):
        # `parameters['dbname']` is already quoted by the backend's ops;
        # `parameters['suffix']` carries backend-specific creation options.
        cursor.execute('CREATE DATABASE %(dbname)s %(suffix)s' % parameters)

    def _create_test_db(self, verbosity, autoclobber, keepdb=False):
        """
        Internal implementation - create the test db tables.

        Returns the test database name. On a creation failure, either reuses
        the database (keepdb), prompts/forces a drop-and-recreate, or exits.
        """
        test_database_name = self._get_test_db_name()
        test_db_params = {
            'dbname': self.connection.ops.quote_name(test_database_name),
            'suffix': self.sql_table_creation_suffix(),
        }
        # Create the test database and connect to it.
        with self._nodb_connection.cursor() as cursor:
            try:
                self._execute_create_test_db(cursor, test_db_params, keepdb)
            except Exception as e:
                # if we want to keep the db, then no need to do any of the below,
                # just return and skip it all.
                if keepdb:
                    return test_database_name

                sys.stderr.write(
                    "Got an error creating the test database: %s\n" % e)
                # NOTE: `confirm` is only bound when not autoclobber; the
                # `autoclobber or ...` short-circuit below relies on that.
                if not autoclobber:
                    confirm = input(
                        "Type 'yes' if you would like to try deleting the test "
                        "database '%s', or 'no' to cancel: " % test_database_name)
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print("Destroying old test database for alias %s..." % (
                                self._get_database_display_str(verbosity, test_database_name),
                            ))
                        cursor.execute('DROP DATABASE %(dbname)s' % test_db_params)
                        self._execute_create_test_db(cursor, test_db_params, keepdb)
                    except Exception as e:
                        sys.stderr.write(
                            "Got an error recreating the test database: %s\n" % e)
                        sys.exit(2)
                else:
                    print("Tests cancelled.")
                    sys.exit(1)

        return test_database_name

    def clone_test_db(self, suffix, verbosity=1, autoclobber=False, keepdb=False):
        """
        Clone a test database.

        Used for parallel test runs; the actual cloning is delegated to the
        backend-specific _clone_test_db().
        """
        source_database_name = self.connection.settings_dict['NAME']

        if verbosity >= 1:
            action = 'Cloning test database'
            if keepdb:
                action = 'Using existing clone'
            print("%s for alias %s..." % (
                action,
                self._get_database_display_str(verbosity, source_database_name),
            ))

        # We could skip this call if keepdb is True, but we instead
        # give it the keepdb param. See create_test_db for details.
        self._clone_test_db(suffix, verbosity, keepdb)

    def get_test_db_clone_settings(self, suffix):
        """
        Return a modified connection settings dict for the n-th clone of a DB.
        """
        # When this function is called, the test database has been created
        # already and its name has been copied to settings_dict['NAME'] so
        # we don't need to call _get_test_db_name.
        orig_settings_dict = self.connection.settings_dict
        new_settings_dict = orig_settings_dict.copy()
        new_settings_dict['NAME'] = '{}_{}'.format(orig_settings_dict['NAME'], suffix)
        return new_settings_dict

    def _clone_test_db(self, suffix, verbosity, keepdb=False):
        """
        Internal implementation - duplicate the test db tables.
        """
        raise NotImplementedError(
            "The database backend doesn't support cloning databases. "
            "Disable the option to run tests in parallel processes.")

    def destroy_test_db(self, old_database_name=None, verbosity=1, keepdb=False, suffix=None):
        """
        Destroy a test database, prompting the user for confirmation if the
        database already exists.

        With keepdb=True the database is preserved; old_database_name, when
        given, is restored into settings afterwards.
        """
        self.connection.close()
        if suffix is None:
            test_database_name = self.connection.settings_dict['NAME']
        else:
            test_database_name = self.get_test_db_clone_settings(suffix)['NAME']

        if verbosity >= 1:
            action = 'Destroying'
            if keepdb:
                action = 'Preserving'
            print("%s test database for alias %s..." % (
                action,
                self._get_database_display_str(verbosity, test_database_name),
            ))

        # if we want to preserve the database
        # skip the actual destroying piece.
        if not keepdb:
            self._destroy_test_db(test_database_name, verbosity)

        # Restore the original database name
        if old_database_name is not None:
            settings.DATABASES[self.connection.alias]["NAME"] = old_database_name
            self.connection.settings_dict["NAME"] = old_database_name

    def _destroy_test_db(self, test_database_name, verbosity):
        """
        Internal implementation - remove the test db tables.
        """
        # Remove the test database to clean up after
        # ourselves. Connect to the previous database (not the test database)
        # to do so, because it's not allowed to delete a database while being
        # connected to it.
        with self.connection._nodb_connection.cursor() as cursor:
            cursor.execute("DROP DATABASE %s"
                           % self.connection.ops.quote_name(test_database_name))

    def sql_table_creation_suffix(self):
        """
        SQL to append to the end of the test table creation statements.
        """
        return ''

    def test_db_signature(self):
        """
        Return a tuple with elements of self.connection.settings_dict (a
        DATABASES setting value) that uniquely identify a database
        accordingly to the RDBMS particularities.
        """
        settings_dict = self.connection.settings_dict
        return (
            settings_dict['HOST'],
            settings_dict['PORT'],
            settings_dict['ENGINE'],
            self._get_test_db_name(),
        )
+ related_fields_match_type = False + allow_sliced_subqueries = True + has_select_for_update = False + has_select_for_update_nowait = False + has_select_for_update_skip_locked = False + has_select_for_update_of = False + # Does the database's SELECT FOR UPDATE OF syntax require a column rather + # than a table? + select_for_update_of_column = False + + # Does the default test database allow multiple connections? + # Usually an indication that the test database is in-memory + test_db_allows_multiple_connections = True + + # Can an object be saved without an explicit primary key? + supports_unspecified_pk = False + + # Can a fixture contain forward references? i.e., are + # FK constraints checked at the end of transaction, or + # at the end of each save operation? + supports_forward_references = True + + # Does the backend truncate names properly when they are too long? + truncates_names = False + + # Is there a REAL datatype in addition to floats/doubles? + has_real_datatype = False + supports_subqueries_in_group_by = True + + # Is there a true datatype for uuid? + has_native_uuid_field = False + + # Is there a true datatype for timedeltas? + has_native_duration_field = False + + # Does the database driver supports same type temporal data subtraction + # by returning the type used to store duration field? + supports_temporal_subtraction = False + + # Do time/datetime fields have microsecond precision? + supports_microsecond_precision = True + + # Does the __regex lookup support backreferencing and grouping? + supports_regex_backreferencing = True + + # Can date/datetime lookups be performed using a string? + supports_date_lookup_using_string = True + + # Can datetimes with timezones be used? + supports_timezones = True + + # Does the database have a copy of the zoneinfo database? + has_zoneinfo_database = True + + # When performing a GROUP BY, is an ORDER BY NULL required + # to remove any ordering? 
+ requires_explicit_null_ordering_when_grouping = False + + # Does the backend order NULL values as largest or smallest? + nulls_order_largest = False + + # The database's limit on the number of query parameters. + max_query_params = None + + # Can an object have an autoincrement primary key of 0? MySQL says No. + allows_auto_pk_0 = True + + # Do we need to NULL a ForeignKey out, or can the constraint check be + # deferred + can_defer_constraint_checks = False + + # date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas + supports_mixed_date_datetime_comparisons = True + + # Does the backend support tablespaces? Default to False because it isn't + # in the SQL standard. + supports_tablespaces = False + + # Does the backend reset sequences between tests? + supports_sequence_reset = True + + # Can the backend determine reliably if a field is nullable? + # Note that this is separate from interprets_empty_strings_as_nulls, + # although the latter feature, when true, interferes with correct + # setting (and introspection) of CharFields' nullability. + # This is True for all core backends. + can_introspect_null = True + + # Can the backend introspect the default value of a column? + can_introspect_default = True + + # Confirm support for introspected foreign keys + # Every database can do this reliably, except MySQL, + # which can't do it for MyISAM tables + can_introspect_foreign_keys = True + + # Can the backend introspect an AutoField, instead of an IntegerField? + can_introspect_autofield = False + + # Can the backend introspect a BigIntegerField, instead of an IntegerField? + can_introspect_big_integer_field = True + + # Can the backend introspect an BinaryField, instead of an TextField? + can_introspect_binary_field = True + + # Can the backend introspect an DecimalField, instead of an FloatField? + can_introspect_decimal_field = True + + # Can the backend introspect an IPAddressField, instead of an CharField? 
+ can_introspect_ip_address_field = False + + # Can the backend introspect a PositiveIntegerField, instead of an IntegerField? + can_introspect_positive_integer_field = False + + # Can the backend introspect a SmallIntegerField, instead of an IntegerField? + can_introspect_small_integer_field = False + + # Can the backend introspect a TimeField, instead of a DateTimeField? + can_introspect_time_field = True + + # Can the backend introspect the column order (ASC/DESC) for indexes? + supports_index_column_ordering = True + + # Support for the DISTINCT ON clause + can_distinct_on_fields = False + + # Does the backend decide to commit before SAVEPOINT statements + # when autocommit is disabled? http://bugs.python.org/issue8145#msg109965 + autocommits_when_autocommit_is_off = False + + # Does the backend prevent running SQL queries in broken transactions? + atomic_transactions = True + + # Can we roll back DDL in a transaction? + can_rollback_ddl = False + + # Does it support operations requiring references rename in a transaction? + supports_atomic_references_rename = True + + # Can we issue more than one ALTER COLUMN clause in an ALTER TABLE? + supports_combined_alters = False + + # Does it support foreign keys? + supports_foreign_keys = True + + # Does it support CHECK constraints? + supports_column_check_constraints = True + + # Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value}) + # parameter passing? Note this can be provided by the backend even if not + # supported by the Python driver + supports_paramstyle_pyformat = True + + # Does the backend require literal defaults, rather than parameterized ones? + requires_literal_defaults = False + + # Does the backend require a connection reset after each material schema change? + connection_persists_old_columns = False + + # What kind of error does the backend throw when accessing closed cursor? + closed_cursor_error_class = ProgrammingError + + # Does 'a' LIKE 'A' match? 
+ has_case_insensitive_like = True + + # Does the backend require the sqlparse library for splitting multi-line + # statements before executing them? + requires_sqlparse_for_splitting = True + + # Suffix for backends that don't support "SELECT xxx;" queries. + bare_select_suffix = '' + + # If NULL is implied on columns without needing to be explicitly specified + implied_column_null = False + + uppercases_column_names = False + + # Does the backend support "select for update" queries with limit (and offset)? + supports_select_for_update_with_limit = True + + # Does the backend ignore null expressions in GREATEST and LEAST queries unless + # every expression is null? + greatest_least_ignores_nulls = False + + # Can the backend clone databases for parallel test execution? + # Defaults to False to allow third-party backends to opt-in. + can_clone_databases = False + + # Does the backend consider table names with different casing to + # be equal? + ignores_table_name_case = False + + # Place FOR UPDATE right after FROM clause. Used on MSSQL. + for_update_after_from = False + + # Combinatorial flags + supports_select_union = True + supports_select_intersection = True + supports_select_difference = True + supports_slicing_ordering_in_compound = False + + # Does the database support SQL 2003 FILTER (WHERE ...) in aggregate + # expressions? + supports_aggregate_filter_clause = False + + # Does the backend support indexing a TextField? + supports_index_on_text_field = True + + # Does the backed support window expressions (expression OVER (...))? + supports_over_clause = False + + # Does the backend support CAST with precision? + supports_cast_with_precision = True + + # SQL to create a procedure for use by the Django test suite. The + # functionality of the procedure isn't important. + create_test_procedure_without_params_sql = None + create_test_procedure_with_int_param_sql = None + + # Does the backend support keyword parameters for cursor.callproc()? 
+ supports_callproc_kwargs = False + + def __init__(self, connection): + self.connection = connection + + @cached_property + def supports_transactions(self): + """Confirm support for transactions.""" + with self.connection.cursor() as cursor: + cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)') + self.connection.set_autocommit(False) + cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)') + self.connection.rollback() + self.connection.set_autocommit(True) + cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST') + count, = cursor.fetchone() + cursor.execute('DROP TABLE ROLLBACK_TEST') + return count == 0 + + @cached_property + def supports_stddev(self): + """Confirm support for STDDEV and related stats functions.""" + try: + self.connection.ops.check_expression_support(StdDev(1)) + return True + except NotImplementedError: + return False + + def introspected_boolean_field_type(self, field=None): + """ + What is the type returned when the backend introspects a BooleanField? + The `field` argument may be used to give further details of the field + to be introspected. + + The return value from this function is compared by tests against actual + introspection results; it should provide expectations, not run an + introspection itself. 
+ """ + if self.can_introspect_null and field and field.null: + return 'NullBooleanField' + return 'BooleanField' diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/base/introspection.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/base/introspection.py new file mode 100644 index 0000000000000000000000000000000000000000..a3107c30343304c077ccd961fa8d41050f3685fc --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/base/introspection.py @@ -0,0 +1,192 @@ +from collections import namedtuple + +# Structure returned by DatabaseIntrospection.get_table_list() +TableInfo = namedtuple('TableInfo', ['name', 'type']) + +# Structure returned by the DB-API cursor.description interface (PEP 249) +FieldInfo = namedtuple('FieldInfo', 'name type_code display_size internal_size precision scale null_ok default') + + +class BaseDatabaseIntrospection: + """Encapsulate backend-specific introspection utilities.""" + data_types_reverse = {} + + def __init__(self, connection): + self.connection = connection + + def get_field_type(self, data_type, description): + """ + Hook for a database backend to use the cursor description to + match a Django field type to a database column. + + For Oracle, the column data_type on its own is insufficient to + distinguish between a FloatField and IntegerField, for example. + """ + return self.data_types_reverse[data_type] + + def table_name_converter(self, name): + """ + Apply a conversion to the name for the purposes of comparison. + + The default table name converter is for case sensitive comparison. + """ + return name + + def column_name_converter(self, name): + """ + Apply a conversion to the column name for the purposes of comparison. + + Use table_name_converter() by default. 
+ """ + return self.table_name_converter(name) + + def table_names(self, cursor=None, include_views=False): + """ + Return a list of names of all tables that exist in the database. + Sort the returned table list by Python's default sorting. Do NOT use + the database's ORDER BY here to avoid subtle differences in sorting + order between databases. + """ + def get_names(cursor): + return sorted(ti.name for ti in self.get_table_list(cursor) + if include_views or ti.type == 't') + if cursor is None: + with self.connection.cursor() as cursor: + return get_names(cursor) + return get_names(cursor) + + def get_table_list(self, cursor): + """ + Return an unsorted list of TableInfo named tuples of all tables and + views that exist in the database. + """ + raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_table_list() method') + + def django_table_names(self, only_existing=False, include_views=True): + """ + Return a list of all table names that have associated Django models and + are in INSTALLED_APPS. + + If only_existing is True, include only the tables in the database. + """ + from django.apps import apps + from django.db import router + tables = set() + for app_config in apps.get_app_configs(): + for model in router.get_migratable_models(app_config, self.connection.alias): + if not model._meta.managed: + continue + tables.add(model._meta.db_table) + tables.update( + f.m2m_db_table() for f in model._meta.local_many_to_many + if f.remote_field.through._meta.managed + ) + tables = list(tables) + if only_existing: + existing_tables = self.table_names(include_views=include_views) + tables = [ + t + for t in tables + if self.table_name_converter(t) in existing_tables + ] + return tables + + def installed_models(self, tables): + """ + Return a set of all models represented by the provided list of table + names. 
+ """ + from django.apps import apps + from django.db import router + all_models = [] + for app_config in apps.get_app_configs(): + all_models.extend(router.get_migratable_models(app_config, self.connection.alias)) + tables = list(map(self.table_name_converter, tables)) + return { + m for m in all_models + if self.table_name_converter(m._meta.db_table) in tables + } + + def sequence_list(self): + """ + Return a list of information about all DB sequences for all models in + all apps. + """ + from django.apps import apps + from django.db import router + + sequence_list = [] + cursor = self.connection.cursor() + + for app_config in apps.get_app_configs(): + for model in router.get_migratable_models(app_config, self.connection.alias): + if not model._meta.managed: + continue + if model._meta.swapped: + continue + sequence_list.extend(self.get_sequences(cursor, model._meta.db_table, model._meta.local_fields)) + for f in model._meta.local_many_to_many: + # If this is an m2m using an intermediate table, + # we don't need to reset the sequence. + if f.remote_field.through is None: + sequence = self.get_sequences(cursor, f.m2m_db_table()) + sequence_list.extend(sequence if sequence else [{'table': f.m2m_db_table(), 'column': None}]) + return sequence_list + + def get_sequences(self, cursor, table_name, table_fields=()): + """ + Return a list of introspected sequences for table_name. Each sequence + is a dict: {'table': , 'column': }. An optional + 'name' key can be added if the backend supports named sequences. + """ + raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_sequences() method') + + def get_key_columns(self, cursor, table_name): + """ + Backends can override this to return a list of: + (column_name, referenced_table_name, referenced_column_name) + for all key columns in given table. 
+ """ + raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_key_columns() method') + + def get_primary_key_column(self, cursor, table_name): + """ + Return the name of the primary key column for the given table. + """ + for constraint in self.get_constraints(cursor, table_name).values(): + if constraint['primary_key']: + return constraint['columns'][0] + return None + + def get_indexes(self, cursor, table_name): + """ + Deprecated in Django 1.11, use get_constraints instead. + Return a dictionary of indexed fieldname -> infodict for the given + table, where each infodict is in the format: + {'primary_key': boolean representing whether it's the primary key, + 'unique': boolean representing whether it's a unique index} + + Only single-column indexes are introspected. + """ + raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_indexes() method') + + def get_constraints(self, cursor, table_name): + """ + Retrieve any constraints or keys (unique, pk, fk, check, index) + across one or more columns. + + Return a dict mapping constraint names to their attributes, + where attributes is a dict with keys: + * columns: List of columns this covers + * primary_key: True if primary key, False otherwise + * unique: True if this is a unique constraint, False otherwise + * foreign_key: (table, column) of target, or None + * check: True if check constraint, False otherwise + * index: True if index, False otherwise. + * orders: The order (ASC/DESC) defined for the columns of indexes + * type: The type of the index (btree, hash, etc.) + + Some backends may return special constraint names that don't exist + if they don't name constraints of a certain type (e.g. 
SQLite) + """ + raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_constraints() method') diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/base/operations.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/base/operations.py new file mode 100644 index 0000000000000000000000000000000000000000..fd74263a4a29a5d0c6c150696846f7c24c05c13a --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/base/operations.py @@ -0,0 +1,638 @@ +import datetime +import decimal +from importlib import import_module + +from django.conf import settings +from django.core.exceptions import ImproperlyConfigured +from django.db import NotSupportedError, transaction +from django.db.backends import utils +from django.utils import timezone +from django.utils.dateparse import parse_duration +from django.utils.encoding import force_text + + +class BaseDatabaseOperations: + """ + Encapsulate backend-specific differences, such as the way a backend + performs ordering or calculates the ID of a recently-inserted row. + """ + compiler_module = "django.db.models.sql.compiler" + + # Integer field safe ranges by `internal_type` as documented + # in docs/ref/models/fields.txt. + integer_field_ranges = { + 'SmallIntegerField': (-32768, 32767), + 'IntegerField': (-2147483648, 2147483647), + 'BigIntegerField': (-9223372036854775808, 9223372036854775807), + 'PositiveSmallIntegerField': (0, 32767), + 'PositiveIntegerField': (0, 2147483647), + } + set_operators = { + 'union': 'UNION', + 'intersection': 'INTERSECT', + 'difference': 'EXCEPT', + } + # Mapping of Field.get_internal_type() (typically the model field's class + # name) to the data type to use for the Cast() function, if different from + # DatabaseWrapper.data_types. + cast_data_types = {} + # CharField data type if the max_length argument isn't provided. 
+ cast_char_field_without_max_length = None + + # Start and end points for window expressions. + PRECEDING = 'PRECEDING' + FOLLOWING = 'FOLLOWING' + UNBOUNDED_PRECEDING = 'UNBOUNDED ' + PRECEDING + UNBOUNDED_FOLLOWING = 'UNBOUNDED ' + FOLLOWING + CURRENT_ROW = 'CURRENT ROW' + + def __init__(self, connection): + self.connection = connection + self._cache = None + + def autoinc_sql(self, table, column): + """ + Return any SQL needed to support auto-incrementing primary keys, or + None if no SQL is necessary. + + This SQL is executed when a table is created. + """ + return None + + def bulk_batch_size(self, fields, objs): + """ + Return the maximum allowed batch size for the backend. The fields + are the fields going to be inserted in the batch, the objs contains + all the objects to be inserted. + """ + return len(objs) + + def cache_key_culling_sql(self): + """ + Return an SQL query that retrieves the first cache key greater than the + n smallest. + + This is used by the 'db' cache backend to determine where to start + culling. + """ + return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s" + + def unification_cast_sql(self, output_field): + """ + Given a field instance, return the SQL that casts the result of a union + to that type. The resulting string should contain a '%s' placeholder + for the expression being cast. + """ + return '%s' + + def date_extract_sql(self, lookup_type, field_name): + """ + Given a lookup_type of 'year', 'month', or 'day', return the SQL that + extracts a value from the given date field field_name. + """ + raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method') + + def date_interval_sql(self, timedelta): + """ + Implement the date interval functionality for expressions. 
+ """ + raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method') + + def date_trunc_sql(self, lookup_type, field_name): + """ + Given a lookup_type of 'year', 'month', or 'day', return the SQL that + truncates the given date field field_name to a date object with only + the given specificity. + """ + raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetrunc_sql() method') + + def datetime_cast_date_sql(self, field_name, tzname): + """ + Return the SQL to cast a datetime value to date value. + """ + raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_date() method') + + def datetime_cast_time_sql(self, field_name, tzname): + """ + Return the SQL to cast a datetime value to time value. + """ + raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_time_sql() method') + + def datetime_extract_sql(self, lookup_type, field_name, tzname): + """ + Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or + 'second', return the SQL that extracts a value from the given + datetime field field_name. + """ + raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method') + + def datetime_trunc_sql(self, lookup_type, field_name, tzname): + """ + Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or + 'second', return the SQL that truncates the given datetime field + field_name to a datetime object with only the given specificity. + """ + raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method') + + def time_trunc_sql(self, lookup_type, field_name): + """ + Given a lookup_type of 'hour', 'minute' or 'second', return the SQL + that truncates the given time field field_name to a time object with + only the given specificity. 
+ """ + raise NotImplementedError('subclasses of BaseDatabaseOperations may require a time_trunc_sql() method') + + def time_extract_sql(self, lookup_type, field_name): + """ + Given a lookup_type of 'hour', 'minute', or 'second', return the SQL + that extracts a value from the given time field field_name. + """ + return self.date_extract_sql(lookup_type, field_name) + + def deferrable_sql(self): + """ + Return the SQL to make a constraint "initially deferred" during a + CREATE TABLE statement. + """ + return '' + + def distinct_sql(self, fields): + """ + Return an SQL DISTINCT clause which removes duplicate rows from the + result set. If any fields are given, only check the given fields for + duplicates. + """ + if fields: + raise NotImplementedError('DISTINCT ON fields is not supported by this database backend') + else: + return 'DISTINCT' + + def fetch_returned_insert_id(self, cursor): + """ + Given a cursor object that has just performed an INSERT...RETURNING + statement into a table that has an auto-incrementing ID, return the + newly created ID. + """ + return cursor.fetchone()[0] + + def field_cast_sql(self, db_type, internal_type): + """ + Given a column type (e.g. 'BLOB', 'VARCHAR') and an internal type + (e.g. 'GenericIPAddressField'), return the SQL to cast it before using + it in a WHERE statement. The resulting string should contain a '%s' + placeholder for the column being searched against. + """ + return '%s' + + def force_no_ordering(self): + """ + Return a list used in the "ORDER BY" clause to force no ordering at + all. Return an empty list to include nothing in the ordering. + """ + return [] + + def for_update_sql(self, nowait=False, skip_locked=False, of=()): + """ + Return the FOR UPDATE SQL clause to lock rows for an update operation. 
+ """ + return 'FOR UPDATE%s%s%s' % ( + ' OF %s' % ', '.join(of) if of else '', + ' NOWAIT' if nowait else '', + ' SKIP LOCKED' if skip_locked else '', + ) + + def last_executed_query(self, cursor, sql, params): + """ + Return a string of the query last executed by the given cursor, with + placeholders replaced with actual values. + + `sql` is the raw query containing placeholders and `params` is the + sequence of parameters. These are used by default, but this method + exists for database backends to provide a better implementation + according to their own quoting schemes. + """ + # Convert params to contain string values. + def to_string(s): + return force_text(s, strings_only=True, errors='replace') + if isinstance(params, (list, tuple)): + u_params = tuple(to_string(val) for val in params) + elif params is None: + u_params = () + else: + u_params = {to_string(k): to_string(v) for k, v in params.items()} + + return "QUERY = %r - PARAMS = %r" % (sql, u_params) + + def last_insert_id(self, cursor, table_name, pk_name): + """ + Given a cursor object that has just performed an INSERT statement into + a table that has an auto-incrementing ID, return the newly created ID. + + `pk_name` is the name of the primary-key column. + """ + return cursor.lastrowid + + def lookup_cast(self, lookup_type, internal_type=None): + """ + Return the string to use in a query when performing lookups + ("contains", "like", etc.). It should contain a '%s' placeholder for + the column being searched against. + """ + return "%s" + + def max_in_list_size(self): + """ + Return the maximum number of items that can be passed in a single 'IN' + list condition, or None if the backend does not impose a limit. + """ + return None + + def max_name_length(self): + """ + Return the maximum length of table and column names, or None if there + is no limit. + """ + return None + + def no_limit_value(self): + """ + Return the value to use for the LIMIT when we are wanting "LIMIT + infinity". 
Return None if the limit clause can be omitted in this case. + """ + raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method') + + def pk_default_value(self): + """ + Return the value to use during an INSERT statement to specify that + the field should use its default value. + """ + return 'DEFAULT' + + def prepare_sql_script(self, sql): + """ + Take an SQL script that may contain multiple lines and return a list + of statements to feed to successive cursor.execute() calls. + + Since few databases are able to process raw SQL scripts in a single + cursor.execute() call and PEP 249 doesn't talk about this use case, + the default implementation is conservative. + """ + try: + import sqlparse + except ImportError: + raise ImproperlyConfigured( + "sqlparse is required if you don't split your SQL " + "statements manually." + ) + else: + return [sqlparse.format(statement, strip_comments=True) + for statement in sqlparse.split(sql) if statement] + + def process_clob(self, value): + """ + Return the value of a CLOB column, for backends that return a locator + object that requires additional processing. + """ + return value + + def return_insert_id(self): + """ + For backends that support returning the last insert ID as part of an + insert query, return the SQL and params to append to the INSERT query. + The returned fragment should contain a format string to hold the + appropriate column. + """ + pass + + def compiler(self, compiler_name): + """ + Return the SQLCompiler class corresponding to the given name, + in the namespace corresponding to the `compiler_module` attribute + on this backend. + """ + if self._cache is None: + self._cache = import_module(self.compiler_module) + return getattr(self._cache, compiler_name) + + def quote_name(self, name): + """ + Return a quoted version of the given table, index, or column name. Do + not quote the given name if it's already been quoted. 
+ """ + raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method') + + def random_function_sql(self): + """Return an SQL expression that returns a random value.""" + return 'RANDOM()' + + def regex_lookup(self, lookup_type): + """ + Return the string to use in a query when performing regular expression + lookups (using "regex" or "iregex"). It should contain a '%s' + placeholder for the column being searched against. + + If the feature is not supported (or part of it is not supported), raise + NotImplementedError. + """ + raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method') + + def savepoint_create_sql(self, sid): + """ + Return the SQL for starting a new savepoint. Only required if the + "uses_savepoints" feature is True. The "sid" parameter is a string + for the savepoint id. + """ + return "SAVEPOINT %s" % self.quote_name(sid) + + def savepoint_commit_sql(self, sid): + """ + Return the SQL for committing the given savepoint. + """ + return "RELEASE SAVEPOINT %s" % self.quote_name(sid) + + def savepoint_rollback_sql(self, sid): + """ + Return the SQL for rolling back the given savepoint. + """ + return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid) + + def set_time_zone_sql(self): + """ + Return the SQL that will set the connection's time zone. + + Return '' if the backend doesn't support time zones. + """ + return '' + + def sql_flush(self, style, tables, sequences, allow_cascade=False): + """ + Return a list of SQL statements required to remove all data from + the given database tables (without actually removing the tables + themselves) and the SQL statements required to reset the sequences + passed in `sequences`. + + The `style` argument is a Style object as returned by either + color_style() or no_style() in django.core.management.color. 
+ + The `allow_cascade` argument determines whether truncation may cascade + to tables with foreign keys pointing the tables being truncated. + PostgreSQL requires a cascade even if these tables are empty. + """ + raise NotImplementedError('subclasses of BaseDatabaseOperations must provide an sql_flush() method') + + def execute_sql_flush(self, using, sql_list): + """Execute a list of SQL statements to flush the database.""" + with transaction.atomic(using=using, savepoint=self.connection.features.can_rollback_ddl): + with self.connection.cursor() as cursor: + for sql in sql_list: + cursor.execute(sql) + + def sequence_reset_by_name_sql(self, style, sequences): + """ + Return a list of the SQL statements required to reset sequences + passed in `sequences`. + + The `style` argument is a Style object as returned by either + color_style() or no_style() in django.core.management.color. + """ + return [] + + def sequence_reset_sql(self, style, model_list): + """ + Return a list of the SQL statements required to reset sequences for + the given models. + + The `style` argument is a Style object as returned by either + color_style() or no_style() in django.core.management.color. + """ + return [] # No sequence reset required by default. + + def start_transaction_sql(self): + """Return the SQL statement required to start a transaction.""" + return "BEGIN;" + + def end_transaction_sql(self, success=True): + """Return the SQL statement required to end a transaction.""" + if not success: + return "ROLLBACK;" + return "COMMIT;" + + def tablespace_sql(self, tablespace, inline=False): + """ + Return the SQL that will be used in a query to define the tablespace. + + Return '' if the backend doesn't support tablespaces. + + If `inline` is True, append the SQL to a row; otherwise append it to + the entire CREATE TABLE or CREATE INDEX statement. 
+ """ + return '' + + def prep_for_like_query(self, x): + """Prepare a value for use in a LIKE query.""" + return str(x).replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_") + + # Same as prep_for_like_query(), but called for "iexact" matches, which + # need not necessarily be implemented using "LIKE" in the backend. + prep_for_iexact_query = prep_for_like_query + + def validate_autopk_value(self, value): + """ + Certain backends do not accept some values for "serial" fields + (for example zero in MySQL). Raise a ValueError if the value is + invalid, otherwise return the validated value. + """ + return value + + def adapt_unknown_value(self, value): + """ + Transform a value to something compatible with the backend driver. + + This method only depends on the type of the value. It's designed for + cases where the target type isn't known, such as .raw() SQL queries. + As a consequence it may not work perfectly in all circumstances. + """ + if isinstance(value, datetime.datetime): # must be before date + return self.adapt_datetimefield_value(value) + elif isinstance(value, datetime.date): + return self.adapt_datefield_value(value) + elif isinstance(value, datetime.time): + return self.adapt_timefield_value(value) + elif isinstance(value, decimal.Decimal): + return self.adapt_decimalfield_value(value) + else: + return value + + def adapt_datefield_value(self, value): + """ + Transform a date value to an object compatible with what is expected + by the backend driver for date columns. + """ + if value is None: + return None + return str(value) + + def adapt_datetimefield_value(self, value): + """ + Transform a datetime value to an object compatible with what is expected + by the backend driver for datetime columns. + """ + if value is None: + return None + return str(value) + + def adapt_timefield_value(self, value): + """ + Transform a time value to an object compatible with what is expected + by the backend driver for time columns. 
+ """ + if value is None: + return None + if timezone.is_aware(value): + raise ValueError("Django does not support timezone-aware times.") + return str(value) + + def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None): + """ + Transform a decimal.Decimal value to an object compatible with what is + expected by the backend driver for decimal (numeric) columns. + """ + return utils.format_number(value, max_digits, decimal_places) + + def adapt_ipaddressfield_value(self, value): + """ + Transform a string representation of an IP address into the expected + type for the backend driver. + """ + return value or None + + def year_lookup_bounds_for_date_field(self, value): + """ + Return a two-elements list with the lower and upper bound to be used + with a BETWEEN operator to query a DateField value using a year + lookup. + + `value` is an int, containing the looked-up year. + """ + first = datetime.date(value, 1, 1) + second = datetime.date(value, 12, 31) + first = self.adapt_datefield_value(first) + second = self.adapt_datefield_value(second) + return [first, second] + + def year_lookup_bounds_for_datetime_field(self, value): + """ + Return a two-elements list with the lower and upper bound to be used + with a BETWEEN operator to query a DateTimeField value using a year + lookup. + + `value` is an int, containing the looked-up year. + """ + first = datetime.datetime(value, 1, 1) + second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999) + if settings.USE_TZ: + tz = timezone.get_current_timezone() + first = timezone.make_aware(first, tz) + second = timezone.make_aware(second, tz) + first = self.adapt_datetimefield_value(first) + second = self.adapt_datetimefield_value(second) + return [first, second] + + def get_db_converters(self, expression): + """ + Return a list of functions needed to convert field data. + + Some field types on some backends do not provide data in the correct + format, this is the hook for converter functions. 
+ """ + return [] + + def convert_durationfield_value(self, value, expression, connection): + if value is not None: + value = str(decimal.Decimal(value) / decimal.Decimal(1000000)) + value = parse_duration(value) + return value + + def check_expression_support(self, expression): + """ + Check that the backend supports the provided expression. + + This is used on specific backends to rule out known expressions + that have problematic or nonexistent implementations. If the + expression has a known problem, the backend should raise + NotImplementedError. + """ + pass + + def combine_expression(self, connector, sub_expressions): + """ + Combine a list of subexpressions into a single expression, using + the provided connecting operator. This is required because operators + can vary between backends (e.g., Oracle with %% and &) and between + subexpression types (e.g., date expressions). + """ + conn = ' %s ' % connector + return conn.join(sub_expressions) + + def combine_duration_expression(self, connector, sub_expressions): + return self.combine_expression(connector, sub_expressions) + + def binary_placeholder_sql(self, value): + """ + Some backends require special syntax to insert binary content (MySQL + for example uses '_binary %s'). + """ + return '%s' + + def modify_insert_params(self, placeholder, params): + """ + Allow modification of insert parameters. Needed for Oracle Spatial + backend due to #10888. + """ + return params + + def integer_field_range(self, internal_type): + """ + Given an integer field internal type (e.g. 'PositiveIntegerField'), + return a tuple of the (min_value, max_value) form representing the + range of the column type bound to the field. 
+ """ + return self.integer_field_ranges[internal_type] + + def subtract_temporals(self, internal_type, lhs, rhs): + if self.connection.features.supports_temporal_subtraction: + lhs_sql, lhs_params = lhs + rhs_sql, rhs_params = rhs + return "(%s - %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params + raise NotImplementedError("This backend does not support %s subtraction." % internal_type) + + def window_frame_start(self, start): + if isinstance(start, int): + if start < 0: + return '%d %s' % (abs(start), self.PRECEDING) + elif start == 0: + return self.CURRENT_ROW + elif start is None: + return self.UNBOUNDED_PRECEDING + raise ValueError("start argument must be a negative integer, zero, or None, but got '%s'." % start) + + def window_frame_end(self, end): + if isinstance(end, int): + if end == 0: + return self.CURRENT_ROW + elif end > 0: + return '%d %s' % (end, self.FOLLOWING) + elif end is None: + return self.UNBOUNDED_FOLLOWING + raise ValueError("end argument must be a positive integer, zero, or None, but got '%s'." % end) + + def window_frame_rows_start_end(self, start=None, end=None): + """ + Return SQL for start and end points in an OVER clause window frame. 
+ """ + if not self.connection.features.supports_over_clause: + raise NotSupportedError('This backend does not support window expressions.') + return self.window_frame_start(start), self.window_frame_end(end) + + def window_frame_range_start_end(self, start=None, end=None): + return self.window_frame_rows_start_end(start, end) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/base/schema.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/base/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..9a5b37b348a0a471112e7dd3339070690b6b64c2 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/base/schema.py @@ -0,0 +1,1060 @@ +import hashlib +import logging +from datetime import datetime + +from django.db.backends.ddl_references import ( + Columns, ForeignKeyName, IndexName, Statement, Table, +) +from django.db.backends.utils import split_identifier +from django.db.models import Index +from django.db.transaction import TransactionManagementError, atomic +from django.utils import timezone +from django.utils.encoding import force_bytes + +logger = logging.getLogger('django.db.backends.schema') + + +def _is_relevant_relation(relation, altered_field): + """ + When altering the given field, must constraints on its model from the given + relation be temporarily dropped? + """ + field = relation.field + if field.many_to_many: + # M2M reverse field + return False + if altered_field.primary_key and field.to_fields == [None]: + # Foreign key constraint on the primary key, which is being altered. + return True + # Is the constraint targeting the field being altered? + return altered_field.name in field.to_fields + + +def _related_non_m2m_objects(old_field, new_field): + # Filter out m2m objects from reverse relations. + # Return (old_relation, new_relation) tuples. 
+ return zip( + (obj for obj in old_field.model._meta.related_objects if _is_relevant_relation(obj, old_field)), + (obj for obj in new_field.model._meta.related_objects if _is_relevant_relation(obj, new_field)) + ) + + +class BaseDatabaseSchemaEditor: + """ + This class and its subclasses are responsible for emitting schema-changing + statements to the databases - model creation/removal/alteration, field + renaming, index fiddling, and so on. + """ + + # Overrideable SQL templates + sql_create_table = "CREATE TABLE %(table)s (%(definition)s)" + sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s" + sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s" + sql_delete_table = "DROP TABLE %(table)s CASCADE" + + sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s" + sql_alter_column = "ALTER TABLE %(table)s %(changes)s" + sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s" + sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL" + sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL" + sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s" + sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT" + sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE" + sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s" + sql_update_with_default = "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL" + + sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)" + sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" + + sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)" + sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" + + sql_create_fk = ( + "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) " + "REFERENCES %(to_table)s 
(%(to_column)s)%(deferrable)s" + ) + sql_create_inline_fk = None + sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" + + sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s" + sql_delete_index = "DROP INDEX %(name)s" + + sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)" + sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" + + sql_delete_procedure = 'DROP PROCEDURE %(procedure)s' + + def __init__(self, connection, collect_sql=False, atomic=True): + self.connection = connection + self.collect_sql = collect_sql + if self.collect_sql: + self.collected_sql = [] + self.atomic_migration = self.connection.features.can_rollback_ddl and atomic + + # State-managing methods + + def __enter__(self): + self.deferred_sql = [] + if self.atomic_migration: + self.atomic = atomic(self.connection.alias) + self.atomic.__enter__() + return self + + def __exit__(self, exc_type, exc_value, traceback): + if exc_type is None: + for sql in self.deferred_sql: + self.execute(sql) + if self.atomic_migration: + self.atomic.__exit__(exc_type, exc_value, traceback) + + # Core utility functions + + def execute(self, sql, params=()): + """Execute the given SQL statement, with optional parameters.""" + # Don't perform the transactional DDL check if SQL is being collected + # as it's not going to be executed anyway. + if not self.collect_sql and self.connection.in_atomic_block and not self.connection.features.can_rollback_ddl: + raise TransactionManagementError( + "Executing DDL statements while in a transaction on databases " + "that can't perform a rollback is prohibited." + ) + # Account for non-string statement objects. 
+ sql = str(sql) + # Log the command we're running, then run it + logger.debug("%s; (params %r)", sql, params, extra={'params': params, 'sql': sql}) + if self.collect_sql: + ending = "" if sql.endswith(";") else ";" + if params is not None: + self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ending) + else: + self.collected_sql.append(sql + ending) + else: + with self.connection.cursor() as cursor: + cursor.execute(sql, params) + + def quote_name(self, name): + return self.connection.ops.quote_name(name) + + @classmethod + def _digest(cls, *args): + """ + Generate a 32-bit digest of a set of arguments that can be used to + shorten identifying names. + """ + h = hashlib.md5() + for arg in args: + h.update(force_bytes(arg)) + return h.hexdigest()[:8] + + # Field <-> database mapping functions + + def column_sql(self, model, field, include_default=False): + """ + Take a field and return its column definition. + The field must already have had set_attributes_from_name() called. + """ + # Get the column's type and use that as the basis of the SQL + db_params = field.db_parameters(connection=self.connection) + sql = db_params['type'] + params = [] + # Check for fields that aren't actually columns (e.g. M2M) + if sql is None: + return None, None + # Work out nullability + null = field.null + # If we were told to include a default value, do so + include_default = include_default and not self.skip_default(field) + if include_default: + default_value = self.effective_default(field) + if default_value is not None: + if self.connection.features.requires_literal_defaults: + # Some databases can't take defaults as a parameter (oracle) + # If this is the case, the individual schema backend should + # implement prepare_default + sql += " DEFAULT %s" % self.prepare_default(default_value) + else: + sql += " DEFAULT %s" + params += [default_value] + # Oracle treats the empty string ('') as null, so coerce the null + # option whenever '' is a possible value. 
+ if (field.empty_strings_allowed and not field.primary_key and + self.connection.features.interprets_empty_strings_as_nulls): + null = True + if null and not self.connection.features.implied_column_null: + sql += " NULL" + elif not null: + sql += " NOT NULL" + # Primary key/unique outputs + if field.primary_key: + sql += " PRIMARY KEY" + elif field.unique: + sql += " UNIQUE" + # Optionally add the tablespace if it's an implicitly indexed column + tablespace = field.db_tablespace or model._meta.db_tablespace + if tablespace and self.connection.features.supports_tablespaces and field.unique: + sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True) + # Return the sql + return sql, params + + def skip_default(self, field): + """ + Some backends don't accept default values for certain columns types + (i.e. MySQL longtext and longblob). + """ + return False + + def prepare_default(self, value): + """ + Only used for backends which have requires_literal_defaults feature + """ + raise NotImplementedError( + 'subclasses of BaseDatabaseSchemaEditor for backends which have ' + 'requires_literal_defaults must provide a prepare_default() method' + ) + + def effective_default(self, field): + """Return a field's effective database default value.""" + if field.has_default(): + default = field.get_default() + elif not field.null and field.blank and field.empty_strings_allowed: + if field.get_internal_type() == "BinaryField": + default = bytes() + else: + default = str() + elif getattr(field, 'auto_now', False) or getattr(field, 'auto_now_add', False): + default = datetime.now() + internal_type = field.get_internal_type() + if internal_type == 'DateField': + default = default.date + elif internal_type == 'TimeField': + default = default.time + elif internal_type == 'DateTimeField': + default = timezone.now + else: + default = None + # If it's a callable, call it + if callable(default): + default = default() + # Run it through the field's get_db_prep_save method 
so we can send it + # to the database. + default = field.get_db_prep_save(default, self.connection) + return default + + def quote_value(self, value): + """ + Return a quoted version of the value so it's safe to use in an SQL + string. This is not safe against injection from user code; it is + intended only for use in making SQL scripts or preparing default values + for particularly tricky backends (defaults are not user-defined, though, + so this is safe). + """ + raise NotImplementedError() + + # Actions + + def create_model(self, model): + """ + Create a table and any accompanying indexes or unique constraints for + the given `model`. + """ + # Create column SQL, add FK deferreds if needed + column_sqls = [] + params = [] + for field in model._meta.local_fields: + # SQL + definition, extra_params = self.column_sql(model, field) + if definition is None: + continue + # Check constraints can go on the column SQL here + db_params = field.db_parameters(connection=self.connection) + if db_params['check']: + definition += " CHECK (%s)" % db_params['check'] + # Autoincrement SQL (for backends with inline variant) + col_type_suffix = field.db_type_suffix(connection=self.connection) + if col_type_suffix: + definition += " %s" % col_type_suffix + params.extend(extra_params) + # FK + if field.remote_field and field.db_constraint: + to_table = field.remote_field.model._meta.db_table + to_column = field.remote_field.model._meta.get_field(field.remote_field.field_name).column + if self.sql_create_inline_fk: + definition += " " + self.sql_create_inline_fk % { + "to_table": self.quote_name(to_table), + "to_column": self.quote_name(to_column), + } + elif self.connection.features.supports_foreign_keys: + self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s")) + # Add the SQL to our big list + column_sqls.append("%s %s" % ( + self.quote_name(field.column), + definition, + )) + # Autoincrement SQL (for backends with post table definition 
variant) + if field.get_internal_type() in ("AutoField", "BigAutoField"): + autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column) + if autoinc_sql: + self.deferred_sql.extend(autoinc_sql) + + # Add any unique_togethers (always deferred, as some fields might be + # created afterwards, like geometry fields with some backends) + for fields in model._meta.unique_together: + columns = [model._meta.get_field(field).column for field in fields] + self.deferred_sql.append(self._create_unique_sql(model, columns)) + # Make the table + sql = self.sql_create_table % { + "table": self.quote_name(model._meta.db_table), + "definition": ", ".join(column_sqls) + } + if model._meta.db_tablespace: + tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace) + if tablespace_sql: + sql += ' ' + tablespace_sql + # Prevent using [] as params, in the case a literal '%' is used in the definition + self.execute(sql, params or None) + + # Add any field index and index_together's (deferred as SQLite3 _remake_table needs it) + self.deferred_sql.extend(self._model_indexes_sql(model)) + + # Make M2M tables + for field in model._meta.local_many_to_many: + if field.remote_field.through._meta.auto_created: + self.create_model(field.remote_field.through) + + def delete_model(self, model): + """Delete a model from the database.""" + # Handle auto-created intermediary models + for field in model._meta.local_many_to_many: + if field.remote_field.through._meta.auto_created: + self.delete_model(field.remote_field.through) + + # Delete the table + self.execute(self.sql_delete_table % { + "table": self.quote_name(model._meta.db_table), + }) + # Remove all deferred statements referencing the deleted table. 
+ for sql in list(self.deferred_sql): + if isinstance(sql, Statement) and sql.references_table(model._meta.db_table): + self.deferred_sql.remove(sql) + + def add_index(self, model, index): + """Add an index on a model.""" + self.execute(index.create_sql(model, self)) + + def remove_index(self, model, index): + """Remove an index from a model.""" + self.execute(index.remove_sql(model, self)) + + def alter_unique_together(self, model, old_unique_together, new_unique_together): + """ + Deal with a model changing its unique_together. The input + unique_togethers must be doubly-nested, not the single-nested + ["foo", "bar"] format. + """ + olds = {tuple(fields) for fields in old_unique_together} + news = {tuple(fields) for fields in new_unique_together} + # Deleted uniques + for fields in olds.difference(news): + self._delete_composed_index(model, fields, {'unique': True}, self.sql_delete_unique) + # Created uniques + for fields in news.difference(olds): + columns = [model._meta.get_field(field).column for field in fields] + self.execute(self._create_unique_sql(model, columns)) + + def alter_index_together(self, model, old_index_together, new_index_together): + """ + Deal with a model changing its index_together. The input + index_togethers must be doubly-nested, not the single-nested + ["foo", "bar"] format. 
+ """ + olds = {tuple(fields) for fields in old_index_together} + news = {tuple(fields) for fields in new_index_together} + # Deleted indexes + for fields in olds.difference(news): + self._delete_composed_index(model, fields, {'index': True}, self.sql_delete_index) + # Created indexes + for field_names in news.difference(olds): + fields = [model._meta.get_field(field) for field in field_names] + self.execute(self._create_index_sql(model, fields, suffix="_idx")) + + def _delete_composed_index(self, model, fields, constraint_kwargs, sql): + columns = [model._meta.get_field(field).column for field in fields] + constraint_names = self._constraint_names(model, columns, **constraint_kwargs) + if len(constraint_names) != 1: + raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % ( + len(constraint_names), + model._meta.db_table, + ", ".join(columns), + )) + self.execute(self._delete_constraint_sql(sql, model, constraint_names[0])) + + def alter_db_table(self, model, old_db_table, new_db_table): + """Rename the table a model points to.""" + if (old_db_table == new_db_table or + (self.connection.features.ignores_table_name_case and + old_db_table.lower() == new_db_table.lower())): + return + self.execute(self.sql_rename_table % { + "old_table": self.quote_name(old_db_table), + "new_table": self.quote_name(new_db_table), + }) + # Rename all references to the old table name. + for sql in self.deferred_sql: + if isinstance(sql, Statement): + sql.rename_table_references(old_db_table, new_db_table) + + def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace): + """Move a model's table between tablespaces.""" + self.execute(self.sql_retablespace_table % { + "table": self.quote_name(model._meta.db_table), + "old_tablespace": self.quote_name(old_db_tablespace), + "new_tablespace": self.quote_name(new_db_tablespace), + }) + + def add_field(self, model, field): + """ + Create a field on a model. 
Usually involves adding a column, but may + involve adding a table instead (for M2M fields). + """ + # Special-case implicit M2M tables + if field.many_to_many and field.remote_field.through._meta.auto_created: + return self.create_model(field.remote_field.through) + # Get the column's definition + definition, params = self.column_sql(model, field, include_default=True) + # It might not actually have a column behind it + if definition is None: + return + # Check constraints can go on the column SQL here + db_params = field.db_parameters(connection=self.connection) + if db_params['check']: + definition += " CHECK (%s)" % db_params['check'] + # Build the SQL and run it + sql = self.sql_create_column % { + "table": self.quote_name(model._meta.db_table), + "column": self.quote_name(field.column), + "definition": definition, + } + self.execute(sql, params) + # Drop the default if we need to + # (Django usually does not use in-database defaults) + if not self.skip_default(field) and self.effective_default(field) is not None: + changes_sql, params = self._alter_column_default_sql(model, None, field, drop=True) + sql = self.sql_alter_column % { + "table": self.quote_name(model._meta.db_table), + "changes": changes_sql, + } + self.execute(sql, params) + # Add an index, if required + self.deferred_sql.extend(self._field_indexes_sql(model, field)) + # Add any FK constraints later + if field.remote_field and self.connection.features.supports_foreign_keys and field.db_constraint: + self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s")) + # Reset connection if required + if self.connection.features.connection_persists_old_columns: + self.connection.close() + + def remove_field(self, model, field): + """ + Remove a field from a model. Usually involves deleting a column, + but for M2Ms may involve deleting a table. 
+ """ + # Special-case implicit M2M tables + if field.many_to_many and field.remote_field.through._meta.auto_created: + return self.delete_model(field.remote_field.through) + # It might not actually have a column behind it + if field.db_parameters(connection=self.connection)['type'] is None: + return + # Drop any FK constraints, MySQL requires explicit deletion + if field.remote_field: + fk_names = self._constraint_names(model, [field.column], foreign_key=True) + for fk_name in fk_names: + self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name)) + # Delete the column + sql = self.sql_delete_column % { + "table": self.quote_name(model._meta.db_table), + "column": self.quote_name(field.column), + } + self.execute(sql) + # Reset connection if required + if self.connection.features.connection_persists_old_columns: + self.connection.close() + # Remove all deferred statements referencing the deleted table. + for sql in list(self.deferred_sql): + if isinstance(sql, Statement) and sql.references_column(model._meta.db_table, field.column): + self.deferred_sql.remove(sql) + + def alter_field(self, model, old_field, new_field, strict=False): + """ + Allow a field's type, uniqueness, nullability, default, column, + constraints, etc. to be modified. + `old_field` is required to compute the necessary changes. + If `strict` is True, raise errors if the old column does not match + `old_field` precisely. 
+ """ + # Ensure this field is even column-based + old_db_params = old_field.db_parameters(connection=self.connection) + old_type = old_db_params['type'] + new_db_params = new_field.db_parameters(connection=self.connection) + new_type = new_db_params['type'] + if ((old_type is None and old_field.remote_field is None) or + (new_type is None and new_field.remote_field is None)): + raise ValueError( + "Cannot alter field %s into %s - they do not properly define " + "db_type (are you using a badly-written custom field?)" % + (old_field, new_field), + ) + elif old_type is None and new_type is None and ( + old_field.remote_field.through and new_field.remote_field.through and + old_field.remote_field.through._meta.auto_created and + new_field.remote_field.through._meta.auto_created): + return self._alter_many_to_many(model, old_field, new_field, strict) + elif old_type is None and new_type is None and ( + old_field.remote_field.through and new_field.remote_field.through and + not old_field.remote_field.through._meta.auto_created and + not new_field.remote_field.through._meta.auto_created): + # Both sides have through models; this is a no-op. 
+ return + elif old_type is None or new_type is None: + raise ValueError( + "Cannot alter field %s into %s - they are not compatible types " + "(you cannot alter to or from M2M fields, or add or remove " + "through= on M2M fields)" % (old_field, new_field) + ) + + self._alter_field(model, old_field, new_field, old_type, new_type, + old_db_params, new_db_params, strict) + + def _alter_field(self, model, old_field, new_field, old_type, new_type, + old_db_params, new_db_params, strict=False): + """Perform a "physical" (non-ManyToMany) field update.""" + # Drop any FK constraints, we'll remake them later + fks_dropped = set() + if old_field.remote_field and old_field.db_constraint: + fk_names = self._constraint_names(model, [old_field.column], foreign_key=True) + if strict and len(fk_names) != 1: + raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % ( + len(fk_names), + model._meta.db_table, + old_field.column, + )) + for fk_name in fk_names: + fks_dropped.add((old_field.column,)) + self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name)) + # Has unique been removed? + if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)): + # Find the unique constraint for this field + constraint_names = self._constraint_names(model, [old_field.column], unique=True) + if strict and len(constraint_names) != 1: + raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % ( + len(constraint_names), + model._meta.db_table, + old_field.column, + )) + for constraint_name in constraint_names: + self.execute(self._delete_constraint_sql(self.sql_delete_unique, model, constraint_name)) + # Drop incoming FK constraints if the field is a primary key or unique, + # which might be a to_field target, and things are going to change. 
+ drop_foreign_keys = ( + ( + (old_field.primary_key and new_field.primary_key) or + (old_field.unique and new_field.unique) + ) and old_type != new_type + ) + if drop_foreign_keys: + # '_meta.related_field' also contains M2M reverse fields, these + # will be filtered out + for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field): + rel_fk_names = self._constraint_names( + new_rel.related_model, [new_rel.field.column], foreign_key=True + ) + for fk_name in rel_fk_names: + self.execute(self._delete_constraint_sql(self.sql_delete_fk, new_rel.related_model, fk_name)) + # Removed an index? (no strict check, as multiple indexes are possible) + # Remove indexes if db_index switched to False or a unique constraint + # will now be used in lieu of an index. The following lines from the + # truth table show all True cases; the rest are False: + # + # old_field.db_index | old_field.unique | new_field.db_index | new_field.unique + # ------------------------------------------------------------------------------ + # True | False | False | False + # True | False | False | True + # True | False | True | True + if old_field.db_index and not old_field.unique and (not new_field.db_index or new_field.unique): + # Find the index for this field + meta_index_names = {index.name for index in model._meta.indexes} + # Retrieve only BTREE indexes since this is what's created with + # db_index=True. + index_names = self._constraint_names(model, [old_field.column], index=True, type_=Index.suffix) + for index_name in index_names: + if index_name in meta_index_names: + # The only way to check if an index was created with + # db_index=True or with Index(['field'], name='foo') + # is to look at its name (refs #28053). + continue + self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_name)) + # Change check constraints? 
+ if old_db_params['check'] != new_db_params['check'] and old_db_params['check']: + constraint_names = self._constraint_names(model, [old_field.column], check=True) + if strict and len(constraint_names) != 1: + raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % ( + len(constraint_names), + model._meta.db_table, + old_field.column, + )) + for constraint_name in constraint_names: + self.execute(self._delete_constraint_sql(self.sql_delete_check, model, constraint_name)) + # Have they renamed the column? + if old_field.column != new_field.column: + self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type)) + # Rename all references to the renamed column. + for sql in self.deferred_sql: + if isinstance(sql, Statement): + sql.rename_column_references(model._meta.db_table, old_field.column, new_field.column) + # Next, start accumulating actions to do + actions = [] + null_actions = [] + post_actions = [] + # Type change? + if old_type != new_type: + fragment, other_actions = self._alter_column_type_sql(model, old_field, new_field, new_type) + actions.append(fragment) + post_actions.extend(other_actions) + # When changing a column NULL constraint to NOT NULL with a given + # default value, we need to perform 4 steps: + # 1. Add a default for new incoming writes + # 2. Update existing NULL rows with new default + # 3. Replace NULL constraint with NOT NULL + # 4. Drop the default again. + # Default change? + old_default = self.effective_default(old_field) + new_default = self.effective_default(new_field) + needs_database_default = ( + old_field.null and + not new_field.null and + old_default != new_default and + new_default is not None and + not self.skip_default(new_field) + ) + if needs_database_default: + actions.append(self._alter_column_default_sql(model, old_field, new_field)) + # Nullability change? 
+ if old_field.null != new_field.null: + fragment = self._alter_column_null_sql(model, old_field, new_field) + if fragment: + null_actions.append(fragment) + # Only if we have a default and there is a change from NULL to NOT NULL + four_way_default_alteration = ( + new_field.has_default() and + (old_field.null and not new_field.null) + ) + if actions or null_actions: + if not four_way_default_alteration: + # If we don't have to do a 4-way default alteration we can + # directly run a (NOT) NULL alteration + actions = actions + null_actions + # Combine actions together if we can (e.g. postgres) + if self.connection.features.supports_combined_alters and actions: + sql, params = tuple(zip(*actions)) + actions = [(", ".join(sql), sum(params, []))] + # Apply those actions + for sql, params in actions: + self.execute( + self.sql_alter_column % { + "table": self.quote_name(model._meta.db_table), + "changes": sql, + }, + params, + ) + if four_way_default_alteration: + # Update existing rows with default value + self.execute( + self.sql_update_with_default % { + "table": self.quote_name(model._meta.db_table), + "column": self.quote_name(new_field.column), + "default": "%s", + }, + [new_default], + ) + # Since we didn't run a NOT NULL change before we need to do it + # now + for sql, params in null_actions: + self.execute( + self.sql_alter_column % { + "table": self.quote_name(model._meta.db_table), + "changes": sql, + }, + params, + ) + if post_actions: + for sql, params in post_actions: + self.execute(sql, params) + # Added a unique? + if (not old_field.unique and new_field.unique) or ( + old_field.primary_key and not new_field.primary_key and new_field.unique + ): + self.execute(self._create_unique_sql(model, [new_field.column])) + # Added an index? Add an index if db_index switched to True or a unique + # constraint will no longer be used in lieu of an index. 
The following + # lines from the truth table show all True cases; the rest are False: + # + # old_field.db_index | old_field.unique | new_field.db_index | new_field.unique + # ------------------------------------------------------------------------------ + # False | False | True | False + # False | True | True | False + # True | True | True | False + if (not old_field.db_index or old_field.unique) and new_field.db_index and not new_field.unique: + self.execute(self._create_index_sql(model, [new_field])) + # Type alteration on primary key? Then we need to alter the column + # referring to us. + rels_to_update = [] + if old_field.primary_key and new_field.primary_key and old_type != new_type: + rels_to_update.extend(_related_non_m2m_objects(old_field, new_field)) + # Changed to become primary key? + # Note that we don't detect unsetting of a PK, as we assume another field + # will always come along and replace it. + if not old_field.primary_key and new_field.primary_key: + # First, drop the old PK + self._delete_primary_key(model, strict) + # Make the new one + self.execute( + self.sql_create_pk % { + "table": self.quote_name(model._meta.db_table), + "name": self.quote_name( + self._create_index_name(model._meta.db_table, [new_field.column], suffix="_pk") + ), + "columns": self.quote_name(new_field.column), + } + ) + # Update all referencing columns + rels_to_update.extend(_related_non_m2m_objects(old_field, new_field)) + # Handle our type alters on the other end of rels from the PK stuff above + for old_rel, new_rel in rels_to_update: + rel_db_params = new_rel.field.db_parameters(connection=self.connection) + rel_type = rel_db_params['type'] + fragment, other_actions = self._alter_column_type_sql( + new_rel.related_model, old_rel.field, new_rel.field, rel_type + ) + self.execute( + self.sql_alter_column % { + "table": self.quote_name(new_rel.related_model._meta.db_table), + "changes": fragment[0], + }, + fragment[1], + ) + for sql, params in other_actions: + 
self.execute(sql, params) + # Does it have a foreign key? + if (new_field.remote_field and + (fks_dropped or not old_field.remote_field or not old_field.db_constraint) and + new_field.db_constraint): + self.execute(self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s")) + # Rebuild FKs that pointed to us if we previously had to drop them + if drop_foreign_keys: + for rel in new_field.model._meta.related_objects: + if _is_relevant_relation(rel, new_field) and rel.field.db_constraint: + self.execute(self._create_fk_sql(rel.related_model, rel.field, "_fk")) + # Does it have check constraints we need to add? + if old_db_params['check'] != new_db_params['check'] and new_db_params['check']: + self.execute( + self.sql_create_check % { + "table": self.quote_name(model._meta.db_table), + "name": self.quote_name( + self._create_index_name(model._meta.db_table, [new_field.column], suffix="_check") + ), + "column": self.quote_name(new_field.column), + "check": new_db_params['check'], + } + ) + # Drop the default if we need to + # (Django usually does not use in-database defaults) + if needs_database_default: + changes_sql, params = self._alter_column_default_sql(model, old_field, new_field, drop=True) + sql = self.sql_alter_column % { + "table": self.quote_name(model._meta.db_table), + "changes": changes_sql, + } + self.execute(sql, params) + # Reset connection if required + if self.connection.features.connection_persists_old_columns: + self.connection.close() + + def _alter_column_null_sql(self, model, old_field, new_field): + """ + Hook to specialize column null alteration. + + Return a (sql, params) fragment to set a column to null or non-null + as required by new_field, or None if no changes are required. + """ + if (self.connection.features.interprets_empty_strings_as_nulls and + new_field.get_internal_type() in ("CharField", "TextField")): + # The field is nullable in the database anyway, leave it alone. 
+ return + else: + new_db_params = new_field.db_parameters(connection=self.connection) + sql = self.sql_alter_column_null if new_field.null else self.sql_alter_column_not_null + return ( + sql % { + 'column': self.quote_name(new_field.column), + 'type': new_db_params['type'], + }, + [], + ) + + def _alter_column_default_sql(self, model, old_field, new_field, drop=False): + """ + Hook to specialize column default alteration. + + Return a (sql, params) fragment to add or drop (depending on the drop + argument) a default to new_field's column. + """ + new_default = self.effective_default(new_field) + default = '%s' + params = [new_default] + + if drop: + params = [] + elif self.connection.features.requires_literal_defaults: + # Some databases (Oracle) can't take defaults as a parameter + # If this is the case, the SchemaEditor for that database should + # implement prepare_default(). + default = self.prepare_default(new_default) + params = [] + + new_db_params = new_field.db_parameters(connection=self.connection) + sql = self.sql_alter_column_no_default if drop else self.sql_alter_column_default + return ( + sql % { + 'column': self.quote_name(new_field.column), + 'type': new_db_params['type'], + 'default': default, + }, + params, + ) + + def _alter_column_type_sql(self, model, old_field, new_field, new_type): + """ + Hook to specialize column type alteration for different backends, + for cases when a creation type is different to an alteration type + (e.g. SERIAL in PostgreSQL, PostGIS fields). + + Return a two-tuple of: an SQL fragment of (sql, params) to insert into + an ALTER TABLE statement and a list of extra (sql, params) tuples to + run once the field is altered. 
+ """ + return ( + ( + self.sql_alter_column_type % { + "column": self.quote_name(new_field.column), + "type": new_type, + }, + [], + ), + [], + ) + + def _alter_many_to_many(self, model, old_field, new_field, strict): + """Alter M2Ms to repoint their to= endpoints.""" + # Rename the through table + if old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table: + self.alter_db_table(old_field.remote_field.through, old_field.remote_field.through._meta.db_table, + new_field.remote_field.through._meta.db_table) + # Repoint the FK to the other side + self.alter_field( + new_field.remote_field.through, + # We need the field that points to the target model, so we can tell alter_field to change it - + # this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model) + old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()), + new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()), + ) + self.alter_field( + new_field.remote_field.through, + # for self-referential models we need to alter field from the other end too + old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()), + new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()), + ) + + def _create_index_name(self, table_name, column_names, suffix=""): + """ + Generate a unique name for an index/unique constraint. + + The name is divided into 3 parts: the table name, the column names, + and a unique digest and suffix. + """ + _, table_name = split_identifier(table_name) + hash_data = [table_name] + list(column_names) + hash_suffix_part = '%s%s' % (self._digest(*hash_data), suffix) + max_length = self.connection.ops.max_name_length() or 200 + # If everything fits into max_length, use that name. + index_name = '%s_%s_%s' % (table_name, '_'.join(column_names), hash_suffix_part) + if len(index_name) <= max_length: + return index_name + # Shorten a long suffix. 
+ if len(hash_suffix_part) > max_length / 3: + hash_suffix_part = hash_suffix_part[:max_length // 3] + other_length = (max_length - len(hash_suffix_part)) // 2 - 1 + index_name = '%s_%s_%s' % ( + table_name[:other_length], + '_'.join(column_names)[:other_length], + hash_suffix_part, + ) + # Prepend D if needed to prevent the name from starting with an + # underscore or a number (not permitted on Oracle). + if index_name[0] == "_" or index_name[0].isdigit(): + index_name = "D%s" % index_name[:-1] + return index_name + + def _get_index_tablespace_sql(self, model, fields, db_tablespace=None): + if db_tablespace is None: + if len(fields) == 1 and fields[0].db_tablespace: + db_tablespace = fields[0].db_tablespace + elif model._meta.db_tablespace: + db_tablespace = model._meta.db_tablespace + if db_tablespace is not None: + return ' ' + self.connection.ops.tablespace_sql(db_tablespace) + return '' + + def _create_index_sql(self, model, fields, *, name=None, suffix='', using='', + db_tablespace=None, col_suffixes=(), sql=None): + """ + Return the SQL statement to create the index for one or several fields. + `sql` can be specified if the syntax differs from the standard (GIS + indexes, ...). 
+ """ + tablespace_sql = self._get_index_tablespace_sql(model, fields, db_tablespace=db_tablespace) + columns = [field.column for field in fields] + sql_create_index = sql or self.sql_create_index + table = model._meta.db_table + + def create_index_name(*args, **kwargs): + nonlocal name + if name is None: + name = self._create_index_name(*args, **kwargs) + return self.quote_name(name) + + return Statement( + sql_create_index, + table=Table(table, self.quote_name), + name=IndexName(table, columns, suffix, create_index_name), + using=using, + columns=Columns(table, columns, self.quote_name, col_suffixes=col_suffixes), + extra=tablespace_sql, + ) + + def _model_indexes_sql(self, model): + """ + Return a list of all index SQL statements (field indexes, + index_together, Meta.indexes) for the specified model. + """ + if not model._meta.managed or model._meta.proxy or model._meta.swapped: + return [] + output = [] + for field in model._meta.local_fields: + output.extend(self._field_indexes_sql(model, field)) + + for field_names in model._meta.index_together: + fields = [model._meta.get_field(field) for field in field_names] + output.append(self._create_index_sql(model, fields, suffix="_idx")) + + for index in model._meta.indexes: + output.append(index.create_sql(model, self)) + return output + + def _field_indexes_sql(self, model, field): + """ + Return a list of all index SQL statements for the specified field. 
+ """ + output = [] + if self._field_should_be_indexed(model, field): + output.append(self._create_index_sql(model, [field])) + return output + + def _field_should_be_indexed(self, model, field): + return field.db_index and not field.unique + + def _rename_field_sql(self, table, old_field, new_field, new_type): + return self.sql_rename_column % { + "table": self.quote_name(table), + "old_column": self.quote_name(old_field.column), + "new_column": self.quote_name(new_field.column), + "type": new_type, + } + + def _create_fk_sql(self, model, field, suffix): + from_table = model._meta.db_table + from_column = field.column + _, to_table = split_identifier(field.target_field.model._meta.db_table) + to_column = field.target_field.column + + def create_fk_name(*args, **kwargs): + return self.quote_name(self._create_index_name(*args, **kwargs)) + + return Statement( + self.sql_create_fk, + table=Table(from_table, self.quote_name), + name=ForeignKeyName(from_table, [from_column], to_table, [to_column], suffix, create_fk_name), + column=Columns(from_table, [from_column], self.quote_name), + to_table=Table(field.target_field.model._meta.db_table, self.quote_name), + to_column=Columns(field.target_field.model._meta.db_table, [to_column], self.quote_name), + deferrable=self.connection.ops.deferrable_sql(), + ) + + def _create_unique_sql(self, model, columns): + table = model._meta.db_table + return Statement( + self.sql_create_unique, + table=Table(table, self.quote_name), + name=IndexName(table, columns, '_uniq', self._create_index_name), + columns=Columns(table, columns, self.quote_name), + ) + + def _delete_constraint_sql(self, template, model, name): + return template % { + "table": self.quote_name(model._meta.db_table), + "name": self.quote_name(name), + } + + def _constraint_names(self, model, column_names=None, unique=None, + primary_key=None, index=None, foreign_key=None, + check=None, type_=None): + """Return all constraint names matching the columns and conditions.""" 
+ if column_names is not None: + column_names = [ + self.connection.introspection.column_name_converter(name) + for name in column_names + ] + with self.connection.cursor() as cursor: + constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table) + result = [] + for name, infodict in constraints.items(): + if column_names is None or column_names == infodict['columns']: + if unique is not None and infodict['unique'] != unique: + continue + if primary_key is not None and infodict['primary_key'] != primary_key: + continue + if index is not None and infodict['index'] != index: + continue + if check is not None and infodict['check'] != check: + continue + if foreign_key is not None and not infodict['foreign_key']: + continue + if type_ is not None and infodict['type'] != type_: + continue + result.append(name) + return result + + def _delete_primary_key(self, model, strict=False): + constraint_names = self._constraint_names(model, primary_key=True) + if strict and len(constraint_names) != 1: + raise ValueError('Found wrong number (%s) of PK constraints for %s' % ( + len(constraint_names), + model._meta.db_table, + )) + for constraint_name in constraint_names: + self.execute(self._delete_constraint_sql(self.sql_delete_pk, model, constraint_name)) + + def remove_procedure(self, procedure_name, param_types=()): + sql = self.sql_delete_procedure % { + 'procedure': self.quote_name(procedure_name), + 'param_types': ','.join(param_types), + } + self.execute(sql) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/base/validation.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/base/validation.py new file mode 100644 index 0000000000000000000000000000000000000000..a02780a6947b0d164adbcf26dfc8a43433a65b07 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/base/validation.py @@ -0,0 +1,25 @@ +class 
BaseDatabaseValidation: + """Encapsulate backend-specific validation.""" + def __init__(self, connection): + self.connection = connection + + def check(self, **kwargs): + return [] + + def check_field(self, field, **kwargs): + errors = [] + # Backends may implement a check_field_type() method. + if (hasattr(self, 'check_field_type') and + # Ignore any related fields. + not getattr(field, 'remote_field', None)): + # Ignore fields with unsupported features. + db_supports_all_required_features = all( + getattr(self.connection.features, feature, False) + for feature in field.model._meta.required_db_features + ) + if db_supports_all_required_features: + field_type = field.db_type(self.connection) + # Ignore non-concrete fields. + if field_type is not None: + errors.extend(self.check_field_type(field, field_type)) + return errors diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/ddl_references.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/ddl_references.py new file mode 100644 index 0000000000000000000000000000000000000000..b894d587934bb6845a2810f445c6a3e177ef94f2 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/ddl_references.py @@ -0,0 +1,175 @@ +""" +Helpers to manipulate deferred DDL statements that might need to be adjusted or +discarded within when executing a migration. +""" + + +class Reference: + """Base class that defines the reference interface.""" + + def references_table(self, table): + """ + Return whether or not this instance references the specified table. + """ + return False + + def references_column(self, table, column): + """ + Return whether or not this instance references the specified column. + """ + return False + + def rename_table_references(self, old_table, new_table): + """ + Rename all references to the old_name to the new_table. 
+ """ + pass + + def rename_column_references(self, table, old_column, new_column): + """ + Rename all references to the old_column to the new_column. + """ + pass + + def __repr__(self): + return '<%s %r>' % (self.__class__.__name__, str(self)) + + def __str__(self): + raise NotImplementedError('Subclasses must define how they should be converted to string.') + + +class Table(Reference): + """Hold a reference to a table.""" + + def __init__(self, table, quote_name): + self.table = table + self.quote_name = quote_name + + def references_table(self, table): + return self.table == table + + def rename_table_references(self, old_table, new_table): + if self.table == old_table: + self.table = new_table + + def __str__(self): + return self.quote_name(self.table) + + +class TableColumns(Table): + """Base class for references to multiple columns of a table.""" + + def __init__(self, table, columns): + self.table = table + self.columns = columns + + def references_column(self, table, column): + return self.table == table and column in self.columns + + def rename_column_references(self, table, old_column, new_column): + if self.table == table: + for index, column in enumerate(self.columns): + if column == old_column: + self.columns[index] = new_column + + +class Columns(TableColumns): + """Hold a reference to one or many columns.""" + + def __init__(self, table, columns, quote_name, col_suffixes=()): + self.quote_name = quote_name + self.col_suffixes = col_suffixes + super().__init__(table, columns) + + def __str__(self): + def col_str(column, idx): + try: + return self.quote_name(column) + self.col_suffixes[idx] + except IndexError: + return self.quote_name(column) + + return ', '.join(col_str(column, idx) for idx, column in enumerate(self.columns)) + + +class IndexName(TableColumns): + """Hold a reference to an index name.""" + + def __init__(self, table, columns, suffix, create_index_name): + self.suffix = suffix + self.create_index_name = create_index_name + 
super().__init__(table, columns) + + def __str__(self): + return self.create_index_name(self.table, self.columns, self.suffix) + + +class ForeignKeyName(TableColumns): + """Hold a reference to a foreign key name.""" + + def __init__(self, from_table, from_columns, to_table, to_columns, suffix_template, create_fk_name): + self.to_reference = TableColumns(to_table, to_columns) + self.suffix_template = suffix_template + self.create_fk_name = create_fk_name + super().__init__(from_table, from_columns,) + + def references_table(self, table): + return super().references_table(table) or self.to_reference.references_table(table) + + def references_column(self, table, column): + return ( + super().references_column(table, column) or + self.to_reference.references_column(table, column) + ) + + def rename_table_references(self, old_table, new_table): + super().rename_table_references(old_table, new_table) + self.to_reference.rename_table_references(old_table, new_table) + + def rename_column_references(self, table, old_column, new_column): + super().rename_column_references(table, old_column, new_column) + self.to_reference.rename_column_references(table, old_column, new_column) + + def __str__(self): + suffix = self.suffix_template % { + 'to_table': self.to_reference.table, + 'to_column': self.to_reference.columns[0], + } + return self.create_fk_name(self.table, self.columns, suffix) + + +class Statement(Reference): + """ + Statement template and formatting parameters container. 
+ + Allows keeping a reference to a statement without interpolating identifiers + that might have to be adjusted if they're referencing a table or column + that is removed + """ + def __init__(self, template, **parts): + self.template = template + self.parts = parts + + def references_table(self, table): + return any( + hasattr(part, 'references_table') and part.references_table(table) + for part in self.parts.values() + ) + + def references_column(self, table, column): + return any( + hasattr(part, 'references_column') and part.references_column(table, column) + for part in self.parts.values() + ) + + def rename_table_references(self, old_table, new_table): + for part in self.parts.values(): + if hasattr(part, 'rename_table_references'): + part.rename_table_references(old_table, new_table) + + def rename_column_references(self, table, old_column, new_column): + for part in self.parts.values(): + if hasattr(part, 'rename_column_references'): + part.rename_column_references(table, old_column, new_column) + + def __str__(self): + return self.template % self.parts diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/dummy/base.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/dummy/base.py new file mode 100644 index 0000000000000000000000000000000000000000..c6a533e8421024bdc5d0c48c5f9bfa62840ebb16 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/dummy/base.py @@ -0,0 +1,73 @@ +""" +Dummy database backend for Django. + +Django uses this if the database ENGINE setting is empty (None or empty string). + +Each of these API functions, except connection.close(), raise +ImproperlyConfigured. 
+""" + +from django.core.exceptions import ImproperlyConfigured +from django.db.backends.base.base import BaseDatabaseWrapper +from django.db.backends.base.client import BaseDatabaseClient +from django.db.backends.base.creation import BaseDatabaseCreation +from django.db.backends.base.introspection import BaseDatabaseIntrospection +from django.db.backends.base.operations import BaseDatabaseOperations +from django.db.backends.dummy.features import DummyDatabaseFeatures + + +def complain(*args, **kwargs): + raise ImproperlyConfigured("settings.DATABASES is improperly configured. " + "Please supply the ENGINE value. Check " + "settings documentation for more details.") + + +def ignore(*args, **kwargs): + pass + + +class DatabaseOperations(BaseDatabaseOperations): + quote_name = complain + + +class DatabaseClient(BaseDatabaseClient): + runshell = complain + + +class DatabaseCreation(BaseDatabaseCreation): + create_test_db = ignore + destroy_test_db = ignore + + +class DatabaseIntrospection(BaseDatabaseIntrospection): + get_table_list = complain + get_table_description = complain + get_relations = complain + get_indexes = complain + get_key_columns = complain + + +class DatabaseWrapper(BaseDatabaseWrapper): + operators = {} + # Override the base class implementations with null + # implementations. Anything that tries to actually + # do something raises complain; anything that tries + # to rollback or undo something raises ignore. + _cursor = complain + ensure_connection = complain + _commit = complain + _rollback = ignore + _close = ignore + _savepoint = ignore + _savepoint_commit = complain + _savepoint_rollback = ignore + _set_autocommit = complain + # Classes instantiated in __init__(). 
+ client_class = DatabaseClient + creation_class = DatabaseCreation + features_class = DummyDatabaseFeatures + introspection_class = DatabaseIntrospection + ops_class = DatabaseOperations + + def is_usable(self): + return True diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/dummy/features.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/dummy/features.py new file mode 100644 index 0000000000000000000000000000000000000000..57a403aca783bfbf670f4310037148e42eebf80d --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/dummy/features.py @@ -0,0 +1,5 @@ +from django.db.backends.base.features import BaseDatabaseFeatures + + +class DummyDatabaseFeatures(BaseDatabaseFeatures): + supports_transactions = False diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/base.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/base.py new file mode 100644 index 0000000000000000000000000000000000000000..34e8d41f0395f5493f46bd710e2f0060187f2239 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/base.py @@ -0,0 +1,349 @@ +""" +MySQL database backend for Django. + +Requires mysqlclient: https://pypi.org/project/mysqlclient/ +""" +import re + +from django.core.exceptions import ImproperlyConfigured +from django.db import utils +from django.db.backends import utils as backend_utils +from django.db.backends.base.base import BaseDatabaseWrapper +from django.utils.functional import cached_property + +try: + import MySQLdb as Database +except ImportError as err: + raise ImproperlyConfigured( + 'Error loading MySQLdb module.\n' + 'Did you install mysqlclient?' 
+ ) from err + +from MySQLdb.constants import CLIENT, FIELD_TYPE # isort:skip +from MySQLdb.converters import conversions # isort:skip + +# Some of these import MySQLdb, so import them after checking if it's installed. +from .client import DatabaseClient # isort:skip +from .creation import DatabaseCreation # isort:skip +from .features import DatabaseFeatures # isort:skip +from .introspection import DatabaseIntrospection # isort:skip +from .operations import DatabaseOperations # isort:skip +from .schema import DatabaseSchemaEditor # isort:skip +from .validation import DatabaseValidation # isort:skip + +version = Database.version_info +if version < (1, 3, 3): + raise ImproperlyConfigured("mysqlclient 1.3.3 or newer is required; you have %s" % Database.__version__) + + +# MySQLdb returns TIME columns as timedelta -- they are more like timedelta in +# terms of actual behavior as they are signed and include days -- and Django +# expects time. +django_conversions = conversions.copy() +django_conversions.update({ + FIELD_TYPE.TIME: backend_utils.typecast_time, +}) + +# This should match the numerical portion of the version numbers (we can treat +# versions like 5.0.24 and 5.0.24a as the same). +server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})') + + +class CursorWrapper: + """ + A thin wrapper around MySQLdb's normal cursor class that catches particular + exception instances and reraises them with the correct types. + + Implemented as a wrapper, rather than a subclass, so that it isn't stuck + to the particular underlying representation returned by Connection.cursor(). 
+ """ + codes_for_integrityerror = ( + 1048, # Column cannot be null + 1690, # BIGINT UNSIGNED value is out of range + ) + + def __init__(self, cursor): + self.cursor = cursor + + def execute(self, query, args=None): + try: + # args is None means no string interpolation + return self.cursor.execute(query, args) + except Database.OperationalError as e: + # Map some error codes to IntegrityError, since they seem to be + # misclassified and Django would prefer the more logical place. + if e.args[0] in self.codes_for_integrityerror: + raise utils.IntegrityError(*tuple(e.args)) + raise + + def executemany(self, query, args): + try: + return self.cursor.executemany(query, args) + except Database.OperationalError as e: + # Map some error codes to IntegrityError, since they seem to be + # misclassified and Django would prefer the more logical place. + if e.args[0] in self.codes_for_integrityerror: + raise utils.IntegrityError(*tuple(e.args)) + raise + + def __getattr__(self, attr): + return getattr(self.cursor, attr) + + def __iter__(self): + return iter(self.cursor) + + +class DatabaseWrapper(BaseDatabaseWrapper): + vendor = 'mysql' + display_name = 'MySQL' + # This dictionary maps Field objects to their associated MySQL column + # types, as strings. Column-type strings can contain format strings; they'll + # be interpolated against the values of Field.__dict__ before being output. + # If a column type is set to None, it won't be included in the output. 
+ _data_types = { + 'AutoField': 'integer AUTO_INCREMENT', + 'BigAutoField': 'bigint AUTO_INCREMENT', + 'BinaryField': 'longblob', + 'BooleanField': 'bool', + 'CharField': 'varchar(%(max_length)s)', + 'DateField': 'date', + 'DateTimeField': 'datetime', + 'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)', + 'DurationField': 'bigint', + 'FileField': 'varchar(%(max_length)s)', + 'FilePathField': 'varchar(%(max_length)s)', + 'FloatField': 'double precision', + 'IntegerField': 'integer', + 'BigIntegerField': 'bigint', + 'IPAddressField': 'char(15)', + 'GenericIPAddressField': 'char(39)', + 'NullBooleanField': 'bool', + 'OneToOneField': 'integer', + 'PositiveIntegerField': 'integer UNSIGNED', + 'PositiveSmallIntegerField': 'smallint UNSIGNED', + 'SlugField': 'varchar(%(max_length)s)', + 'SmallIntegerField': 'smallint', + 'TextField': 'longtext', + 'TimeField': 'time', + 'UUIDField': 'char(32)', + } + + @cached_property + def data_types(self): + if self.features.supports_microsecond_precision: + return dict(self._data_types, DateTimeField='datetime(6)', TimeField='time(6)') + else: + return self._data_types + + # For these columns, MySQL doesn't: + # - accept default values and implicitly treats these columns as nullable + # - support a database index + _limited_data_types = ( + 'tinyblob', 'blob', 'mediumblob', 'longblob', 'tinytext', 'text', + 'mediumtext', 'longtext', 'json', + ) + + operators = { + 'exact': '= %s', + 'iexact': 'LIKE %s', + 'contains': 'LIKE BINARY %s', + 'icontains': 'LIKE %s', + 'gt': '> %s', + 'gte': '>= %s', + 'lt': '< %s', + 'lte': '<= %s', + 'startswith': 'LIKE BINARY %s', + 'endswith': 'LIKE BINARY %s', + 'istartswith': 'LIKE %s', + 'iendswith': 'LIKE %s', + } + + # The patterns below are used to generate SQL pattern lookup clauses when + # the right-hand side of the lookup isn't a raw string (it might be an expression + # or the result of a bilateral transformation). + # In those cases, special characters for LIKE operators (e.g. 
\, *, _) should be + # escaped on database side. + # + # Note: we use str.format() here for readability as '%' is used as a wildcard for + # the LIKE operator. + pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')" + pattern_ops = { + 'contains': "LIKE BINARY CONCAT('%%', {}, '%%')", + 'icontains': "LIKE CONCAT('%%', {}, '%%')", + 'startswith': "LIKE BINARY CONCAT({}, '%%')", + 'istartswith': "LIKE CONCAT({}, '%%')", + 'endswith': "LIKE BINARY CONCAT('%%', {})", + 'iendswith': "LIKE CONCAT('%%', {})", + } + + isolation_levels = { + 'read uncommitted', + 'read committed', + 'repeatable read', + 'serializable', + } + + Database = Database + SchemaEditorClass = DatabaseSchemaEditor + # Classes instantiated in __init__(). + client_class = DatabaseClient + creation_class = DatabaseCreation + features_class = DatabaseFeatures + introspection_class = DatabaseIntrospection + ops_class = DatabaseOperations + validation_class = DatabaseValidation + + def get_connection_params(self): + kwargs = { + 'conv': django_conversions, + 'charset': 'utf8', + } + settings_dict = self.settings_dict + if settings_dict['USER']: + kwargs['user'] = settings_dict['USER'] + if settings_dict['NAME']: + kwargs['db'] = settings_dict['NAME'] + if settings_dict['PASSWORD']: + kwargs['passwd'] = settings_dict['PASSWORD'] + if settings_dict['HOST'].startswith('/'): + kwargs['unix_socket'] = settings_dict['HOST'] + elif settings_dict['HOST']: + kwargs['host'] = settings_dict['HOST'] + if settings_dict['PORT']: + kwargs['port'] = int(settings_dict['PORT']) + # We need the number of potentially affected rows after an + # "UPDATE", not the number of changed rows. + kwargs['client_flag'] = CLIENT.FOUND_ROWS + # Validate the transaction isolation level, if specified. 
+ options = settings_dict['OPTIONS'].copy() + isolation_level = options.pop('isolation_level', 'read committed') + if isolation_level: + isolation_level = isolation_level.lower() + if isolation_level not in self.isolation_levels: + raise ImproperlyConfigured( + "Invalid transaction isolation level '%s' specified.\n" + "Use one of %s, or None." % ( + isolation_level, + ', '.join("'%s'" % s for s in sorted(self.isolation_levels)) + )) + self.isolation_level = isolation_level + kwargs.update(options) + return kwargs + + def get_new_connection(self, conn_params): + return Database.connect(**conn_params) + + def init_connection_state(self): + assignments = [] + if self.features.is_sql_auto_is_null_enabled: + # SQL_AUTO_IS_NULL controls whether an AUTO_INCREMENT column on + # a recently inserted row will return when the field is tested + # for NULL. Disabling this brings this aspect of MySQL in line + # with SQL standards. + assignments.append('SET SQL_AUTO_IS_NULL = 0') + + if self.isolation_level: + assignments.append('SET SESSION TRANSACTION ISOLATION LEVEL %s' % self.isolation_level.upper()) + + if assignments: + with self.cursor() as cursor: + cursor.execute('; '.join(assignments)) + + def create_cursor(self, name=None): + cursor = self.connection.cursor() + return CursorWrapper(cursor) + + def _rollback(self): + try: + BaseDatabaseWrapper._rollback(self) + except Database.NotSupportedError: + pass + + def _set_autocommit(self, autocommit): + with self.wrap_database_errors: + self.connection.autocommit(autocommit) + + def disable_constraint_checking(self): + """ + Disable foreign key checks, primarily for use in adding rows with + forward references. Always return True to indicate constraint checks + need to be re-enabled. + """ + self.cursor().execute('SET foreign_key_checks=0') + return True + + def enable_constraint_checking(self): + """ + Re-enable foreign key checks after they have been disabled. 
+ """ + # Override needs_rollback in case constraint_checks_disabled is + # nested inside transaction.atomic. + self.needs_rollback, needs_rollback = False, self.needs_rollback + try: + self.cursor().execute('SET foreign_key_checks=1') + finally: + self.needs_rollback = needs_rollback + + def check_constraints(self, table_names=None): + """ + Check each table name in `table_names` for rows with invalid foreign + key references. This method is intended to be used in conjunction with + `disable_constraint_checking()` and `enable_constraint_checking()`, to + determine if rows with invalid references were entered while constraint + checks were off. + + Raise an IntegrityError on the first invalid foreign key reference + encountered (if any) and provide detailed information about the + invalid reference in the error message. + + Backends can override this method if they can more directly apply + constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE") + """ + cursor = self.cursor() + if table_names is None: + table_names = self.introspection.table_names(cursor) + for table_name in table_names: + primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name) + if not primary_key_column_name: + continue + key_columns = self.introspection.get_key_columns(cursor, table_name) + for column_name, referenced_table_name, referenced_column_name in key_columns: + cursor.execute( + """ + SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING + LEFT JOIN `%s` as REFERRED + ON (REFERRING.`%s` = REFERRED.`%s`) + WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL + """ % ( + primary_key_column_name, column_name, table_name, + referenced_table_name, column_name, referenced_column_name, + column_name, referenced_column_name, + ) + ) + for bad_row in cursor.fetchall(): + raise utils.IntegrityError( + "The row in table '%s' with primary key '%s' has an invalid " + "foreign key: %s.%s contains a value '%s' that does not have a corresponding value 
in %s.%s." + % ( + table_name, bad_row[0], table_name, column_name, + bad_row[1], referenced_table_name, referenced_column_name, + ) + ) + + def is_usable(self): + try: + self.connection.ping() + except Database.Error: + return False + else: + return True + + @cached_property + def mysql_version(self): + with self.temporary_connection() as cursor: + cursor.execute('SELECT VERSION()') + server_info = cursor.fetchone()[0] + match = server_version_re.match(server_info) + if not match: + raise Exception('Unable to determine MySQL version from version string %r' % server_info) + return tuple(int(x) for x in match.groups()) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/client.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/client.py new file mode 100644 index 0000000000000000000000000000000000000000..224bfc3dc67aa256e3a53c9e9a9659b63e662bc7 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/client.py @@ -0,0 +1,48 @@ +import subprocess + +from django.db.backends.base.client import BaseDatabaseClient + + +class DatabaseClient(BaseDatabaseClient): + executable_name = 'mysql' + + @classmethod + def settings_to_cmd_args(cls, settings_dict): + args = [cls.executable_name] + db = settings_dict['OPTIONS'].get('db', settings_dict['NAME']) + user = settings_dict['OPTIONS'].get('user', settings_dict['USER']) + passwd = settings_dict['OPTIONS'].get('passwd', settings_dict['PASSWORD']) + host = settings_dict['OPTIONS'].get('host', settings_dict['HOST']) + port = settings_dict['OPTIONS'].get('port', settings_dict['PORT']) + server_ca = settings_dict['OPTIONS'].get('ssl', {}).get('ca') + client_cert = settings_dict['OPTIONS'].get('ssl', {}).get('cert') + client_key = settings_dict['OPTIONS'].get('ssl', {}).get('key') + defaults_file = settings_dict['OPTIONS'].get('read_default_file') + # Seems to be no good 
way to set sql_mode with CLI. + + if defaults_file: + args += ["--defaults-file=%s" % defaults_file] + if user: + args += ["--user=%s" % user] + if passwd: + args += ["--password=%s" % passwd] + if host: + if '/' in host: + args += ["--socket=%s" % host] + else: + args += ["--host=%s" % host] + if port: + args += ["--port=%s" % port] + if server_ca: + args += ["--ssl-ca=%s" % server_ca] + if client_cert: + args += ["--ssl-cert=%s" % client_cert] + if client_key: + args += ["--ssl-key=%s" % client_key] + if db: + args += [db] + return args + + def runshell(self): + args = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict) + subprocess.check_call(args) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/compiler.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..fc74ac1991c08f4fc4674a01e34e5dd226fd5a38 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/compiler.py @@ -0,0 +1,25 @@ +from django.db.models.sql import compiler + + +class SQLCompiler(compiler.SQLCompiler): + def as_subquery_condition(self, alias, columns, compiler): + qn = compiler.quote_name_unless_alias + qn2 = self.connection.ops.quote_name + sql, params = self.as_sql() + return '(%s) IN (%s)' % (', '.join('%s.%s' % (qn(alias), qn2(column)) for column in columns), sql), params + + +class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler): + pass + + +class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler): + pass + + +class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler): + pass + + +class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler): + pass diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/creation.py 
b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/creation.py new file mode 100644 index 0000000000000000000000000000000000000000..190d5cc56446730b30f5b2a7cfa0480dee40a985 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/creation.py @@ -0,0 +1,72 @@ +import subprocess +import sys + +from django.db.backends.base.creation import BaseDatabaseCreation + +from .client import DatabaseClient + + +class DatabaseCreation(BaseDatabaseCreation): + + def sql_table_creation_suffix(self): + suffix = [] + test_settings = self.connection.settings_dict['TEST'] + if test_settings['CHARSET']: + suffix.append('CHARACTER SET %s' % test_settings['CHARSET']) + if test_settings['COLLATION']: + suffix.append('COLLATE %s' % test_settings['COLLATION']) + return ' '.join(suffix) + + def _execute_create_test_db(self, cursor, parameters, keepdb=False): + try: + if keepdb: + # If the database should be kept, add "IF NOT EXISTS" to avoid + # "database exists" error, also temporarily disable "database + # exists" warning. + cursor.execute(''' + SET @_tmp_sql_notes := @@sql_notes, sql_notes = 0; + CREATE DATABASE IF NOT EXISTS %(dbname)s %(suffix)s; + SET sql_notes = @_tmp_sql_notes; + ''' % parameters) + else: + super()._execute_create_test_db(cursor, parameters, keepdb) + except Exception as e: + if len(e.args) < 1 or e.args[0] != 1007: + # All errors except "database exists" (1007) cancel tests. 
+ sys.stderr.write('Got an error creating the test database: %s\n' % e) + sys.exit(2) + else: + raise e + + def _clone_test_db(self, suffix, verbosity, keepdb=False): + source_database_name = self.connection.settings_dict['NAME'] + target_database_name = self.get_test_db_clone_settings(suffix)['NAME'] + test_db_params = { + 'dbname': self.connection.ops.quote_name(target_database_name), + 'suffix': self.sql_table_creation_suffix(), + } + with self._nodb_connection.cursor() as cursor: + try: + self._execute_create_test_db(cursor, test_db_params, keepdb) + except Exception: + try: + if verbosity >= 1: + print("Destroying old test database for alias %s..." % ( + self._get_database_display_str(verbosity, target_database_name), + )) + cursor.execute('DROP DATABASE %(dbname)s' % test_db_params) + self._execute_create_test_db(cursor, test_db_params, keepdb) + except Exception as e: + sys.stderr.write("Got an error recreating the test database: %s\n" % e) + sys.exit(2) + + dump_cmd = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict) + dump_cmd[0] = 'mysqldump' + dump_cmd[-1] = source_database_name + load_cmd = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict) + load_cmd[-1] = target_database_name + + dump_proc = subprocess.Popen(dump_cmd, stdout=subprocess.PIPE) + load_proc = subprocess.Popen(load_cmd, stdin=dump_proc.stdout, stdout=subprocess.PIPE) + dump_proc.stdout.close() # allow dump_proc to receive a SIGPIPE if load_proc exits. 
+ load_proc.communicate() diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/features.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/features.py new file mode 100644 index 0000000000000000000000000000000000000000..18ab088941b67754de209943916b4780979fa929 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/features.py @@ -0,0 +1,100 @@ +from django.db.backends.base.features import BaseDatabaseFeatures +from django.utils.functional import cached_property + + +class DatabaseFeatures(BaseDatabaseFeatures): + empty_fetchmany_value = () + update_can_self_select = False + allows_group_by_pk = True + related_fields_match_type = True + allow_sliced_subqueries = False + has_select_for_update = True + has_select_for_update_nowait = False + supports_forward_references = False + supports_regex_backreferencing = False + supports_date_lookup_using_string = False + can_introspect_autofield = True + can_introspect_binary_field = False + can_introspect_small_integer_field = True + can_introspect_positive_integer_field = True + supports_index_column_ordering = False + supports_timezones = False + requires_explicit_null_ordering_when_grouping = True + allows_auto_pk_0 = False + uses_savepoints = True + can_release_savepoints = True + atomic_transactions = False + supports_column_check_constraints = False + can_clone_databases = True + supports_temporal_subtraction = True + supports_select_intersection = False + supports_select_difference = False + supports_slicing_ordering_in_compound = True + supports_index_on_text_field = False + has_case_insensitive_like = False + create_test_procedure_without_params_sql = """ + CREATE PROCEDURE test_procedure () + BEGIN + DECLARE V_I INTEGER; + SET V_I = 1; + END; + """ + create_test_procedure_with_int_param_sql = """ + CREATE PROCEDURE test_procedure (P_I INTEGER) + BEGIN + 
DECLARE V_I INTEGER; + SET V_I = P_I; + END; + """ + + @cached_property + def _mysql_storage_engine(self): + "Internal method used in Django tests. Don't rely on this from your code" + with self.connection.cursor() as cursor: + cursor.execute("SELECT ENGINE FROM INFORMATION_SCHEMA.ENGINES WHERE SUPPORT = 'DEFAULT'") + result = cursor.fetchone() + return result[0] + + @cached_property + def can_introspect_foreign_keys(self): + "Confirm support for introspected foreign keys" + return self._mysql_storage_engine != 'MyISAM' + + @cached_property + def supports_microsecond_precision(self): + return self.connection.mysql_version >= (5, 6, 4) + + @cached_property + def has_zoneinfo_database(self): + # Test if the time zone definitions are installed. + with self.connection.cursor() as cursor: + cursor.execute("SELECT 1 FROM mysql.time_zone LIMIT 1") + return cursor.fetchone() is not None + + def introspected_boolean_field_type(self, *args, **kwargs): + return 'IntegerField' + + @cached_property + def is_sql_auto_is_null_enabled(self): + with self.connection.cursor() as cursor: + cursor.execute('SELECT @@SQL_AUTO_IS_NULL') + result = cursor.fetchone() + return result and result[0] == 1 + + @cached_property + def supports_over_clause(self): + return self.connection.mysql_version >= (8, 0, 2) + + @cached_property + def supports_transactions(self): + """ + All storage engines except MyISAM support transactions. 
+ """ + return self._mysql_storage_engine != 'MyISAM' + + @cached_property + def ignores_table_name_case(self): + with self.connection.cursor() as cursor: + cursor.execute('SELECT @@LOWER_CASE_TABLE_NAMES') + result = cursor.fetchone() + return result and result[0] != 0 diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/introspection.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/introspection.py new file mode 100644 index 0000000000000000000000000000000000000000..caa5826e55b7fb69f38045a1c6efbf9281f06993 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/introspection.py @@ -0,0 +1,242 @@ +import warnings +from collections import namedtuple + +from MySQLdb.constants import FIELD_TYPE + +from django.db.backends.base.introspection import ( + BaseDatabaseIntrospection, FieldInfo, TableInfo, +) +from django.db.models.indexes import Index +from django.utils.datastructures import OrderedSet +from django.utils.deprecation import RemovedInDjango21Warning + +FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('extra', 'is_unsigned')) +InfoLine = namedtuple('InfoLine', 'col_name data_type max_len num_prec num_scale extra column_default is_unsigned') + + +class DatabaseIntrospection(BaseDatabaseIntrospection): + data_types_reverse = { + FIELD_TYPE.BLOB: 'TextField', + FIELD_TYPE.CHAR: 'CharField', + FIELD_TYPE.DECIMAL: 'DecimalField', + FIELD_TYPE.NEWDECIMAL: 'DecimalField', + FIELD_TYPE.DATE: 'DateField', + FIELD_TYPE.DATETIME: 'DateTimeField', + FIELD_TYPE.DOUBLE: 'FloatField', + FIELD_TYPE.FLOAT: 'FloatField', + FIELD_TYPE.INT24: 'IntegerField', + FIELD_TYPE.LONG: 'IntegerField', + FIELD_TYPE.LONGLONG: 'BigIntegerField', + FIELD_TYPE.SHORT: 'SmallIntegerField', + FIELD_TYPE.STRING: 'CharField', + FIELD_TYPE.TIME: 'TimeField', + FIELD_TYPE.TIMESTAMP: 'DateTimeField', + FIELD_TYPE.TINY: 
'IntegerField', + FIELD_TYPE.TINY_BLOB: 'TextField', + FIELD_TYPE.MEDIUM_BLOB: 'TextField', + FIELD_TYPE.LONG_BLOB: 'TextField', + FIELD_TYPE.VAR_STRING: 'CharField', + } + + def get_field_type(self, data_type, description): + field_type = super().get_field_type(data_type, description) + if 'auto_increment' in description.extra: + if field_type == 'IntegerField': + return 'AutoField' + elif field_type == 'BigIntegerField': + return 'BigAutoField' + if description.is_unsigned: + if field_type == 'IntegerField': + return 'PositiveIntegerField' + elif field_type == 'SmallIntegerField': + return 'PositiveSmallIntegerField' + return field_type + + def get_table_list(self, cursor): + """Return a list of table and view names in the current database.""" + cursor.execute("SHOW FULL TABLES") + return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1])) + for row in cursor.fetchall()] + + def get_table_description(self, cursor, table_name): + """ + Return a description of the table with the DB-API cursor.description + interface." 
+ """ + # information_schema database gives more accurate results for some figures: + # - varchar length returned by cursor.description is an internal length, + # not visible length (#5725) + # - precision and scale (for decimal fields) (#5014) + # - auto_increment is not available in cursor.description + cursor.execute(""" + SELECT + column_name, data_type, character_maximum_length, + numeric_precision, numeric_scale, extra, column_default, + CASE + WHEN column_type LIKE '%% unsigned' THEN 1 + ELSE 0 + END AS is_unsigned + FROM information_schema.columns + WHERE table_name = %s AND table_schema = DATABASE()""", [table_name]) + field_info = {line[0]: InfoLine(*line) for line in cursor.fetchall()} + + cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name)) + + def to_int(i): + return int(i) if i is not None else i + + fields = [] + for line in cursor.description: + col_name = line[0] + fields.append( + FieldInfo(*( + (col_name,) + + line[1:3] + + ( + to_int(field_info[col_name].max_len) or line[3], + to_int(field_info[col_name].num_prec) or line[4], + to_int(field_info[col_name].num_scale) or line[5], + line[6], + field_info[col_name].column_default, + field_info[col_name].extra, + field_info[col_name].is_unsigned, + ) + )) + ) + return fields + + def get_sequences(self, cursor, table_name, table_fields=()): + for field_info in self.get_table_description(cursor, table_name): + if 'auto_increment' in field_info.extra: + # MySQL allows only one auto-increment column per table. + return [{'table': table_name, 'column': field_info.name}] + return [] + + def get_relations(self, cursor, table_name): + """ + Return a dictionary of {field_name: (field_name_other_table, other_table)} + representing all relationships to the given table. 
+ """ + constraints = self.get_key_columns(cursor, table_name) + relations = {} + for my_fieldname, other_table, other_field in constraints: + relations[my_fieldname] = (other_field, other_table) + return relations + + def get_key_columns(self, cursor, table_name): + """ + Return a list of (column_name, referenced_table_name, referenced_column_name) + for all key columns in the given table. + """ + key_columns = [] + cursor.execute(""" + SELECT column_name, referenced_table_name, referenced_column_name + FROM information_schema.key_column_usage + WHERE table_name = %s + AND table_schema = DATABASE() + AND referenced_table_name IS NOT NULL + AND referenced_column_name IS NOT NULL""", [table_name]) + key_columns.extend(cursor.fetchall()) + return key_columns + + def get_indexes(self, cursor, table_name): + warnings.warn( + "get_indexes() is deprecated in favor of get_constraints().", + RemovedInDjango21Warning, stacklevel=2 + ) + cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name)) + # Do a two-pass search for indexes: on first pass check which indexes + # are multicolumn, on second pass check which single-column indexes + # are present. + rows = list(cursor.fetchall()) + multicol_indexes = set() + for row in rows: + if row[3] > 1: + multicol_indexes.add(row[2]) + indexes = {} + for row in rows: + if row[2] in multicol_indexes: + continue + if row[4] not in indexes: + indexes[row[4]] = {'primary_key': False, 'unique': False} + # It's possible to have the unique and PK constraints in separate indexes. + if row[2] == 'PRIMARY': + indexes[row[4]]['primary_key'] = True + if not row[1]: + indexes[row[4]]['unique'] = True + return indexes + + def get_storage_engine(self, cursor, table_name): + """ + Retrieve the storage engine for a given table. Return the default + storage engine if the table doesn't exist. 
+ """ + cursor.execute( + "SELECT engine " + "FROM information_schema.tables " + "WHERE table_name = %s", [table_name]) + result = cursor.fetchone() + if not result: + return self.connection.features._mysql_storage_engine + return result[0] + + def get_constraints(self, cursor, table_name): + """ + Retrieve any constraints or keys (unique, pk, fk, check, index) across + one or more columns. + """ + constraints = {} + # Get the actual constraint names and columns + name_query = """ + SELECT kc.`constraint_name`, kc.`column_name`, + kc.`referenced_table_name`, kc.`referenced_column_name` + FROM information_schema.key_column_usage AS kc + WHERE + kc.table_schema = DATABASE() AND + kc.table_name = %s + """ + cursor.execute(name_query, [table_name]) + for constraint, column, ref_table, ref_column in cursor.fetchall(): + if constraint not in constraints: + constraints[constraint] = { + 'columns': OrderedSet(), + 'primary_key': False, + 'unique': False, + 'index': False, + 'check': False, + 'foreign_key': (ref_table, ref_column) if ref_column else None, + } + constraints[constraint]['columns'].add(column) + # Now get the constraint types + type_query = """ + SELECT c.constraint_name, c.constraint_type + FROM information_schema.table_constraints AS c + WHERE + c.table_schema = DATABASE() AND + c.table_name = %s + """ + cursor.execute(type_query, [table_name]) + for constraint, kind in cursor.fetchall(): + if kind.lower() == "primary key": + constraints[constraint]['primary_key'] = True + constraints[constraint]['unique'] = True + elif kind.lower() == "unique": + constraints[constraint]['unique'] = True + # Now add in the indexes + cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name)) + for table, non_unique, index, colseq, column, type_ in [x[:5] + (x[10],) for x in cursor.fetchall()]: + if index not in constraints: + constraints[index] = { + 'columns': OrderedSet(), + 'primary_key': False, + 'unique': False, + 'check': False, + 'foreign_key': 
None, + } + constraints[index]['index'] = True + constraints[index]['type'] = Index.suffix if type_ == 'BTREE' else type_.lower() + constraints[index]['columns'].add(column) + # Convert the sorted sets to lists + for constraint in constraints.values(): + constraint['columns'] = list(constraint['columns']) + return constraints diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/operations.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/operations.py new file mode 100644 index 0000000000000000000000000000000000000000..d9cb6a27c0a25de188c95cc0a9b8dec0bd63e264 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/operations.py @@ -0,0 +1,285 @@ +import uuid + +from django.conf import settings +from django.db.backends.base.operations import BaseDatabaseOperations +from django.utils import timezone +from django.utils.encoding import force_text + + +class DatabaseOperations(BaseDatabaseOperations): + compiler_module = "django.db.backends.mysql.compiler" + + # MySQL stores positive fields as UNSIGNED ints. + integer_field_ranges = dict( + BaseDatabaseOperations.integer_field_ranges, + PositiveSmallIntegerField=(0, 65535), + PositiveIntegerField=(0, 4294967295), + ) + cast_data_types = { + 'CharField': 'char(%(max_length)s)', + 'IntegerField': 'signed integer', + 'BigIntegerField': 'signed integer', + 'SmallIntegerField': 'signed integer', + 'FloatField': 'signed', + 'PositiveIntegerField': 'unsigned integer', + 'PositiveSmallIntegerField': 'unsigned integer', + } + cast_char_field_without_max_length = 'char' + + def date_extract_sql(self, lookup_type, field_name): + # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html + if lookup_type == 'week_day': + # DAYOFWEEK() returns an integer, 1-7, Sunday=1. + # Note: WEEKDAY() returns 0-6, Monday=0. 
+ return "DAYOFWEEK(%s)" % field_name + elif lookup_type == 'week': + # Override the value of default_week_format for consistency with + # other database backends. + # Mode 3: Monday, 1-53, with 4 or more days this year. + return "WEEK(%s, 3)" % field_name + else: + # EXTRACT returns 1-53 based on ISO-8601 for the week number. + return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) + + def date_trunc_sql(self, lookup_type, field_name): + fields = { + 'year': '%%Y-01-01', + 'month': '%%Y-%%m-01', + } # Use double percents to escape. + if lookup_type in fields: + format_str = fields[lookup_type] + return "CAST(DATE_FORMAT(%s, '%s') AS DATE)" % (field_name, format_str) + elif lookup_type == 'quarter': + return "MAKEDATE(YEAR(%s), 1) + INTERVAL QUARTER(%s) QUARTER - INTERVAL 1 QUARTER" % ( + field_name, field_name + ) + else: + return "DATE(%s)" % (field_name) + + def _convert_field_to_tz(self, field_name, tzname): + if settings.USE_TZ: + field_name = "CONVERT_TZ(%s, 'UTC', '%s')" % (field_name, tzname) + return field_name + + def datetime_cast_date_sql(self, field_name, tzname): + field_name = self._convert_field_to_tz(field_name, tzname) + return "DATE(%s)" % field_name + + def datetime_cast_time_sql(self, field_name, tzname): + field_name = self._convert_field_to_tz(field_name, tzname) + return "TIME(%s)" % field_name + + def datetime_extract_sql(self, lookup_type, field_name, tzname): + field_name = self._convert_field_to_tz(field_name, tzname) + return self.date_extract_sql(lookup_type, field_name) + + def datetime_trunc_sql(self, lookup_type, field_name, tzname): + field_name = self._convert_field_to_tz(field_name, tzname) + fields = ['year', 'month', 'day', 'hour', 'minute', 'second'] + format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape. 
+ format_def = ('0000-', '01', '-01', ' 00:', '00', ':00') + if lookup_type == 'quarter': + return ( + "CAST(DATE_FORMAT(MAKEDATE(YEAR({field_name}), 1) + " + "INTERVAL QUARTER({field_name}) QUARTER - " + + "INTERVAL 1 QUARTER, '%%Y-%%m-01 00:00:00') AS DATETIME)" + ).format(field_name=field_name) + try: + i = fields.index(lookup_type) + 1 + except ValueError: + sql = field_name + else: + format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]]) + sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str) + return sql + + def time_trunc_sql(self, lookup_type, field_name): + fields = { + 'hour': '%%H:00:00', + 'minute': '%%H:%%i:00', + 'second': '%%H:%%i:%%s', + } # Use double percents to escape. + if lookup_type in fields: + format_str = fields[lookup_type] + return "CAST(DATE_FORMAT(%s, '%s') AS TIME)" % (field_name, format_str) + else: + return "TIME(%s)" % (field_name) + + def date_interval_sql(self, timedelta): + return "INTERVAL '%06f' SECOND_MICROSECOND" % timedelta.total_seconds() + + def format_for_duration_arithmetic(self, sql): + if self.connection.features.supports_microsecond_precision: + return 'INTERVAL %s MICROSECOND' % sql + else: + return 'INTERVAL FLOOR(%s / 1000000) SECOND' % sql + + def force_no_ordering(self): + """ + "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped + columns. If no ordering would otherwise be applied, we don't want any + implicit sorting going on. + """ + return [(None, ("NULL", [], False))] + + def last_executed_query(self, cursor, sql, params): + # With MySQLdb, cursor objects have an (undocumented) "_last_executed" + # attribute where the exact query sent to the database is saved. + # See MySQLdb/cursors.py in the source distribution. 
+ return force_text(getattr(cursor, '_last_executed', None), errors='replace') + + def no_limit_value(self): + # 2**64 - 1, as recommended by the MySQL documentation + return 18446744073709551615 + + def quote_name(self, name): + if name.startswith("`") and name.endswith("`"): + return name # Quoting once is enough. + return "`%s`" % name + + def random_function_sql(self): + return 'RAND()' + + def sql_flush(self, style, tables, sequences, allow_cascade=False): + # NB: The generated SQL below is specific to MySQL + # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements + # to clear all tables of all data + if tables: + sql = ['SET FOREIGN_KEY_CHECKS = 0;'] + for table in tables: + sql.append('%s %s;' % ( + style.SQL_KEYWORD('TRUNCATE'), + style.SQL_FIELD(self.quote_name(table)), + )) + sql.append('SET FOREIGN_KEY_CHECKS = 1;') + sql.extend(self.sequence_reset_by_name_sql(style, sequences)) + return sql + else: + return [] + + def validate_autopk_value(self, value): + # MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653. + if value == 0: + raise ValueError('The database backend does not accept 0 as a ' + 'value for AutoField.') + return value + + def adapt_datetimefield_value(self, value): + if value is None: + return None + + # Expression values are adapted by the database. + if hasattr(value, 'resolve_expression'): + return value + + # MySQL doesn't support tz-aware datetimes + if timezone.is_aware(value): + if settings.USE_TZ: + value = timezone.make_naive(value, self.connection.timezone) + else: + raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.") + + if not self.connection.features.supports_microsecond_precision: + value = value.replace(microsecond=0) + + return str(value) + + def adapt_timefield_value(self, value): + if value is None: + return None + + # Expression values are adapted by the database. 
+ if hasattr(value, 'resolve_expression'): + return value + + # MySQL doesn't support tz-aware times + if timezone.is_aware(value): + raise ValueError("MySQL backend does not support timezone-aware times.") + + return str(value) + + def max_name_length(self): + return 64 + + def bulk_insert_sql(self, fields, placeholder_rows): + placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) + values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) + return "VALUES " + values_sql + + def combine_expression(self, connector, sub_expressions): + if connector == '^': + return 'POW(%s)' % ','.join(sub_expressions) + # Convert the result to a signed integer since MySQL's binary operators + # return an unsigned integer. + elif connector in ('&', '|', '<<'): + return 'CONVERT(%s, SIGNED)' % connector.join(sub_expressions) + elif connector == '>>': + lhs, rhs = sub_expressions + return 'FLOOR(%(lhs)s / POW(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} + return super().combine_expression(connector, sub_expressions) + + def get_db_converters(self, expression): + converters = super().get_db_converters(expression) + internal_type = expression.output_field.get_internal_type() + if internal_type == 'TextField': + converters.append(self.convert_textfield_value) + elif internal_type in ['BooleanField', 'NullBooleanField']: + converters.append(self.convert_booleanfield_value) + elif internal_type == 'DateTimeField': + converters.append(self.convert_datetimefield_value) + elif internal_type == 'UUIDField': + converters.append(self.convert_uuidfield_value) + return converters + + def convert_textfield_value(self, value, expression, connection): + if value is not None: + value = force_text(value) + return value + + def convert_booleanfield_value(self, value, expression, connection): + if value in (0, 1): + value = bool(value) + return value + + def convert_datetimefield_value(self, value, expression, connection): + if value is not None: + if settings.USE_TZ: + value = 
timezone.make_aware(value, self.connection.timezone) + return value + + def convert_uuidfield_value(self, value, expression, connection): + if value is not None: + value = uuid.UUID(value) + return value + + def binary_placeholder_sql(self, value): + return '_binary %s' if value is not None and not hasattr(value, 'as_sql') else '%s' + + def subtract_temporals(self, internal_type, lhs, rhs): + lhs_sql, lhs_params = lhs + rhs_sql, rhs_params = rhs + if self.connection.features.supports_microsecond_precision: + if internal_type == 'TimeField': + return ( + "((TIME_TO_SEC(%(lhs)s) * POW(10, 6) + MICROSECOND(%(lhs)s)) -" + " (TIME_TO_SEC(%(rhs)s) * POW(10, 6) + MICROSECOND(%(rhs)s)))" + ) % {'lhs': lhs_sql, 'rhs': rhs_sql}, lhs_params * 2 + rhs_params * 2 + else: + return "TIMESTAMPDIFF(MICROSECOND, %s, %s)" % (rhs_sql, lhs_sql), rhs_params + lhs_params + elif internal_type == 'TimeField': + return ( + "(TIME_TO_SEC(%s) * POW(10, 6) - TIME_TO_SEC(%s) * POW(10, 6))" + ) % (lhs_sql, rhs_sql), lhs_params + rhs_params + else: + return "(TIMESTAMPDIFF(SECOND, %s, %s) * POW(10, 6))" % (rhs_sql, lhs_sql), rhs_params + lhs_params + + def regex_lookup(self, lookup_type): + # REGEXP BINARY doesn't work correctly in MySQL 8+ and REGEXP_LIKE + # doesn't exist in MySQL 5.6. 
+ if self.connection.mysql_version < (8, 0, 0): + if lookup_type == 'regex': + return '%s REGEXP BINARY %s' + return '%s REGEXP %s' + + match_option = 'c' if lookup_type == 'regex' else 'i' + return "REGEXP_LIKE(%%s, %%s, '%s')" % match_option diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/schema.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..24abdaf6117fda8a372ab39870b965a58e9b0ad9 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/schema.py @@ -0,0 +1,101 @@ +from django.db.backends.base.schema import BaseDatabaseSchemaEditor +from django.db.models import NOT_PROVIDED + + +class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): + + sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s" + + sql_alter_column_null = "MODIFY %(column)s %(type)s NULL" + sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL" + sql_alter_column_type = "MODIFY %(column)s %(type)s" + + # No 'CASCADE' which works as a no-op in MySQL but is undocumented + sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s" + + sql_rename_column = "ALTER TABLE %(table)s CHANGE %(old_column)s %(new_column)s %(type)s" + + sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s" + + sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s" + + sql_delete_index = "DROP INDEX %(name)s ON %(table)s" + + sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)" + sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY" + + def quote_value(self, value): + # Inner import to allow module to fail to load gracefully + import MySQLdb.converters + return MySQLdb.escape(value, MySQLdb.converters.conversions) + + def _is_limited_data_type(self, field): + db_type = 
field.db_type(self.connection) + return db_type is not None and db_type.lower() in self.connection._limited_data_types + + def skip_default(self, field): + return self._is_limited_data_type(field) + + def add_field(self, model, field): + super().add_field(model, field) + + # Simulate the effect of a one-off default. + # field.default may be unhashable, so a set isn't used for "in" check. + if self.skip_default(field) and field.default not in (None, NOT_PROVIDED): + effective_default = self.effective_default(field) + self.execute('UPDATE %(table)s SET %(column)s = %%s' % { + 'table': self.quote_name(model._meta.db_table), + 'column': self.quote_name(field.column), + }, [effective_default]) + + def _field_should_be_indexed(self, model, field): + create_index = super()._field_should_be_indexed(model, field) + storage = self.connection.introspection.get_storage_engine( + self.connection.cursor(), model._meta.db_table + ) + # No need to create an index for ForeignKey fields except if + # db_constraint=False because the index from that constraint won't be + # created. + if (storage == "InnoDB" and + create_index and + field.get_internal_type() == 'ForeignKey' and + field.db_constraint): + return False + if self._is_limited_data_type(field): + return False + return create_index + + def _delete_composed_index(self, model, fields, *args): + """ + MySQL can remove an implicit FK index on a field when that field is + covered by another index like a unique_together. "covered" here means + that the more complex index starts like the simpler one. + http://bugs.mysql.com/bug.php?id=37910 / Django ticket #24757 + We check here before removing the [unique|index]_together if we have to + recreate a FK index. 
+ """ + first_field = model._meta.get_field(fields[0]) + if first_field.get_internal_type() == 'ForeignKey': + constraint_names = self._constraint_names(model, [first_field.column], index=True) + if not constraint_names: + self.execute(self._create_index_sql(model, [first_field], suffix="")) + return super()._delete_composed_index(model, fields, *args) + + def _set_field_new_type_null_status(self, field, new_type): + """ + Keep the null property of the old field. If it has changed, it will be + handled separately. + """ + if field.null: + new_type += " NULL" + else: + new_type += " NOT NULL" + return new_type + + def _alter_column_type_sql(self, model, old_field, new_field, new_type): + new_type = self._set_field_new_type_null_status(old_field, new_type) + return super()._alter_column_type_sql(model, old_field, new_field, new_type) + + def _rename_field_sql(self, table, old_field, new_field, new_type): + new_type = self._set_field_new_type_null_status(old_field, new_type) + return super()._rename_field_sql(table, old_field, new_field, new_type) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/validation.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/validation.py new file mode 100644 index 0000000000000000000000000000000000000000..5cd9a42856601c40d8ff0e4f701c8a0e1c7b9852 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/mysql/validation.py @@ -0,0 +1,60 @@ +from django.core import checks +from django.db.backends.base.validation import BaseDatabaseValidation +from django.utils.version import get_docs_version + + +class DatabaseValidation(BaseDatabaseValidation): + def check(self, **kwargs): + issues = super().check(**kwargs) + issues.extend(self._check_sql_mode(**kwargs)) + return issues + + def _check_sql_mode(self, **kwargs): + with self.connection.cursor() as cursor: + cursor.execute("SELECT 
@@sql_mode") + sql_mode = cursor.fetchone() + modes = set(sql_mode[0].split(',') if sql_mode else ()) + if not (modes & {'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES'}): + return [checks.Warning( + "MySQL Strict Mode is not set for database connection '%s'" % self.connection.alias, + hint="MySQL's Strict Mode fixes many data integrity problems in MySQL, " + "such as data truncation upon insertion, by escalating warnings into " + "errors. It is strongly recommended you activate it. See: " + "https://docs.djangoproject.com/en/%s/ref/databases/#mysql-sql-mode" + % (get_docs_version(),), + id='mysql.W002', + )] + return [] + + def check_field_type(self, field, field_type): + """ + MySQL has the following field length restriction: + No character (varchar) fields can have a length exceeding 255 + characters if they have a unique index on them. + MySQL doesn't support a database index on some data types. + """ + errors = [] + if (field_type.startswith('varchar') and field.unique and + (field.max_length is None or int(field.max_length) > 255)): + errors.append( + checks.Error( + 'MySQL does not allow unique CharFields to have a max_length > 255.', + obj=field, + id='mysql.E001', + ) + ) + + if field.db_index and field_type.lower() in self.connection._limited_data_types: + errors.append( + checks.Warning( + 'MySQL does not support a database index on %s columns.' + % field_type, + hint=( + "An index won't be created. Silence this warning if " + "you don't care about it." 
+ ), + obj=field, + id='fields.W162', + ) + ) + return errors diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/base.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/base.py new file mode 100644 index 0000000000000000000000000000000000000000..ae77de57d05cb9d5c1d988d404c662374e4dba3b --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/base.py @@ -0,0 +1,540 @@ +""" +Oracle database backend for Django. + +Requires cx_Oracle: http://cx-oracle.sourceforge.net/ +""" +import datetime +import decimal +import os +import platform + +from django.conf import settings +from django.core.exceptions import ImproperlyConfigured +from django.db import utils +from django.db.backends.base.base import BaseDatabaseWrapper +from django.utils.encoding import force_bytes, force_text +from django.utils.functional import cached_property + + +def _setup_environment(environ): + # Cygwin requires some special voodoo to set the environment variables + # properly so that Oracle will see them. + if platform.system().upper().startswith('CYGWIN'): + try: + import ctypes + except ImportError as e: + raise ImproperlyConfigured("Error loading ctypes: %s; " + "the Oracle backend requires ctypes to " + "operate correctly under Cygwin." % e) + kernel32 = ctypes.CDLL('kernel32') + for name, value in environ: + kernel32.SetEnvironmentVariableA(name, value) + else: + os.environ.update(environ) + + +_setup_environment([ + # Oracle takes client-side character set encoding from the environment. + ('NLS_LANG', '.AL32UTF8'), + # This prevents unicode from getting mangled by getting encoded into the + # potentially non-unicode database character set. 
+ ('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'), +]) + + +try: + import cx_Oracle as Database +except ImportError as e: + raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e) + +# Some of these import cx_Oracle, so import them after checking if it's installed. +from .client import DatabaseClient # NOQA isort:skip +from .creation import DatabaseCreation # NOQA isort:skip +from .features import DatabaseFeatures # NOQA isort:skip +from .introspection import DatabaseIntrospection # NOQA isort:skip +from .operations import DatabaseOperations # NOQA isort:skip +from .schema import DatabaseSchemaEditor # NOQA isort:skip +from .utils import Oracle_datetime # NOQA isort:skip +from .validation import DatabaseValidation # NOQA isort:skip + + +class _UninitializedOperatorsDescriptor: + + def __get__(self, instance, cls=None): + # If connection.operators is looked up before a connection has been + # created, transparently initialize connection.operators to avert an + # AttributeError. + if instance is None: + raise AttributeError("operators not available as class attribute") + # Creating a cursor will initialize the operators. + instance.cursor().close() + return instance.__dict__['operators'] + + +class DatabaseWrapper(BaseDatabaseWrapper): + vendor = 'oracle' + display_name = 'Oracle' + # This dictionary maps Field objects to their associated Oracle column + # types, as strings. Column-type strings can contain format strings; they'll + # be interpolated against the values of Field.__dict__ before being output. + # If a column type is set to None, it won't be included in the output. + # + # Any format strings starting with "qn_" are quoted before being used in the + # output (the "qn_" prefix is stripped before the lookup is performed. 
+ data_types = { + 'AutoField': 'NUMBER(11) GENERATED BY DEFAULT ON NULL AS IDENTITY', + 'BigAutoField': 'NUMBER(19) GENERATED BY DEFAULT ON NULL AS IDENTITY', + 'BinaryField': 'BLOB', + 'BooleanField': 'NUMBER(1)', + 'CharField': 'NVARCHAR2(%(max_length)s)', + 'DateField': 'DATE', + 'DateTimeField': 'TIMESTAMP', + 'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)', + 'DurationField': 'INTERVAL DAY(9) TO SECOND(6)', + 'FileField': 'NVARCHAR2(%(max_length)s)', + 'FilePathField': 'NVARCHAR2(%(max_length)s)', + 'FloatField': 'DOUBLE PRECISION', + 'IntegerField': 'NUMBER(11)', + 'BigIntegerField': 'NUMBER(19)', + 'IPAddressField': 'VARCHAR2(15)', + 'GenericIPAddressField': 'VARCHAR2(39)', + 'NullBooleanField': 'NUMBER(1)', + 'OneToOneField': 'NUMBER(11)', + 'PositiveIntegerField': 'NUMBER(11)', + 'PositiveSmallIntegerField': 'NUMBER(11)', + 'SlugField': 'NVARCHAR2(%(max_length)s)', + 'SmallIntegerField': 'NUMBER(11)', + 'TextField': 'NCLOB', + 'TimeField': 'TIMESTAMP', + 'URLField': 'VARCHAR2(%(max_length)s)', + 'UUIDField': 'VARCHAR2(32)', + } + data_type_check_constraints = { + 'BooleanField': '%(qn_column)s IN (0,1)', + 'NullBooleanField': '%(qn_column)s IN (0,1)', + 'PositiveIntegerField': '%(qn_column)s >= 0', + 'PositiveSmallIntegerField': '%(qn_column)s >= 0', + } + + # Oracle doesn't support a database index on these columns. 
+ _limited_data_types = ('clob', 'nclob', 'blob') + + operators = _UninitializedOperatorsDescriptor() + + _standard_operators = { + 'exact': '= %s', + 'iexact': '= UPPER(%s)', + 'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", + 'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", + 'gt': '> %s', + 'gte': '>= %s', + 'lt': '< %s', + 'lte': '<= %s', + 'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", + 'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", + 'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", + 'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", + } + + _likec_operators = _standard_operators.copy() + _likec_operators.update({ + 'contains': "LIKEC %s ESCAPE '\\'", + 'icontains': "LIKEC UPPER(%s) ESCAPE '\\'", + 'startswith': "LIKEC %s ESCAPE '\\'", + 'endswith': "LIKEC %s ESCAPE '\\'", + 'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'", + 'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'", + }) + + # The patterns below are used to generate SQL pattern lookup clauses when + # the right-hand side of the lookup isn't a raw string (it might be an expression + # or the result of a bilateral transformation). + # In those cases, special characters for LIKE operators (e.g. \, *, _) should be + # escaped on database side. + # + # Note: we use str.format() here for readability as '%' is used as a wildcard for + # the LIKE operator. 
+ pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" + _pattern_ops = { + 'contains': "'%%' || {} || '%%'", + 'icontains': "'%%' || UPPER({}) || '%%'", + 'startswith': "{} || '%%'", + 'istartswith': "UPPER({}) || '%%'", + 'endswith': "'%%' || {}", + 'iendswith': "'%%' || UPPER({})", + } + + _standard_pattern_ops = {k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)" + " ESCAPE TRANSLATE('\\' USING NCHAR_CS)" + for k, v in _pattern_ops.items()} + _likec_pattern_ops = {k: "LIKEC " + v + " ESCAPE '\\'" + for k, v in _pattern_ops.items()} + + Database = Database + SchemaEditorClass = DatabaseSchemaEditor + # Classes instantiated in __init__(). + client_class = DatabaseClient + creation_class = DatabaseCreation + features_class = DatabaseFeatures + introspection_class = DatabaseIntrospection + ops_class = DatabaseOperations + validation_class = DatabaseValidation + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True) + self.features.can_return_id_from_insert = use_returning_into + + def _connect_string(self): + settings_dict = self.settings_dict + if not settings_dict['HOST'].strip(): + settings_dict['HOST'] = 'localhost' + if settings_dict['PORT']: + dsn = Database.makedsn(settings_dict['HOST'], + int(settings_dict['PORT']), + settings_dict['NAME']) + else: + dsn = settings_dict['NAME'] + return "%s/%s@%s" % (settings_dict['USER'], + settings_dict['PASSWORD'], dsn) + + def get_connection_params(self): + conn_params = self.settings_dict['OPTIONS'].copy() + if 'use_returning_into' in conn_params: + del conn_params['use_returning_into'] + return conn_params + + def get_new_connection(self, conn_params): + return Database.connect(self._connect_string(), **conn_params) + + def init_connection_state(self): + cursor = self.create_cursor() + # Set the territory first. 
The territory overrides NLS_DATE_FORMAT + # and NLS_TIMESTAMP_FORMAT to the territory default. When all of + # these are set in single statement it isn't clear what is supposed + # to happen. + cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'") + # Set Oracle date to ANSI date format. This only needs to execute + # once when we create a new connection. We also set the Territory + # to 'AMERICA' which forces Sunday to evaluate to a '1' in + # TO_CHAR(). + cursor.execute( + "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'" + " NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'" + + (" TIME_ZONE = 'UTC'" if settings.USE_TZ else '') + ) + cursor.close() + if 'operators' not in self.__dict__: + # Ticket #14149: Check whether our LIKE implementation will + # work for this connection or we need to fall back on LIKEC. + # This check is performed only once per DatabaseWrapper + # instance per thread, since subsequent connections will use + # the same settings. + cursor = self.create_cursor() + try: + cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s" + % self._standard_operators['contains'], + ['X']) + except Database.DatabaseError: + self.operators = self._likec_operators + self.pattern_ops = self._likec_pattern_ops + else: + self.operators = self._standard_operators + self.pattern_ops = self._standard_pattern_ops + cursor.close() + self.connection.stmtcachesize = 20 + # Ensure all changes are preserved even when AUTOCOMMIT is False. 
+ if not self.get_autocommit(): + self.commit() + + def create_cursor(self, name=None): + return FormatStylePlaceholderCursor(self.connection) + + def _commit(self): + if self.connection is not None: + try: + return self.connection.commit() + except Database.DatabaseError as e: + # cx_Oracle raises a cx_Oracle.DatabaseError exception + # with the following attributes and values: + # code = 2091 + # message = 'ORA-02091: transaction rolled back + # 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS + # _C00102056) violated - parent key not found' + # We convert that particular case to our IntegrityError exception + x = e.args[0] + if hasattr(x, 'code') and hasattr(x, 'message') \ + and x.code == 2091 and 'ORA-02291' in x.message: + raise utils.IntegrityError(*tuple(e.args)) + raise + + # Oracle doesn't support releasing savepoints. But we fake them when query + # logging is enabled to keep query counts consistent with other backends. + def _savepoint_commit(self, sid): + if self.queries_logged: + self.queries_log.append({ + 'sql': '-- RELEASE SAVEPOINT %s (faked)' % self.ops.quote_name(sid), + 'time': '0.000', + }) + + def _set_autocommit(self, autocommit): + with self.wrap_database_errors: + self.connection.autocommit = autocommit + + def check_constraints(self, table_names=None): + """ + Check constraints by setting them to immediate. Return them to deferred + afterward. + """ + self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE') + self.cursor().execute('SET CONSTRAINTS ALL DEFERRED') + + def is_usable(self): + try: + self.connection.ping() + except Database.Error: + return False + else: + return True + + @cached_property + def oracle_full_version(self): + with self.temporary_connection(): + return self.connection.version + + @cached_property + def oracle_version(self): + try: + return int(self.oracle_full_version.split('.')[0]) + except ValueError: + return None + + +class OracleParam: + """ + Wrapper object for formatting parameters for Oracle. 
If the string + representation of the value is large enough (greater than 4000 characters) + the input size needs to be set as CLOB. Alternatively, if the parameter + has an `input_size` attribute, then the value of the `input_size` attribute + will be used instead. Otherwise, no input size will be set for the + parameter when executing the query. + """ + + def __init__(self, param, cursor, strings_only=False): + # With raw SQL queries, datetimes can reach this function + # without being converted by DateTimeField.get_db_prep_value. + if settings.USE_TZ and (isinstance(param, datetime.datetime) and + not isinstance(param, Oracle_datetime)): + param = Oracle_datetime.from_datetime(param) + + string_size = 0 + # Oracle doesn't recognize True and False correctly. + if param is True: + param = 1 + elif param is False: + param = 0 + if hasattr(param, 'bind_parameter'): + self.force_bytes = param.bind_parameter(cursor) + elif isinstance(param, (Database.Binary, datetime.timedelta)): + self.force_bytes = param + else: + # To transmit to the database, we need Unicode if supported + # To get size right, we must consider bytes. + self.force_bytes = force_text(param, cursor.charset, strings_only) + if isinstance(self.force_bytes, str): + # We could optimize by only converting up to 4000 bytes here + string_size = len(force_bytes(param, cursor.charset, strings_only)) + if hasattr(param, 'input_size'): + # If parameter has `input_size` attribute, use that. + self.input_size = param.input_size + elif string_size > 4000: + # Mark any string param greater than 4000 characters as a CLOB. + self.input_size = Database.CLOB + else: + self.input_size = None + + +class VariableWrapper: + """ + An adapter class for cursor variables that prevents the wrapped object + from being converted into a string when used to instantiate an OracleParam. + This can be used generally for any other object that should be passed into + Cursor.execute as-is. 
+ """ + + def __init__(self, var): + self.var = var + + def bind_parameter(self, cursor): + return self.var + + def __getattr__(self, key): + return getattr(self.var, key) + + def __setattr__(self, key, value): + if key == 'var': + self.__dict__[key] = value + else: + setattr(self.var, key, value) + + +class FormatStylePlaceholderCursor: + """ + Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var" + style. This fixes it -- but note that if you want to use a literal "%s" in + a query, you'll need to use "%%s". + + We also do automatic conversion between Unicode on the Python side and + UTF-8 -- for talking to Oracle -- in here. + """ + charset = 'utf-8' + + def __init__(self, connection): + self.cursor = connection.cursor() + self.cursor.outputtypehandler = self._output_type_handler + # The default for cx_Oracle < 5.3 is 50. + self.cursor.arraysize = 100 + + @staticmethod + def _output_number_converter(value): + return decimal.Decimal(value) if '.' in value else int(value) + + @staticmethod + def _output_type_handler(cursor, name, defaultType, length, precision, scale): + """ + Called for each db column fetched from cursors. Return numbers as the + appropriate Python type. + """ + if defaultType == Database.NUMBER: + if scale == -127: + if precision == 0: + # NUMBER column: decimal-precision floating point. + # This will normally be an integer from a sequence, + # but it could be a decimal value. + outconverter = FormatStylePlaceholderCursor._output_number_converter + else: + # FLOAT column: binary-precision floating point. + # This comes from FloatField columns. + outconverter = float + elif precision > 0: + # NUMBER(p,s) column: decimal-precision fixed point. + # This comes from IntegerField and DecimalField columns. + outconverter = int if scale == 0 else decimal.Decimal + else: + # No type information. This normally comes from a + # mathematical expression in the SELECT list. 
Guess int + # or Decimal based on whether it has a decimal point. + outconverter = FormatStylePlaceholderCursor._output_number_converter + return cursor.var( + Database.STRING, + size=255, + arraysize=cursor.arraysize, + outconverter=outconverter, + ) + + def _format_params(self, params): + try: + return {k: OracleParam(v, self, True) for k, v in params.items()} + except AttributeError: + return tuple(OracleParam(p, self, True) for p in params) + + def _guess_input_sizes(self, params_list): + # Try dict handling; if that fails, treat as sequence + if hasattr(params_list[0], 'keys'): + sizes = {} + for params in params_list: + for k, value in params.items(): + if value.input_size: + sizes[k] = value.input_size + self.setinputsizes(**sizes) + else: + # It's not a list of dicts; it's a list of sequences + sizes = [None] * len(params_list[0]) + for params in params_list: + for i, value in enumerate(params): + if value.input_size: + sizes[i] = value.input_size + self.setinputsizes(*sizes) + + def _param_generator(self, params): + # Try dict handling; if that fails, treat as sequence + if hasattr(params, 'items'): + return {k: v.force_bytes for k, v in params.items()} + else: + return [p.force_bytes for p in params] + + def _fix_for_params(self, query, params, unify_by_values=False): + # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it + # it does want a trailing ';' but not a trailing '/'. However, these + # characters must be included in the original query in case the query + # is being passed to SQL*Plus. + if query.endswith(';') or query.endswith('/'): + query = query[:-1] + if params is None: + params = [] + elif hasattr(params, 'keys'): + # Handle params as dict + args = {k: ":%s" % k for k in params} + query = query % args + elif unify_by_values and len(params) > 0: + # Handle params as a dict with unified query parameters by their + # values. 
It can be used only in single query execute() because + # executemany() shares the formatted query with each of the params + # list. e.g. for input params = [0.75, 2, 0.75, 'sth', 0.75] + # params_dict = {0.75: ':arg0', 2: ':arg1', 'sth': ':arg2'} + # args = [':arg0', ':arg1', ':arg0', ':arg2', ':arg0'] + # params = {':arg0': 0.75, ':arg1': 2, ':arg2': 'sth'} + params_dict = {param: ':arg%d' % i for i, param in enumerate(set(params))} + args = [params_dict[param] for param in params] + params = {value: key for key, value in params_dict.items()} + query = query % tuple(args) + else: + # Handle params as sequence + args = [(':arg%d' % i) for i in range(len(params))] + query = query % tuple(args) + return query, self._format_params(params) + + def execute(self, query, params=None): + query, params = self._fix_for_params(query, params, unify_by_values=True) + self._guess_input_sizes([params]) + return self.cursor.execute(query, self._param_generator(params)) + + def executemany(self, query, params=None): + if not params: + # No params given, nothing to do + return None + # uniform treatment for sequences and iterables + params_iter = iter(params) + query, firstparams = self._fix_for_params(query, next(params_iter)) + # we build a list of formatted params; as we're going to traverse it + # more than once, we can't make it lazy by using a generator + formatted = [firstparams] + [self._format_params(p) for p in params_iter] + self._guess_input_sizes(formatted) + return self.cursor.executemany(query, [self._param_generator(p) for p in formatted]) + + def fetchmany(self, size=None): + if size is None: + size = self.arraysize + return tuple(self.cursor.fetchmany(size)) + + def fetchall(self): + return tuple(self.cursor.fetchall()) + + def close(self): + try: + self.cursor.close() + except Database.InterfaceError: + # already closed + pass + + def var(self, *args): + return VariableWrapper(self.cursor.var(*args)) + + def arrayvar(self, *args): + return 
VariableWrapper(self.cursor.arrayvar(*args)) + + def __getattr__(self, attr): + return getattr(self.cursor, attr) + + def __iter__(self): + return iter(self.cursor) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/client.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/client.py new file mode 100644 index 0000000000000000000000000000000000000000..102e77fd1592277318b5e067c7b8c8b564ead302 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/client.py @@ -0,0 +1,12 @@ +import subprocess + +from django.db.backends.base.client import BaseDatabaseClient + + +class DatabaseClient(BaseDatabaseClient): + executable_name = 'sqlplus' + + def runshell(self): + conn_string = self.connection._connect_string() + args = [self.executable_name, "-L", conn_string] + subprocess.check_call(args) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/compiler.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..b568e59e9e9c46ebf37eed2a6c9405c5a77cb3a8 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/compiler.py @@ -0,0 +1,62 @@ +from django.db import NotSupportedError +from django.db.models.sql import compiler + + +class SQLCompiler(compiler.SQLCompiler): + def as_sql(self, with_limits=True, with_col_aliases=False): + """ + Create the SQL for this query. Return the SQL string and list + of parameters. This is overridden from the original Query class + to handle the additional SQL Oracle requires to emulate LIMIT + and OFFSET. + + If 'with_limits' is False, any limit/offset information is not + included in the query. 
+ """ + # The `do_offset` flag indicates whether we need to construct + # the SQL needed to use limit/offset with Oracle. + do_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark) + if not do_offset: + sql, params = super().as_sql(with_limits=False, with_col_aliases=with_col_aliases) + elif not self.connection.features.supports_select_for_update_with_limit and self.query.select_for_update: + raise NotSupportedError( + 'LIMIT/OFFSET is not supported with select_for_update on this ' + 'database backend.' + ) + else: + sql, params = super().as_sql(with_limits=False, with_col_aliases=True) + # Wrap the base query in an outer SELECT * with boundaries on + # the "_RN" column. This is the canonical way to emulate LIMIT + # and OFFSET on Oracle. + high_where = '' + if self.query.high_mark is not None: + high_where = 'WHERE ROWNUM <= %d' % (self.query.high_mark,) + + if self.query.low_mark: + sql = ( + 'SELECT * FROM (SELECT "_SUB".*, ROWNUM AS "_RN" FROM (%s) ' + '"_SUB" %s) WHERE "_RN" > %d' % (sql, high_where, self.query.low_mark) + ) + else: + # Simplify the query to support subqueries if there's no offset. 
+ sql = ( + 'SELECT * FROM (SELECT "_SUB".* FROM (%s) "_SUB" %s)' % (sql, high_where) + ) + + return sql, params + + +class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler): + pass + + +class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler): + pass + + +class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler): + pass + + +class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler): + pass diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/creation.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/creation.py new file mode 100644 index 0000000000000000000000000000000000000000..fe053c54f727779ddc4f16f9331b96623cdbafbd --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/creation.py @@ -0,0 +1,382 @@ +import sys + +from django.conf import settings +from django.db.backends.base.creation import BaseDatabaseCreation +from django.db.utils import DatabaseError +from django.utils.crypto import get_random_string +from django.utils.functional import cached_property + +TEST_DATABASE_PREFIX = 'test_' + + +class DatabaseCreation(BaseDatabaseCreation): + + @cached_property + def _maindb_connection(self): + """ + This is analogous to other backends' `_nodb_connection` property, + which allows access to an "administrative" connection which can + be used to manage the test databases. + For Oracle, the only connection that can be used for that purpose + is the main (non-test) connection. 
+ """ + settings_dict = settings.DATABASES[self.connection.alias] + user = settings_dict.get('SAVED_USER') or settings_dict['USER'] + password = settings_dict.get('SAVED_PASSWORD') or settings_dict['PASSWORD'] + settings_dict = settings_dict.copy() + settings_dict.update(USER=user, PASSWORD=password) + DatabaseWrapper = type(self.connection) + return DatabaseWrapper(settings_dict, alias=self.connection.alias) + + def _create_test_db(self, verbosity=1, autoclobber=False, keepdb=False): + parameters = self._get_test_db_params() + cursor = self._maindb_connection.cursor() + if self._test_database_create(): + try: + self._execute_test_db_creation(cursor, parameters, verbosity, keepdb) + except Exception as e: + if 'ORA-01543' not in str(e): + # All errors except "tablespace already exists" cancel tests + sys.stderr.write("Got an error creating the test database: %s\n" % e) + sys.exit(2) + if not autoclobber: + confirm = input( + "It appears the test database, %s, already exists. " + "Type 'yes' to delete it, or 'no' to cancel: " % parameters['user']) + if autoclobber or confirm == 'yes': + if verbosity >= 1: + print("Destroying old test database for alias '%s'..." 
% self.connection.alias) + try: + self._execute_test_db_destruction(cursor, parameters, verbosity) + except DatabaseError as e: + if 'ORA-29857' in str(e): + self._handle_objects_preventing_db_destruction(cursor, parameters, + verbosity, autoclobber) + else: + # Ran into a database error that isn't about leftover objects in the tablespace + sys.stderr.write("Got an error destroying the old test database: %s\n" % e) + sys.exit(2) + except Exception as e: + sys.stderr.write("Got an error destroying the old test database: %s\n" % e) + sys.exit(2) + try: + self._execute_test_db_creation(cursor, parameters, verbosity, keepdb) + except Exception as e: + sys.stderr.write("Got an error recreating the test database: %s\n" % e) + sys.exit(2) + else: + print("Tests cancelled.") + sys.exit(1) + + if self._test_user_create(): + if verbosity >= 1: + print("Creating test user...") + try: + self._create_test_user(cursor, parameters, verbosity, keepdb) + except Exception as e: + if 'ORA-01920' not in str(e): + # All errors except "user already exists" cancel tests + sys.stderr.write("Got an error creating the test user: %s\n" % e) + sys.exit(2) + if not autoclobber: + confirm = input( + "It appears the test user, %s, already exists. Type " + "'yes' to delete it, or 'no' to cancel: " % parameters['user']) + if autoclobber or confirm == 'yes': + try: + if verbosity >= 1: + print("Destroying old test user...") + self._destroy_test_user(cursor, parameters, verbosity) + if verbosity >= 1: + print("Creating test user...") + self._create_test_user(cursor, parameters, verbosity, keepdb) + except Exception as e: + sys.stderr.write("Got an error recreating the test user: %s\n" % e) + sys.exit(2) + else: + print("Tests cancelled.") + sys.exit(1) + + # Cursor must be closed before closing connection. 
+ cursor.close() + self._maindb_connection.close() # done with main user -- test user and tablespaces created + self._switch_to_test_user(parameters) + return self.connection.settings_dict['NAME'] + + def _switch_to_test_user(self, parameters): + """ + Switch to the user that's used for creating the test database. + + Oracle doesn't have the concept of separate databases under the same + user, so a separate user is used; see _create_test_db(). The main user + is also needed for cleanup when testing is completed, so save its + credentials in the SAVED_USER/SAVED_PASSWORD key in the settings dict. + """ + real_settings = settings.DATABASES[self.connection.alias] + real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = \ + self.connection.settings_dict['USER'] + real_settings['SAVED_PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] = \ + self.connection.settings_dict['PASSWORD'] + real_test_settings = real_settings['TEST'] + test_settings = self.connection.settings_dict['TEST'] + real_test_settings['USER'] = real_settings['USER'] = test_settings['USER'] = \ + self.connection.settings_dict['USER'] = parameters['user'] + real_settings['PASSWORD'] = self.connection.settings_dict['PASSWORD'] = parameters['password'] + + def set_as_test_mirror(self, primary_settings_dict): + """ + Set this database up to be used in testing as a mirror of a primary + database whose settings are given. + """ + self.connection.settings_dict['USER'] = primary_settings_dict['USER'] + self.connection.settings_dict['PASSWORD'] = primary_settings_dict['PASSWORD'] + + def _handle_objects_preventing_db_destruction(self, cursor, parameters, verbosity, autoclobber): + # There are objects in the test tablespace which prevent dropping it + # The easy fix is to drop the test user -- but are we allowed to do so? 
+ print("There are objects in the old test database which prevent its destruction.") + print("If they belong to the test user, deleting the user will allow the test " + "database to be recreated.") + print("Otherwise, you will need to find and remove each of these objects, " + "or use a different tablespace.\n") + if self._test_user_create(): + if not autoclobber: + confirm = input("Type 'yes' to delete user %s: " % parameters['user']) + if autoclobber or confirm == 'yes': + try: + if verbosity >= 1: + print("Destroying old test user...") + self._destroy_test_user(cursor, parameters, verbosity) + except Exception as e: + sys.stderr.write("Got an error destroying the test user: %s\n" % e) + sys.exit(2) + try: + if verbosity >= 1: + print("Destroying old test database for alias '%s'..." % self.connection.alias) + self._execute_test_db_destruction(cursor, parameters, verbosity) + except Exception as e: + sys.stderr.write("Got an error destroying the test database: %s\n" % e) + sys.exit(2) + else: + print("Tests cancelled -- test database cannot be recreated.") + sys.exit(1) + else: + print("Django is configured to use pre-existing test user '%s'," + " and will not attempt to delete it.\n" % parameters['user']) + print("Tests cancelled -- test database cannot be recreated.") + sys.exit(1) + + def _destroy_test_db(self, test_database_name, verbosity=1): + """ + Destroy a test database, prompting the user for confirmation if the + database already exists. Return the name of the test database created. 
+ """ + self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER'] + self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] + self.connection.close() + parameters = self._get_test_db_params() + cursor = self._maindb_connection.cursor() + if self._test_user_create(): + if verbosity >= 1: + print('Destroying test user...') + self._destroy_test_user(cursor, parameters, verbosity) + if self._test_database_create(): + if verbosity >= 1: + print('Destroying test database tables...') + self._execute_test_db_destruction(cursor, parameters, verbosity) + # Cursor must be closed before closing connection. + cursor.close() + self._maindb_connection.close() + + def _execute_test_db_creation(self, cursor, parameters, verbosity, keepdb=False): + if verbosity >= 2: + print("_create_test_db(): dbname = %s" % parameters['user']) + statements = [ + """CREATE TABLESPACE %(tblspace)s + DATAFILE '%(datafile)s' SIZE %(size)s + REUSE AUTOEXTEND ON NEXT %(extsize)s MAXSIZE %(maxsize)s + """, + """CREATE TEMPORARY TABLESPACE %(tblspace_temp)s + TEMPFILE '%(datafile_tmp)s' SIZE %(size_tmp)s + REUSE AUTOEXTEND ON NEXT %(extsize_tmp)s MAXSIZE %(maxsize_tmp)s + """, + ] + # Ignore "tablespace already exists" error when keepdb is on. 
+ acceptable_ora_err = 'ORA-01543' if keepdb else None + self._execute_allow_fail_statements(cursor, statements, parameters, verbosity, acceptable_ora_err) + + def _create_test_user(self, cursor, parameters, verbosity, keepdb=False): + if verbosity >= 2: + print("_create_test_user(): username = %s" % parameters['user']) + statements = [ + """CREATE USER %(user)s + IDENTIFIED BY "%(password)s" + DEFAULT TABLESPACE %(tblspace)s + TEMPORARY TABLESPACE %(tblspace_temp)s + QUOTA UNLIMITED ON %(tblspace)s + """, + """GRANT CREATE SESSION, + CREATE TABLE, + CREATE SEQUENCE, + CREATE PROCEDURE, + CREATE TRIGGER + TO %(user)s""", + ] + # Ignore "user already exists" error when keepdb is on + acceptable_ora_err = 'ORA-01920' if keepdb else None + success = self._execute_allow_fail_statements(cursor, statements, parameters, verbosity, acceptable_ora_err) + # If the password was randomly generated, change the user accordingly. + if not success and self._test_settings_get('PASSWORD') is None: + set_password = 'ALTER USER %(user)s IDENTIFIED BY "%(password)s"' + self._execute_statements(cursor, [set_password], parameters, verbosity) + # Most test-suites can be run without the create-view privilege. But some need it. + extra = "GRANT CREATE VIEW TO %(user)s" + success = self._execute_allow_fail_statements(cursor, [extra], parameters, verbosity, 'ORA-01031') + if not success and verbosity >= 2: + print("Failed to grant CREATE VIEW permission to test user. 
This may be ok.") + + def _execute_test_db_destruction(self, cursor, parameters, verbosity): + if verbosity >= 2: + print("_execute_test_db_destruction(): dbname=%s" % parameters['user']) + statements = [ + 'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS', + 'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS', + ] + self._execute_statements(cursor, statements, parameters, verbosity) + + def _destroy_test_user(self, cursor, parameters, verbosity): + if verbosity >= 2: + print("_destroy_test_user(): user=%s" % parameters['user']) + print("Be patient. This can take some time...") + statements = [ + 'DROP USER %(user)s CASCADE', + ] + self._execute_statements(cursor, statements, parameters, verbosity) + + def _execute_statements(self, cursor, statements, parameters, verbosity, allow_quiet_fail=False): + for template in statements: + stmt = template % parameters + if verbosity >= 2: + print(stmt) + try: + cursor.execute(stmt) + except Exception as err: + if (not allow_quiet_fail) or verbosity >= 2: + sys.stderr.write("Failed (%s)\n" % (err)) + raise + + def _execute_allow_fail_statements(self, cursor, statements, parameters, verbosity, acceptable_ora_err): + """ + Execute statements which are allowed to fail silently if the Oracle + error code given by `acceptable_ora_err` is raised. Return True if the + statements execute without an exception, or False otherwise. 
+ """ + try: + # Statement can fail when acceptable_ora_err is not None + allow_quiet_fail = acceptable_ora_err is not None and len(acceptable_ora_err) > 0 + self._execute_statements(cursor, statements, parameters, verbosity, allow_quiet_fail=allow_quiet_fail) + return True + except DatabaseError as err: + description = str(err) + if acceptable_ora_err is None or acceptable_ora_err not in description: + raise + return False + + def _get_test_db_params(self): + return { + 'dbname': self._test_database_name(), + 'user': self._test_database_user(), + 'password': self._test_database_passwd(), + 'tblspace': self._test_database_tblspace(), + 'tblspace_temp': self._test_database_tblspace_tmp(), + 'datafile': self._test_database_tblspace_datafile(), + 'datafile_tmp': self._test_database_tblspace_tmp_datafile(), + 'maxsize': self._test_database_tblspace_maxsize(), + 'maxsize_tmp': self._test_database_tblspace_tmp_maxsize(), + 'size': self._test_database_tblspace_size(), + 'size_tmp': self._test_database_tblspace_tmp_size(), + 'extsize': self._test_database_tblspace_extsize(), + 'extsize_tmp': self._test_database_tblspace_tmp_extsize(), + } + + def _test_settings_get(self, key, default=None, prefixed=None): + """ + Return a value from the test settings dict, or a given default, or a + prefixed entry from the main settings dict. 
+ """ + settings_dict = self.connection.settings_dict + val = settings_dict['TEST'].get(key, default) + if val is None and prefixed: + val = TEST_DATABASE_PREFIX + settings_dict[prefixed] + return val + + def _test_database_name(self): + return self._test_settings_get('NAME', prefixed='NAME') + + def _test_database_create(self): + return self._test_settings_get('CREATE_DB', default=True) + + def _test_user_create(self): + return self._test_settings_get('CREATE_USER', default=True) + + def _test_database_user(self): + return self._test_settings_get('USER', prefixed='USER') + + def _test_database_passwd(self): + password = self._test_settings_get('PASSWORD') + if password is None and self._test_user_create(): + # Oracle passwords are limited to 30 chars and can't contain symbols. + password = get_random_string(length=30) + return password + + def _test_database_tblspace(self): + return self._test_settings_get('TBLSPACE', prefixed='USER') + + def _test_database_tblspace_tmp(self): + settings_dict = self.connection.settings_dict + return settings_dict['TEST'].get('TBLSPACE_TMP', + TEST_DATABASE_PREFIX + settings_dict['USER'] + '_temp') + + def _test_database_tblspace_datafile(self): + tblspace = '%s.dbf' % self._test_database_tblspace() + return self._test_settings_get('DATAFILE', default=tblspace) + + def _test_database_tblspace_tmp_datafile(self): + tblspace = '%s.dbf' % self._test_database_tblspace_tmp() + return self._test_settings_get('DATAFILE_TMP', default=tblspace) + + def _test_database_tblspace_maxsize(self): + return self._test_settings_get('DATAFILE_MAXSIZE', default='500M') + + def _test_database_tblspace_tmp_maxsize(self): + return self._test_settings_get('DATAFILE_TMP_MAXSIZE', default='500M') + + def _test_database_tblspace_size(self): + return self._test_settings_get('DATAFILE_SIZE', default='50M') + + def _test_database_tblspace_tmp_size(self): + return self._test_settings_get('DATAFILE_TMP_SIZE', default='50M') + + def 
_test_database_tblspace_extsize(self): + return self._test_settings_get('DATAFILE_EXTSIZE', default='25M') + + def _test_database_tblspace_tmp_extsize(self): + return self._test_settings_get('DATAFILE_TMP_EXTSIZE', default='25M') + + def _get_test_db_name(self): + """ + Return the 'production' DB name to get the test DB creation machinery + to work. This isn't a great deal in this case because DB names as + handled by Django don't have real counterparts in Oracle. + """ + return self.connection.settings_dict['NAME'] + + def test_db_signature(self): + settings_dict = self.connection.settings_dict + return ( + settings_dict['HOST'], + settings_dict['PORT'], + settings_dict['ENGINE'], + settings_dict['NAME'], + self._test_database_user(), + ) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/features.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/features.py new file mode 100644 index 0000000000000000000000000000000000000000..71421f0df8dd010110bc60509a42ec217e730bc5 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/features.py @@ -0,0 +1,58 @@ +from django.db.backends.base.features import BaseDatabaseFeatures +from django.db.utils import InterfaceError + + +class DatabaseFeatures(BaseDatabaseFeatures): + empty_fetchmany_value = () + interprets_empty_strings_as_nulls = True + uses_savepoints = True + has_select_for_update = True + has_select_for_update_nowait = True + has_select_for_update_skip_locked = True + has_select_for_update_of = True + select_for_update_of_column = True + can_return_id_from_insert = True + allow_sliced_subqueries = False + can_introspect_autofield = True + supports_subqueries_in_group_by = False + supports_transactions = True + supports_timezones = False + has_native_duration_field = True + can_defer_constraint_checks = True + 
supports_partially_nullable_unique_constraints = False + truncates_names = True + supports_tablespaces = True + supports_sequence_reset = False + can_introspect_time_field = False + atomic_transactions = False + supports_combined_alters = False + nulls_order_largest = True + requires_literal_defaults = True + closed_cursor_error_class = InterfaceError + bare_select_suffix = " FROM DUAL" + uppercases_column_names = True + # select for update with limit can be achieved on Oracle, but not with the current backend. + supports_select_for_update_with_limit = False + supports_temporal_subtraction = True + # Oracle doesn't ignore quoted identifiers case but the current backend + # does by uppercasing all identifiers. + ignores_table_name_case = True + supports_index_on_text_field = False + has_case_insensitive_like = False + create_test_procedure_without_params_sql = """ + CREATE PROCEDURE "TEST_PROCEDURE" AS + V_I INTEGER; + BEGIN + V_I := 1; + END; + """ + create_test_procedure_with_int_param_sql = """ + CREATE PROCEDURE "TEST_PROCEDURE" (P_I INTEGER) AS + V_I INTEGER; + BEGIN + V_I := P_I; + END; + """ + supports_callproc_kwargs = True + supports_over_clause = True + max_query_params = 2**16 - 1 diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/functions.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/functions.py new file mode 100644 index 0000000000000000000000000000000000000000..1aeb4597e3e47725f3345650503ae8dd4502fd0a --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/functions.py @@ -0,0 +1,22 @@ +from django.db.models import DecimalField, DurationField, Func + + +class IntervalToSeconds(Func): + function = '' + template = """ + EXTRACT(day from %(expressions)s) * 86400 + + EXTRACT(hour from %(expressions)s) * 3600 + + EXTRACT(minute from %(expressions)s) * 60 + + EXTRACT(second from 
%(expressions)s) + """ + + def __init__(self, expression, *, output_field=None, **extra): + super().__init__(expression, output_field=output_field or DecimalField(), **extra) + + +class SecondsToInterval(Func): + function = 'NUMTODSINTERVAL' + template = "%(function)s(%(expressions)s, 'SECOND')" + + def __init__(self, expression, *, output_field=None, **extra): + super().__init__(expression, output_field=output_field or DurationField(), **extra) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/introspection.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/introspection.py new file mode 100644 index 0000000000000000000000000000000000000000..7b873fd0d0eec36e7254f98e03b756332bd9ea69 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/introspection.py @@ -0,0 +1,291 @@ +import warnings +from collections import namedtuple + +import cx_Oracle + +from django.db import models +from django.db.backends.base.introspection import ( + BaseDatabaseIntrospection, FieldInfo as BaseFieldInfo, TableInfo, +) +from django.utils.deprecation import RemovedInDjango21Warning + +FieldInfo = namedtuple('FieldInfo', BaseFieldInfo._fields + ('is_autofield',)) + + +class DatabaseIntrospection(BaseDatabaseIntrospection): + # Maps type objects to Django Field types. 
+ data_types_reverse = { + cx_Oracle.BLOB: 'BinaryField', + cx_Oracle.CLOB: 'TextField', + cx_Oracle.DATETIME: 'DateField', + cx_Oracle.FIXED_CHAR: 'CharField', + cx_Oracle.FIXED_NCHAR: 'CharField', + cx_Oracle.NATIVE_FLOAT: 'FloatField', + cx_Oracle.NCHAR: 'CharField', + cx_Oracle.NCLOB: 'TextField', + cx_Oracle.NUMBER: 'DecimalField', + cx_Oracle.STRING: 'CharField', + cx_Oracle.TIMESTAMP: 'DateTimeField', + } + + cache_bust_counter = 1 + + def get_field_type(self, data_type, description): + if data_type == cx_Oracle.NUMBER: + precision, scale = description[4:6] + if scale == 0: + if precision > 11: + return 'BigAutoField' if description.is_autofield else 'BigIntegerField' + elif precision == 1: + return 'BooleanField' + elif description.is_autofield: + return 'AutoField' + else: + return 'IntegerField' + elif scale == -127: + return 'FloatField' + + return super().get_field_type(data_type, description) + + def get_table_list(self, cursor): + """Return a list of table and view names in the current database.""" + cursor.execute("SELECT TABLE_NAME, 't' FROM USER_TABLES UNION ALL " + "SELECT VIEW_NAME, 'v' FROM USER_VIEWS") + return [TableInfo(row[0].lower(), row[1]) for row in cursor.fetchall()] + + def get_table_description(self, cursor, table_name): + """ + Return a description of the table with the DB-API cursor.description + interface. 
+ """ + # user_tab_columns gives data default for columns + cursor.execute(""" + SELECT + column_name, + data_default, + CASE + WHEN char_used IS NULL THEN data_length + ELSE char_length + END as internal_size, + CASE + WHEN identity_column = 'YES' THEN 1 + ELSE 0 + END as is_autofield + FROM user_tab_cols + WHERE table_name = UPPER(%s)""", [table_name]) + field_map = { + column: (internal_size, default if default != 'NULL' else None, is_autofield) + for column, default, internal_size, is_autofield in cursor.fetchall() + } + self.cache_bust_counter += 1 + cursor.execute("SELECT * FROM {} WHERE ROWNUM < 2 AND {} > 0".format( + self.connection.ops.quote_name(table_name), + self.cache_bust_counter)) + description = [] + for desc in cursor.description: + name = desc[0] + internal_size, default, is_autofield = field_map[name] + name = name % {} # cx_Oracle, for some reason, doubles percent signs. + description.append(FieldInfo(*( + (name.lower(),) + + desc[1:3] + + (internal_size, desc[4] or 0, desc[5] or 0) + + desc[6:] + + (default, is_autofield) + ))) + return description + + def table_name_converter(self, name): + """Table name comparison is case insensitive under Oracle.""" + return name.lower() + + def get_sequences(self, cursor, table_name, table_fields=()): + cursor.execute(""" + SELECT + user_tab_identity_cols.sequence_name, + user_tab_identity_cols.column_name + FROM + user_tab_identity_cols, + user_constraints, + user_cons_columns cols + WHERE + user_constraints.constraint_name = cols.constraint_name + AND user_constraints.table_name = user_tab_identity_cols.table_name + AND cols.column_name = user_tab_identity_cols.column_name + AND user_constraints.constraint_type = 'P' + AND user_tab_identity_cols.table_name = UPPER(%s) + """, [table_name]) + # Oracle allows only one identity column per table. 
+ row = cursor.fetchone() + if row: + return [{'name': row[0].lower(), 'table': table_name, 'column': row[1].lower()}] + # To keep backward compatibility for AutoFields that aren't Oracle + # identity columns. + for f in table_fields: + if isinstance(f, models.AutoField): + return [{'table': table_name, 'column': f.column}] + return [] + + def get_relations(self, cursor, table_name): + """ + Return a dictionary of {field_name: (field_name_other_table, other_table)} + representing all relationships to the given table. + """ + table_name = table_name.upper() + cursor.execute(""" + SELECT ca.column_name, cb.table_name, cb.column_name + FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb + WHERE user_constraints.table_name = %s AND + user_constraints.constraint_name = ca.constraint_name AND + user_constraints.r_constraint_name = cb.constraint_name AND + ca.position = cb.position""", [table_name]) + + relations = {} + for row in cursor.fetchall(): + relations[row[0].lower()] = (row[2].lower(), row[1].lower()) + return relations + + def get_key_columns(self, cursor, table_name): + cursor.execute(""" + SELECT ccol.column_name, rcol.table_name AS referenced_table, rcol.column_name AS referenced_column + FROM user_constraints c + JOIN user_cons_columns ccol + ON ccol.constraint_name = c.constraint_name + JOIN user_cons_columns rcol + ON rcol.constraint_name = c.r_constraint_name + WHERE c.table_name = %s AND c.constraint_type = 'R'""", [table_name.upper()]) + return [tuple(cell.lower() for cell in row) + for row in cursor.fetchall()] + + def get_indexes(self, cursor, table_name): + warnings.warn( + "get_indexes() is deprecated in favor of get_constraints().", + RemovedInDjango21Warning, stacklevel=2 + ) + sql = """ + SELECT LOWER(uic1.column_name) AS column_name, + CASE user_constraints.constraint_type + WHEN 'P' THEN 1 ELSE 0 + END AS is_primary_key, + CASE user_indexes.uniqueness + WHEN 'UNIQUE' THEN 1 ELSE 0 + END AS is_unique + FROM user_constraints, 
user_indexes, user_ind_columns uic1 + WHERE user_constraints.constraint_type (+) = 'P' + AND user_constraints.index_name (+) = uic1.index_name + AND user_indexes.uniqueness (+) = 'UNIQUE' + AND user_indexes.index_name (+) = uic1.index_name + AND uic1.table_name = UPPER(%s) + AND uic1.column_position = 1 + AND NOT EXISTS ( + SELECT 1 + FROM user_ind_columns uic2 + WHERE uic2.index_name = uic1.index_name + AND uic2.column_position = 2 + ) + """ + cursor.execute(sql, [table_name]) + indexes = {} + for row in cursor.fetchall(): + indexes[row[0]] = {'primary_key': bool(row[1]), + 'unique': bool(row[2])} + return indexes + + def get_constraints(self, cursor, table_name): + """ + Retrieve any constraints or keys (unique, pk, fk, check, index) across + one or more columns. + """ + constraints = {} + # Loop over the constraints, getting PKs, uniques, and checks + cursor.execute(""" + SELECT + user_constraints.constraint_name, + LISTAGG(LOWER(cols.column_name), ',') WITHIN GROUP (ORDER BY cols.position), + CASE user_constraints.constraint_type + WHEN 'P' THEN 1 + ELSE 0 + END AS is_primary_key, + CASE + WHEN user_constraints.constraint_type IN ('P', 'U') THEN 1 + ELSE 0 + END AS is_unique, + CASE user_constraints.constraint_type + WHEN 'C' THEN 1 + ELSE 0 + END AS is_check_constraint + FROM + user_constraints + LEFT OUTER JOIN + user_cons_columns cols ON user_constraints.constraint_name = cols.constraint_name + WHERE + user_constraints.constraint_type = ANY('P', 'U', 'C') + AND user_constraints.table_name = UPPER(%s) + GROUP BY user_constraints.constraint_name, user_constraints.constraint_type + """, [table_name]) + for constraint, columns, pk, unique, check in cursor.fetchall(): + constraints[constraint] = { + 'columns': columns.split(','), + 'primary_key': pk, + 'unique': unique, + 'foreign_key': None, + 'check': check, + 'index': unique, # All uniques come with an index + } + # Foreign key constraints + cursor.execute(""" + SELECT + cons.constraint_name, + 
LISTAGG(LOWER(cols.column_name), ',') WITHIN GROUP (ORDER BY cols.position), + LOWER(rcols.table_name), + LOWER(rcols.column_name) + FROM + user_constraints cons + INNER JOIN + user_cons_columns rcols ON rcols.constraint_name = cons.r_constraint_name AND rcols.position = 1 + LEFT OUTER JOIN + user_cons_columns cols ON cons.constraint_name = cols.constraint_name + WHERE + cons.constraint_type = 'R' AND + cons.table_name = UPPER(%s) + GROUP BY cons.constraint_name, rcols.table_name, rcols.column_name + """, [table_name]) + for constraint, columns, other_table, other_column in cursor.fetchall(): + constraints[constraint] = { + 'primary_key': False, + 'unique': False, + 'foreign_key': (other_table, other_column), + 'check': False, + 'index': False, + 'columns': columns.split(','), + } + # Now get indexes + cursor.execute(""" + SELECT + ind.index_name, + LOWER(ind.index_type), + LISTAGG(LOWER(cols.column_name), ',') WITHIN GROUP (ORDER BY cols.column_position), + LISTAGG(cols.descend, ',') WITHIN GROUP (ORDER BY cols.column_position) + FROM + user_ind_columns cols, user_indexes ind + WHERE + cols.table_name = UPPER(%s) AND + NOT EXISTS ( + SELECT 1 + FROM user_constraints cons + WHERE ind.index_name = cons.index_name + ) AND cols.index_name = ind.index_name + GROUP BY ind.index_name, ind.index_type + """, [table_name]) + for constraint, type_, columns, orders in cursor.fetchall(): + constraints[constraint] = { + 'primary_key': False, + 'unique': False, + 'foreign_key': None, + 'check': False, + 'index': True, + 'type': 'idx' if type_ == 'normal' else type_, + 'columns': columns.split(','), + 'orders': orders.split(','), + } + return constraints diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/operations.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/operations.py new file mode 100644 index 
0000000000000000000000000000000000000000..3657267ad20adf31af149ee6e8d37ff8a6f8af17 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/operations.py @@ -0,0 +1,561 @@ +import datetime +import re +import uuid + +from django.conf import settings +from django.db.backends.base.operations import BaseDatabaseOperations +from django.db.backends.utils import strip_quotes, truncate_name +from django.utils import timezone +from django.utils.encoding import force_bytes + +from .base import Database +from .utils import BulkInsertMapper, InsertIdVar, Oracle_datetime + + +class DatabaseOperations(BaseDatabaseOperations): + compiler_module = "django.db.backends.oracle.compiler" + + # Oracle uses NUMBER(11) and NUMBER(19) for integer fields. + integer_field_ranges = { + 'SmallIntegerField': (-99999999999, 99999999999), + 'IntegerField': (-99999999999, 99999999999), + 'BigIntegerField': (-9999999999999999999, 9999999999999999999), + 'PositiveSmallIntegerField': (0, 99999999999), + 'PositiveIntegerField': (0, 99999999999), + } + set_operators = dict(BaseDatabaseOperations.set_operators, difference='MINUS') + + # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc. 
+ _sequence_reset_sql = """ +DECLARE + table_value integer; + seq_value integer; + seq_name user_tab_identity_cols.sequence_name%%TYPE; +BEGIN + BEGIN + SELECT sequence_name INTO seq_name FROM user_tab_identity_cols + WHERE table_name = '%(table_name)s' AND + column_name = '%(column_name)s'; + EXCEPTION WHEN NO_DATA_FOUND THEN + seq_name := '%(no_autofield_sequence_name)s'; + END; + + SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s; + SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences + WHERE sequence_name = seq_name; + WHILE table_value > seq_value LOOP + EXECUTE IMMEDIATE 'SELECT "'||seq_name||'".nextval FROM DUAL' + INTO seq_value; + END LOOP; +END; +/""" + + # Oracle doesn't support string without precision; use the max string size. + cast_char_field_without_max_length = 'NVARCHAR2(2000)' + + def cache_key_culling_sql(self): + return """ + SELECT cache_key + FROM (SELECT cache_key, rank() OVER (ORDER BY cache_key) AS rank FROM %s) + WHERE rank = %%s + 1 + """ + + def date_extract_sql(self, lookup_type, field_name): + if lookup_type == 'week_day': + # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday. + return "TO_CHAR(%s, 'D')" % field_name + elif lookup_type == 'week': + # IW = ISO week number + return "TO_CHAR(%s, 'IW')" % field_name + elif lookup_type == 'quarter': + return "TO_CHAR(%s, 'Q')" % field_name + else: + # https://docs.oracle.com/database/121/SQLRF/functions067.htm#SQLRF00639 + return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) + + def date_interval_sql(self, timedelta): + """ + NUMTODSINTERVAL converts number to INTERVAL DAY TO SECOND literal. 
+ """ + return "NUMTODSINTERVAL(%06f, 'SECOND')" % timedelta.total_seconds() + + def date_trunc_sql(self, lookup_type, field_name): + # https://docs.oracle.com/database/121/SQLRF/functions271.htm#SQLRF52058 + if lookup_type in ('year', 'month'): + return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) + elif lookup_type == 'quarter': + return "TRUNC(%s, 'Q')" % field_name + else: + return "TRUNC(%s)" % field_name + + # Oracle crashes with "ORA-03113: end-of-file on communication channel" + # if the time zone name is passed in parameter. Use interpolation instead. + # https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ + # This regexp matches all time zone names from the zoneinfo database. + _tzname_re = re.compile(r'^[\w/:+-]+$') + + def _convert_field_to_tz(self, field_name, tzname): + if not settings.USE_TZ: + return field_name + if not self._tzname_re.match(tzname): + raise ValueError("Invalid time zone name: %s" % tzname) + # Convert from UTC to local time, returning TIMESTAMP WITH TIME ZONE + # and cast it back to TIMESTAMP to strip the TIME ZONE details. + return "CAST((FROM_TZ(%s, '0:00') AT TIME ZONE '%s') AS TIMESTAMP)" % (field_name, tzname) + + def datetime_cast_date_sql(self, field_name, tzname): + field_name = self._convert_field_to_tz(field_name, tzname) + return 'TRUNC(%s)' % field_name + + def datetime_cast_time_sql(self, field_name, tzname): + # Since `TimeField` values are stored as TIMESTAMP where only the date + # part is ignored, convert the field to the specified timezone. 
+ return self._convert_field_to_tz(field_name, tzname) + + def datetime_extract_sql(self, lookup_type, field_name, tzname): + field_name = self._convert_field_to_tz(field_name, tzname) + return self.date_extract_sql(lookup_type, field_name) + + def datetime_trunc_sql(self, lookup_type, field_name, tzname): + field_name = self._convert_field_to_tz(field_name, tzname) + # https://docs.oracle.com/database/121/SQLRF/functions271.htm#SQLRF52058 + if lookup_type in ('year', 'month'): + sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) + elif lookup_type == 'quarter': + sql = "TRUNC(%s, 'Q')" % field_name + elif lookup_type == 'day': + sql = "TRUNC(%s)" % field_name + elif lookup_type == 'hour': + sql = "TRUNC(%s, 'HH24')" % field_name + elif lookup_type == 'minute': + sql = "TRUNC(%s, 'MI')" % field_name + else: + sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision. + return sql + + def time_trunc_sql(self, lookup_type, field_name): + # The implementation is similar to `datetime_trunc_sql` as both + # `DateTimeField` and `TimeField` are stored as TIMESTAMP where + # the date part of the later is ignored. + if lookup_type == 'hour': + sql = "TRUNC(%s, 'HH24')" % field_name + elif lookup_type == 'minute': + sql = "TRUNC(%s, 'MI')" % field_name + elif lookup_type == 'second': + sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision. 
+ return sql + + def get_db_converters(self, expression): + converters = super().get_db_converters(expression) + internal_type = expression.output_field.get_internal_type() + if internal_type == 'TextField': + converters.append(self.convert_textfield_value) + elif internal_type == 'BinaryField': + converters.append(self.convert_binaryfield_value) + elif internal_type in ['BooleanField', 'NullBooleanField']: + converters.append(self.convert_booleanfield_value) + elif internal_type == 'DateTimeField': + converters.append(self.convert_datetimefield_value) + elif internal_type == 'DateField': + converters.append(self.convert_datefield_value) + elif internal_type == 'TimeField': + converters.append(self.convert_timefield_value) + elif internal_type == 'UUIDField': + converters.append(self.convert_uuidfield_value) + # Oracle stores empty strings as null. If the field accepts the empty + # string, undo this to adhere to the Django convention of using + # the empty string instead of null. + if expression.field.empty_strings_allowed: + converters.append( + self.convert_empty_bytes + if internal_type == 'BinaryField' else + self.convert_empty_string + ) + return converters + + def convert_textfield_value(self, value, expression, connection): + if isinstance(value, Database.LOB): + value = value.read() + return value + + def convert_binaryfield_value(self, value, expression, connection): + if isinstance(value, Database.LOB): + value = force_bytes(value.read()) + return value + + def convert_booleanfield_value(self, value, expression, connection): + if value in (0, 1): + value = bool(value) + return value + + # cx_Oracle always returns datetime.datetime objects for + # DATE and TIMESTAMP columns, but Django wants to see a + # python datetime.date, .time, or .datetime. 
+ + def convert_datetimefield_value(self, value, expression, connection): + if value is not None: + if settings.USE_TZ: + value = timezone.make_aware(value, self.connection.timezone) + return value + + def convert_datefield_value(self, value, expression, connection): + if isinstance(value, Database.Timestamp): + value = value.date() + return value + + def convert_timefield_value(self, value, expression, connection): + if isinstance(value, Database.Timestamp): + value = value.time() + return value + + def convert_uuidfield_value(self, value, expression, connection): + if value is not None: + value = uuid.UUID(value) + return value + + @staticmethod + def convert_empty_string(value, expression, connection): + return '' if value is None else value + + @staticmethod + def convert_empty_bytes(value, expression, connection): + return b'' if value is None else value + + def deferrable_sql(self): + return " DEFERRABLE INITIALLY DEFERRED" + + def fetch_returned_insert_id(self, cursor): + return int(cursor._insert_id_var.getvalue()) + + def field_cast_sql(self, db_type, internal_type): + if db_type and db_type.endswith('LOB'): + return "DBMS_LOB.SUBSTR(%s)" + else: + return "%s" + + def last_executed_query(self, cursor, sql, params): + # https://cx-oracle.readthedocs.io/en/latest/cursor.html#Cursor.statement + # The DB API definition does not define this attribute. + statement = cursor.statement + # Unlike Psycopg's `query` and MySQLdb`'s `_last_executed`, CxOracle's + # `statement` doesn't contain the query parameters. refs #20010. 
+ return super().last_executed_query(cursor, statement, params) + + def last_insert_id(self, cursor, table_name, pk_name): + sq_name = self._get_sequence_name(cursor, strip_quotes(table_name), pk_name) + cursor.execute('"%s".currval' % sq_name) + return cursor.fetchone()[0] + + def lookup_cast(self, lookup_type, internal_type=None): + if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'): + return "UPPER(%s)" + return "%s" + + def max_in_list_size(self): + return 1000 + + def max_name_length(self): + return 30 + + def pk_default_value(self): + return "NULL" + + def prep_for_iexact_query(self, x): + return x + + def process_clob(self, value): + if value is None: + return '' + return value.read() + + def quote_name(self, name): + # SQL92 requires delimited (quoted) names to be case-sensitive. When + # not quoted, Oracle has case-insensitive behavior for identifiers, but + # always defaults to uppercase. + # We simplify things by making Oracle identifiers always uppercase. + if not name.startswith('"') and not name.endswith('"'): + name = '"%s"' % truncate_name(name.upper(), self.max_name_length()) + # Oracle puts the query text into a (query % args) construct, so % signs + # in names need to be escaped. The '%%' will be collapsed back to '%' at + # that stage so we aren't really making the name longer here. 
+ name = name.replace('%', '%%') + return name.upper() + + def random_function_sql(self): + return "DBMS_RANDOM.RANDOM" + + def regex_lookup(self, lookup_type): + if lookup_type == 'regex': + match_option = "'c'" + else: + match_option = "'i'" + return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option + + def return_insert_id(self): + return "RETURNING %s INTO %%s", (InsertIdVar(),) + + def savepoint_create_sql(self, sid): + return "SAVEPOINT " + self.quote_name(sid) + + def savepoint_rollback_sql(self, sid): + return "ROLLBACK TO SAVEPOINT " + self.quote_name(sid) + + def _foreign_key_constraints(self, table_name, recursive=False): + with self.connection.cursor() as cursor: + if recursive: + cursor.execute(""" + SELECT + user_tables.table_name, rcons.constraint_name + FROM + user_tables + JOIN + user_constraints cons + ON (user_tables.table_name = cons.table_name AND cons.constraint_type = ANY('P', 'U')) + LEFT JOIN + user_constraints rcons + ON (user_tables.table_name = rcons.table_name AND rcons.constraint_type = 'R') + START WITH user_tables.table_name = UPPER(%s) + CONNECT BY NOCYCLE PRIOR cons.constraint_name = rcons.r_constraint_name + GROUP BY + user_tables.table_name, rcons.constraint_name + HAVING user_tables.table_name != UPPER(%s) + ORDER BY MAX(level) DESC + """, (table_name, table_name)) + else: + cursor.execute(""" + SELECT + cons.table_name, cons.constraint_name + FROM + user_constraints cons + WHERE + cons.constraint_type = 'R' + AND cons.table_name = UPPER(%s) + """, (table_name,)) + return cursor.fetchall() + + def sql_flush(self, style, tables, sequences, allow_cascade=False): + if tables: + truncated_tables = {table.upper() for table in tables} + constraints = set() + # Oracle's TRUNCATE CASCADE only works with ON DELETE CASCADE + # foreign keys which Django doesn't define. Emulate the + # PostgreSQL behavior which truncates all dependent tables by + # manually retrieving all foreign key constraints and resolving + # dependencies. 
+ for table in tables: + for foreign_table, constraint in self._foreign_key_constraints(table, recursive=allow_cascade): + if allow_cascade: + truncated_tables.add(foreign_table) + constraints.add((foreign_table, constraint)) + sql = [ + "%s %s %s %s %s %s %s %s;" % ( + style.SQL_KEYWORD('ALTER'), + style.SQL_KEYWORD('TABLE'), + style.SQL_FIELD(self.quote_name(table)), + style.SQL_KEYWORD('DISABLE'), + style.SQL_KEYWORD('CONSTRAINT'), + style.SQL_FIELD(self.quote_name(constraint)), + style.SQL_KEYWORD('KEEP'), + style.SQL_KEYWORD('INDEX'), + ) for table, constraint in constraints + ] + [ + "%s %s %s;" % ( + style.SQL_KEYWORD('TRUNCATE'), + style.SQL_KEYWORD('TABLE'), + style.SQL_FIELD(self.quote_name(table)), + ) for table in truncated_tables + ] + [ + "%s %s %s %s %s %s;" % ( + style.SQL_KEYWORD('ALTER'), + style.SQL_KEYWORD('TABLE'), + style.SQL_FIELD(self.quote_name(table)), + style.SQL_KEYWORD('ENABLE'), + style.SQL_KEYWORD('CONSTRAINT'), + style.SQL_FIELD(self.quote_name(constraint)), + ) for table, constraint in constraints + ] + # Since we've just deleted all the rows, running our sequence + # ALTER code will reset the sequence to 0. 
+ sql.extend(self.sequence_reset_by_name_sql(style, sequences)) + return sql + else: + return [] + + def sequence_reset_by_name_sql(self, style, sequences): + sql = [] + for sequence_info in sequences: + no_autofield_sequence_name = self._get_no_autofield_sequence_name(sequence_info['table']) + table = self.quote_name(sequence_info['table']) + column = self.quote_name(sequence_info['column'] or 'id') + query = self._sequence_reset_sql % { + 'no_autofield_sequence_name': no_autofield_sequence_name, + 'table': table, + 'column': column, + 'table_name': strip_quotes(table), + 'column_name': strip_quotes(column), + } + sql.append(query) + return sql + + def sequence_reset_sql(self, style, model_list): + from django.db import models + output = [] + query = self._sequence_reset_sql + for model in model_list: + for f in model._meta.local_fields: + if isinstance(f, models.AutoField): + no_autofield_sequence_name = self._get_no_autofield_sequence_name(model._meta.db_table) + table = self.quote_name(model._meta.db_table) + column = self.quote_name(f.column) + output.append(query % { + 'no_autofield_sequence_name': no_autofield_sequence_name, + 'table': table, + 'column': column, + 'table_name': strip_quotes(table), + 'column_name': strip_quotes(column), + }) + # Only one AutoField is allowed per model, so don't + # continue to loop + break + for f in model._meta.many_to_many: + if not f.remote_field.through: + no_autofield_sequence_name = self._get_no_autofield_sequence_name(f.m2m_db_table()) + table = self.quote_name(f.m2m_db_table()) + column = self.quote_name('id') + output.append(query % { + 'no_autofield_sequence_name': no_autofield_sequence_name, + 'table': table, + 'column': column, + 'table_name': strip_quotes(table), + 'column_name': 'ID', + }) + return output + + def start_transaction_sql(self): + return '' + + def tablespace_sql(self, tablespace, inline=False): + if inline: + return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) + else: + return 
"TABLESPACE %s" % self.quote_name(tablespace) + + def adapt_datefield_value(self, value): + """ + Transform a date value to an object compatible with what is expected + by the backend driver for date columns. + The default implementation transforms the date to text, but that is not + necessary for Oracle. + """ + return value + + def adapt_datetimefield_value(self, value): + """ + Transform a datetime value to an object compatible with what is expected + by the backend driver for datetime columns. + + If naive datetime is passed assumes that is in UTC. Normally Django + models.DateTimeField makes sure that if USE_TZ is True passed datetime + is timezone aware. + """ + + if value is None: + return None + + # Expression values are adapted by the database. + if hasattr(value, 'resolve_expression'): + return value + + # cx_Oracle doesn't support tz-aware datetimes + if timezone.is_aware(value): + if settings.USE_TZ: + value = timezone.make_naive(value, self.connection.timezone) + else: + raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.") + + return Oracle_datetime.from_datetime(value) + + def adapt_timefield_value(self, value): + if value is None: + return None + + # Expression values are adapted by the database. 
+ if hasattr(value, 'resolve_expression'): + return value + + if isinstance(value, str): + return datetime.datetime.strptime(value, '%H:%M:%S') + + # Oracle doesn't support tz-aware times + if timezone.is_aware(value): + raise ValueError("Oracle backend does not support timezone-aware times.") + + return Oracle_datetime(1900, 1, 1, value.hour, value.minute, + value.second, value.microsecond) + + def combine_expression(self, connector, sub_expressions): + lhs, rhs = sub_expressions + if connector == '%%': + return 'MOD(%s)' % ','.join(sub_expressions) + elif connector == '&': + return 'BITAND(%s)' % ','.join(sub_expressions) + elif connector == '|': + return 'BITAND(-%(lhs)s-1,%(rhs)s)+%(lhs)s' % {'lhs': lhs, 'rhs': rhs} + elif connector == '<<': + return '(%(lhs)s * POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} + elif connector == '>>': + return 'FLOOR(%(lhs)s / POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} + elif connector == '^': + return 'POWER(%s)' % ','.join(sub_expressions) + return super().combine_expression(connector, sub_expressions) + + def _get_no_autofield_sequence_name(self, table): + """ + Manually created sequence name to keep backward compatibility for + AutoFields that aren't Oracle identity columns. + """ + name_length = self.max_name_length() - 3 + return '%s_SQ' % truncate_name(strip_quotes(table), name_length).upper() + + def _get_sequence_name(self, cursor, table, pk_name): + cursor.execute(""" + SELECT sequence_name + FROM user_tab_identity_cols + WHERE table_name = UPPER(%s) + AND column_name = UPPER(%s)""", [table, pk_name]) + row = cursor.fetchone() + return self._get_no_autofield_sequence_name(table) if row is None else row[0] + + def bulk_insert_sql(self, fields, placeholder_rows): + query = [] + for row in placeholder_rows: + select = [] + for i, placeholder in enumerate(row): + # A model without any fields has fields=[None]. 
+ if fields[i]: + internal_type = getattr(fields[i], 'target_field', fields[i]).get_internal_type() + placeholder = BulkInsertMapper.types.get(internal_type, '%s') % placeholder + # Add columns aliases to the first select to avoid "ORA-00918: + # column ambiguously defined" when two or more columns in the + # first select have the same value. + if not query: + placeholder = '%s col_%s' % (placeholder, i) + select.append(placeholder) + query.append('SELECT %s FROM DUAL' % ', '.join(select)) + # Bulk insert to tables with Oracle identity columns causes Oracle to + # add sequence.nextval to it. Sequence.nextval cannot be used with the + # UNION operator. To prevent incorrect SQL, move UNION to a subquery. + return 'SELECT * FROM (%s)' % ' UNION ALL '.join(query) + + def subtract_temporals(self, internal_type, lhs, rhs): + if internal_type == 'DateField': + lhs_sql, lhs_params = lhs + rhs_sql, rhs_params = rhs + return "NUMTODSINTERVAL(%s - %s, 'DAY')" % (lhs_sql, rhs_sql), lhs_params + rhs_params + return super().subtract_temporals(internal_type, lhs, rhs) + + def bulk_batch_size(self, fields, objs): + """Oracle restricts the number of parameters in a query.""" + if fields: + return self.connection.features.max_query_params // len(fields) + return len(objs) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/schema.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..742ff39f2cc14fb0ab27b87333ea541aff12bdd9 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/schema.py @@ -0,0 +1,165 @@ +import binascii +import copy +import datetime +import re + +from django.db.backends.base.schema import BaseDatabaseSchemaEditor +from django.db.utils import DatabaseError +from django.utils.encoding import force_text + + +class 
DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
    # Oracle-specific DDL templates. Oracle uses MODIFY (not ALTER COLUMN)
    # to change a column's type/nullability/default.
    sql_create_column = "ALTER TABLE %(table)s ADD %(column)s %(definition)s"
    sql_alter_column_type = "MODIFY %(column)s %(type)s"
    sql_alter_column_null = "MODIFY %(column)s NULL"
    sql_alter_column_not_null = "MODIFY %(column)s NOT NULL"
    sql_alter_column_default = "MODIFY %(column)s DEFAULT %(default)s"
    sql_alter_column_no_default = "MODIFY %(column)s DEFAULT NULL"
    sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s"
    # CASCADE CONSTRAINTS drops dependent FK constraints along with the table.
    sql_delete_table = "DROP TABLE %(table)s CASCADE CONSTRAINTS"

    def quote_value(self, value):
        """Return *value* as an inline SQL literal for Oracle DDL."""
        if isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
            return "'%s'" % value
        elif isinstance(value, str):
            # Double single quotes to escape them inside the literal.
            return "'%s'" % value.replace("\'", "\'\'")
        elif isinstance(value, (bytes, bytearray, memoryview)):
            # Binary data is emitted as a hex string literal.
            return "'%s'" % force_text(binascii.hexlify(value))
        elif isinstance(value, bool):
            return "1" if value else "0"
        else:
            return str(value)

    def remove_field(self, model, field):
        # If the column is an identity column, drop the identity before
        # removing the field.
        if self._is_identity_column(model._meta.db_table, field.column):
            self._drop_identity(model._meta.db_table, field.column)
        super().remove_field(model, field)

    def delete_model(self, model):
        # Run superclass action
        super().delete_model(model)
        # Clean up manually created sequence.
        # The PL/SQL block drops the legacy "<TABLE>_SQ" sequence only if it
        # exists, so deleting a model without one is a no-op.
        self.execute("""
            DECLARE
                i INTEGER;
            BEGIN
                SELECT COUNT(1) INTO i FROM USER_SEQUENCES
                    WHERE SEQUENCE_NAME = '%(sq_name)s';
                IF i = 1 THEN
                    EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"';
                END IF;
            END;
        /""" % {'sq_name': self.connection.ops._get_no_autofield_sequence_name(model._meta.db_table)})

    def alter_field(self, model, old_field, new_field, strict=False):
        # Try the generic ALTER first; fall back to Oracle-specific
        # workarounds keyed off the ORA error codes raised.
        try:
            super().alter_field(model, old_field, new_field, strict)
        except DatabaseError as e:
            description = str(e)
            # If we're changing type to an unsupported type we need a
            # SQLite-ish workaround
            if 'ORA-22858' in description or 'ORA-22859' in description:
                self._alter_field_type_workaround(model, old_field, new_field)
            # If an identity column is changing to a non-numeric type, drop the
            # identity first.
            elif 'ORA-30675' in description:
                self._drop_identity(model._meta.db_table, old_field.column)
                self.alter_field(model, old_field, new_field, strict)
            # If a primary key column is changing to an identity column, drop
            # the primary key first.
            elif 'ORA-30673' in description and old_field.primary_key:
                self._delete_primary_key(model, strict=True)
                self._alter_field_type_workaround(model, old_field, new_field)
            else:
                raise

    def _alter_field_type_workaround(self, model, old_field, new_field):
        """
        Oracle refuses to change from some type to other type.
        What we need to do instead is:
        - Add a nullable version of the desired field with a temporary name. If
          the new column is an auto field, then the temporary column can't be
          nullable.
        - Update the table to transfer values from old to new
        - Drop old column
        - Rename the new column and possibly drop the nullable property
        """
        # Make a new field that's like the new one but with a temporary
        # column name.
        new_temp_field = copy.deepcopy(new_field)
        new_temp_field.null = (new_field.get_internal_type() not in ('AutoField', 'BigAutoField'))
        new_temp_field.column = self._generate_temp_name(new_field.column)
        # Add it
        self.add_field(model, new_temp_field)
        # Explicit data type conversion
        # https://docs.oracle.com/database/121/SQLRF/sql_elements002.htm#SQLRF51054
        new_value = self.quote_name(old_field.column)
        old_type = old_field.db_type(self.connection)
        if re.match('^N?CLOB', old_type):
            # CLOBs must be converted to VARCHAR2 before further casts.
            new_value = "TO_CHAR(%s)" % new_value
            old_type = 'VARCHAR2'
        if re.match('^N?VARCHAR2', old_type):
            new_internal_type = new_field.get_internal_type()
            if new_internal_type == 'DateField':
                new_value = "TO_DATE(%s, 'YYYY-MM-DD')" % new_value
            elif new_internal_type == 'DateTimeField':
                new_value = "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value
            elif new_internal_type == 'TimeField':
                # TimeField are stored as TIMESTAMP with a 1900-01-01 date part.
                new_value = "TO_TIMESTAMP(CONCAT('1900-01-01 ', %s), 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value
        # Transfer values across
        self.execute("UPDATE %s set %s=%s" % (
            self.quote_name(model._meta.db_table),
            self.quote_name(new_temp_field.column),
            new_value,
        ))
        # Drop the old field
        self.remove_field(model, old_field)
        # Rename and possibly make the new field NOT NULL
        super().alter_field(model, new_temp_field, new_field)

    def normalize_name(self, name):
        """
        Get the properly shortened and uppercased identifier as returned by
        quote_name() but without the quotes.
        """
        nn = self.quote_name(name)
        if nn[0] == '"' and nn[-1] == '"':
            nn = nn[1:-1]
        return nn

    def _generate_temp_name(self, for_name):
        """Generate temporary names for workarounds that need temp columns."""
        suffix = hex(hash(for_name)).upper()[1:]
        return self.normalize_name(for_name + "_" + suffix)

    def prepare_default(self, value):
        # Oracle supports inline literal defaults; reuse quote_value.
        return self.quote_value(value)

    def _field_should_be_indexed(self, model, field):
        # Skip index creation for data types Oracle cannot index
        # (see connection._limited_data_types).
        create_index = super()._field_should_be_indexed(model, field)
        db_type = field.db_type(self.connection)
        if db_type is not None and db_type.lower() in self.connection._limited_data_types:
            return False
        return create_index

    def _is_identity_column(self, table_name, column_name):
        # Query the data dictionary; returns 1/0 (truthy/falsy) or False when
        # the column doesn't exist.
        with self.connection.cursor() as cursor:
            cursor.execute("""
                SELECT
                    CASE WHEN identity_column = 'YES' THEN 1 ELSE 0 END
                FROM user_tab_cols
                WHERE table_name = %s AND
                      column_name = %s
            """, [self.normalize_name(table_name), self.normalize_name(column_name)])
            row = cursor.fetchone()
            return row[0] if row else False

    def _drop_identity(self, table_name, column_name):
        self.execute('ALTER TABLE %(table)s MODIFY %(column)s DROP IDENTITY' % {
            'table': self.quote_name(table_name),
            'column': self.quote_name(column_name),
        })
diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/utils.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c81d5cd7ba996efbd62b42060be29023cdb61cc
--- /dev/null
+++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/utils.py
@@ -0,0 +1,55 @@
import datetime

from .base import Database


class InsertIdVar:
    """
    A late-binding cursor variable that can be passed to Cursor.execute
    as a parameter, in order to receive the id of the row created by an
    insert statement.
    """

    def bind_parameter(self, cursor):
        # Create a NUMBER output variable and stash it on the wrapper cursor
        # so DatabaseOperations.fetch_returned_insert_id() can read it back.
        param = cursor.cursor.var(Database.NUMBER)
        cursor._insert_id_var = param
        return param


class Oracle_datetime(datetime.datetime):
    """
    A datetime object, with an additional class attribute
    to tell cx_Oracle to save the microseconds too.
    """
    # cx_Oracle binds values of this class as TIMESTAMP (keeps microseconds).
    input_size = Database.TIMESTAMP

    @classmethod
    def from_datetime(cls, dt):
        # Alternate constructor: copy all fields from a plain datetime.
        return Oracle_datetime(
            dt.year, dt.month, dt.day,
            dt.hour, dt.minute, dt.second, dt.microsecond,
        )


class BulkInsertMapper:
    """Map Django internal field types to Oracle SQL casts for bulk INSERT."""
    BLOB = 'TO_BLOB(%s)'
    DATE = 'TO_DATE(%s)'
    INTERVAL = 'CAST(%s as INTERVAL DAY(9) TO SECOND(6))'
    NUMBER = 'TO_NUMBER(%s)'
    TIMESTAMP = 'TO_TIMESTAMP(%s)'

    # Used by DatabaseOperations.bulk_insert_sql() to wrap placeholders so the
    # UNION ALL branches all agree on column types.
    types = {
        'BigIntegerField': NUMBER,
        'BinaryField': BLOB,
        'DateField': DATE,
        'DateTimeField': TIMESTAMP,
        'DecimalField': NUMBER,
        'DurationField': INTERVAL,
        'FloatField': NUMBER,
        'IntegerField': NUMBER,
        'NullBooleanField': NUMBER,
        'PositiveIntegerField': NUMBER,
        'PositiveSmallIntegerField': NUMBER,
        'SmallIntegerField': NUMBER,
        'TimeField': TIMESTAMP,
    }
diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/validation.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/validation.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5a35fd3caef1993ad58ef1eaea0f457bcdc5b52
--- /dev/null
+++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/oracle/validation.py
@@ -0,0 +1,22 @@
from django.core import checks
from django.db.backends.base.validation import BaseDatabaseValidation


class DatabaseValidation(BaseDatabaseValidation):
    def check_field_type(self, field, field_type):
        """Oracle doesn't support a database index on some data types."""
        errors = []
        if field.db_index and field_type.lower() in self.connection._limited_data_types:
            errors.append(
                checks.Warning(
                    'Oracle does not support a database index on %s columns.'
                    % field_type,
                    hint=(
                        "An index won't be created. Silence this warning if "
                        "you don't care about it."
                    ),
                    obj=field,
                    id='fields.W162',
                )
            )
        return errors
diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/base.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbe444988a9b4bcb1b5a1bf1f74d39e1004ced30
--- /dev/null
+++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/base.py
@@ -0,0 +1,272 @@
"""
PostgreSQL database backend for Django.

Requires psycopg 2: http://initd.org/projects/psycopg2
"""

import threading
import warnings

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.utils import DatabaseError as WrappedDatabaseError
from django.utils.functional import cached_property
from django.utils.safestring import SafeText
from django.utils.version import get_version_tuple

try:
    import psycopg2 as Database
    import psycopg2.extensions
    import psycopg2.extras
except ImportError as e:
    raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)


def psycopg2_version():
    # Strip any trailing build metadata (e.g. "2.7.4 (dt dec ...)").
    version = psycopg2.__version__.split(' ', 1)[0]
    return get_version_tuple(version)


PSYCOPG2_VERSION = psycopg2_version()

if PSYCOPG2_VERSION < (2, 5, 4):
    raise ImproperlyConfigured("psycopg2_version 2.5.4 or newer is required; you have %s" % psycopg2.__version__)


# Some of these import psycopg2, so import them after checking if it's installed.
+from .client import DatabaseClient # NOQA isort:skip +from .creation import DatabaseCreation # NOQA isort:skip +from .features import DatabaseFeatures # NOQA isort:skip +from .introspection import DatabaseIntrospection # NOQA isort:skip +from .operations import DatabaseOperations # NOQA isort:skip +from .schema import DatabaseSchemaEditor # NOQA isort:skip +from .utils import utc_tzinfo_factory # NOQA isort:skip + +psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString) +psycopg2.extras.register_uuid() + +# Register support for inet[] manually so we don't have to handle the Inet() +# object on load all the time. +INETARRAY_OID = 1041 +INETARRAY = psycopg2.extensions.new_array_type( + (INETARRAY_OID,), + 'INETARRAY', + psycopg2.extensions.UNICODE, +) +psycopg2.extensions.register_type(INETARRAY) + + +class DatabaseWrapper(BaseDatabaseWrapper): + vendor = 'postgresql' + display_name = 'PostgreSQL' + # This dictionary maps Field objects to their associated PostgreSQL column + # types, as strings. Column-type strings can contain format strings; they'll + # be interpolated against the values of Field.__dict__ before being output. + # If a column type is set to None, it won't be included in the output. 
+ data_types = { + 'AutoField': 'serial', + 'BigAutoField': 'bigserial', + 'BinaryField': 'bytea', + 'BooleanField': 'boolean', + 'CharField': 'varchar(%(max_length)s)', + 'DateField': 'date', + 'DateTimeField': 'timestamp with time zone', + 'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)', + 'DurationField': 'interval', + 'FileField': 'varchar(%(max_length)s)', + 'FilePathField': 'varchar(%(max_length)s)', + 'FloatField': 'double precision', + 'IntegerField': 'integer', + 'BigIntegerField': 'bigint', + 'IPAddressField': 'inet', + 'GenericIPAddressField': 'inet', + 'NullBooleanField': 'boolean', + 'OneToOneField': 'integer', + 'PositiveIntegerField': 'integer', + 'PositiveSmallIntegerField': 'smallint', + 'SlugField': 'varchar(%(max_length)s)', + 'SmallIntegerField': 'smallint', + 'TextField': 'text', + 'TimeField': 'time', + 'UUIDField': 'uuid', + } + data_type_check_constraints = { + 'PositiveIntegerField': '"%(column)s" >= 0', + 'PositiveSmallIntegerField': '"%(column)s" >= 0', + } + operators = { + 'exact': '= %s', + 'iexact': '= UPPER(%s)', + 'contains': 'LIKE %s', + 'icontains': 'LIKE UPPER(%s)', + 'regex': '~ %s', + 'iregex': '~* %s', + 'gt': '> %s', + 'gte': '>= %s', + 'lt': '< %s', + 'lte': '<= %s', + 'startswith': 'LIKE %s', + 'endswith': 'LIKE %s', + 'istartswith': 'LIKE UPPER(%s)', + 'iendswith': 'LIKE UPPER(%s)', + } + + # The patterns below are used to generate SQL pattern lookup clauses when + # the right-hand side of the lookup isn't a raw string (it might be an expression + # or the result of a bilateral transformation). + # In those cases, special characters for LIKE operators (e.g. \, *, _) should be + # escaped on database side. + # + # Note: we use str.format() here for readability as '%' is used as a wildcard for + # the LIKE operator. 
+ pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" + pattern_ops = { + 'contains': "LIKE '%%' || {} || '%%'", + 'icontains': "LIKE '%%' || UPPER({}) || '%%'", + 'startswith': "LIKE {} || '%%'", + 'istartswith': "LIKE UPPER({}) || '%%'", + 'endswith': "LIKE '%%' || {}", + 'iendswith': "LIKE '%%' || UPPER({})", + } + + Database = Database + SchemaEditorClass = DatabaseSchemaEditor + # Classes instantiated in __init__(). + client_class = DatabaseClient + creation_class = DatabaseCreation + features_class = DatabaseFeatures + introspection_class = DatabaseIntrospection + ops_class = DatabaseOperations + # PostgreSQL backend-specific attributes. + _named_cursor_idx = 0 + + def get_connection_params(self): + settings_dict = self.settings_dict + # None may be used to connect to the default 'postgres' db + if settings_dict['NAME'] == '': + raise ImproperlyConfigured( + "settings.DATABASES is improperly configured. " + "Please supply the NAME value.") + conn_params = { + 'database': settings_dict['NAME'] or 'postgres', + } + conn_params.update(settings_dict['OPTIONS']) + conn_params.pop('isolation_level', None) + if settings_dict['USER']: + conn_params['user'] = settings_dict['USER'] + if settings_dict['PASSWORD']: + conn_params['password'] = settings_dict['PASSWORD'] + if settings_dict['HOST']: + conn_params['host'] = settings_dict['HOST'] + if settings_dict['PORT']: + conn_params['port'] = settings_dict['PORT'] + return conn_params + + def get_new_connection(self, conn_params): + connection = Database.connect(**conn_params) + + # self.isolation_level must be set: + # - after connecting to the database in order to obtain the database's + # default when no value is explicitly specified in options. + # - before calling _set_autocommit() because if autocommit is on, that + # will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT. 
+ options = self.settings_dict['OPTIONS'] + try: + self.isolation_level = options['isolation_level'] + except KeyError: + self.isolation_level = connection.isolation_level + else: + # Set the isolation level to the value from OPTIONS. + if self.isolation_level != connection.isolation_level: + connection.set_session(isolation_level=self.isolation_level) + + return connection + + def ensure_timezone(self): + self.ensure_connection() + conn_timezone_name = self.connection.get_parameter_status('TimeZone') + timezone_name = self.timezone_name + if timezone_name and conn_timezone_name != timezone_name: + with self.connection.cursor() as cursor: + cursor.execute(self.ops.set_time_zone_sql(), [timezone_name]) + return True + return False + + def init_connection_state(self): + self.connection.set_client_encoding('UTF8') + + timezone_changed = self.ensure_timezone() + if timezone_changed: + # Commit after setting the time zone (see #17062) + if not self.get_autocommit(): + self.connection.commit() + + def create_cursor(self, name=None): + if name: + # In autocommit mode, the cursor will be used outside of a + # transaction, hence use a holdable cursor. + cursor = self.connection.cursor(name, scrollable=False, withhold=self.connection.autocommit) + else: + cursor = self.connection.cursor() + cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None + return cursor + + def chunked_cursor(self): + self._named_cursor_idx += 1 + return self._cursor( + name='_django_curs_%d_%d' % ( + # Avoid reusing name in other threads + threading.current_thread().ident, + self._named_cursor_idx, + ) + ) + + def _set_autocommit(self, autocommit): + with self.wrap_database_errors: + self.connection.autocommit = autocommit + + def check_constraints(self, table_names=None): + """ + Check constraints by setting them to immediate. Return them to deferred + afterward. 
+ """ + self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE') + self.cursor().execute('SET CONSTRAINTS ALL DEFERRED') + + def is_usable(self): + try: + # Use a psycopg cursor directly, bypassing Django's utilities. + self.connection.cursor().execute("SELECT 1") + except Database.Error: + return False + else: + return True + + @property + def _nodb_connection(self): + nodb_connection = super()._nodb_connection + try: + nodb_connection.ensure_connection() + except (Database.DatabaseError, WrappedDatabaseError): + warnings.warn( + "Normally Django will use a connection to the 'postgres' database " + "to avoid running initialization queries against the production " + "database when it's not needed (for example, when running tests). " + "Django was unable to create a connection to the 'postgres' database " + "and will use the default database instead.", + RuntimeWarning + ) + settings_dict = self.settings_dict.copy() + settings_dict['NAME'] = settings.DATABASES[DEFAULT_DB_ALIAS]['NAME'] + nodb_connection = self.__class__( + self.settings_dict.copy(), + alias=self.alias, + allow_thread_sharing=False) + return nodb_connection + + @cached_property + def pg_version(self): + with self.temporary_connection(): + return self.connection.server_version diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/client.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/client.py new file mode 100644 index 0000000000000000000000000000000000000000..6d4cc9b69293c178921fa16e5f818caa082da6a8 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/client.py @@ -0,0 +1,71 @@ +import os +import signal +import subprocess + +from django.core.files.temp import NamedTemporaryFile +from django.db.backends.base.client import BaseDatabaseClient + + +def _escape_pgpass(txt): + """ + Escape a fragment of a PostgreSQL .pgpass 
file. + """ + return txt.replace('\\', '\\\\').replace(':', '\\:') + + +class DatabaseClient(BaseDatabaseClient): + executable_name = 'psql' + + @classmethod + def runshell_db(cls, conn_params): + args = [cls.executable_name] + + host = conn_params.get('host', '') + port = conn_params.get('port', '') + dbname = conn_params.get('database', '') + user = conn_params.get('user', '') + passwd = conn_params.get('password', '') + + if user: + args += ['-U', user] + if host: + args += ['-h', host] + if port: + args += ['-p', str(port)] + args += [dbname] + + temp_pgpass = None + sigint_handler = signal.getsignal(signal.SIGINT) + try: + if passwd: + # Create temporary .pgpass file. + temp_pgpass = NamedTemporaryFile(mode='w+') + try: + print( + _escape_pgpass(host) or '*', + str(port) or '*', + _escape_pgpass(dbname) or '*', + _escape_pgpass(user) or '*', + _escape_pgpass(passwd), + file=temp_pgpass, + sep=':', + flush=True, + ) + os.environ['PGPASSFILE'] = temp_pgpass.name + except UnicodeEncodeError: + # If the current locale can't encode the data, let the + # user input the password manually. + pass + # Allow SIGINT to pass to psql to abort queries. + signal.signal(signal.SIGINT, signal.SIG_IGN) + subprocess.check_call(args) + finally: + # Restore the orignal SIGINT handler. 
+ signal.signal(signal.SIGINT, sigint_handler) + if temp_pgpass: + temp_pgpass.close() + if 'PGPASSFILE' in os.environ: # unit tests need cleanup + del os.environ['PGPASSFILE'] + + def runshell(self): + DatabaseClient.runshell_db(self.connection.get_connection_params()) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/creation.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/creation.py new file mode 100644 index 0000000000000000000000000000000000000000..a93bdbb4a13b33d227ed63850578094572410732 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/creation.py @@ -0,0 +1,70 @@ +import sys + +from psycopg2 import errorcodes + +from django.db.backends.base.creation import BaseDatabaseCreation + + +class DatabaseCreation(BaseDatabaseCreation): + + def _quote_name(self, name): + return self.connection.ops.quote_name(name) + + def _get_database_create_suffix(self, encoding=None, template=None): + suffix = "" + if encoding: + suffix += " ENCODING '{}'".format(encoding) + if template: + suffix += " TEMPLATE {}".format(self._quote_name(template)) + if suffix: + suffix = "WITH" + suffix + return suffix + + def sql_table_creation_suffix(self): + test_settings = self.connection.settings_dict['TEST'] + assert test_settings['COLLATION'] is None, ( + "PostgreSQL does not support collation setting at database creation time." + ) + return self._get_database_create_suffix( + encoding=test_settings['CHARSET'], + template=test_settings.get('TEMPLATE'), + ) + + def _execute_create_test_db(self, cursor, parameters, keepdb=False): + try: + super()._execute_create_test_db(cursor, parameters, keepdb) + except Exception as e: + if getattr(e.__cause__, 'pgcode', '') != errorcodes.DUPLICATE_DATABASE: + # All errors except "database already exists" cancel tests. 
+ sys.stderr.write('Got an error creating the test database: %s\n' % e) + sys.exit(2) + elif not keepdb: + # If the database should be kept, ignore "database already + # exists". + raise e + + def _clone_test_db(self, suffix, verbosity, keepdb=False): + # CREATE DATABASE ... WITH TEMPLATE ... requires closing connections + # to the template database. + self.connection.close() + + source_database_name = self.connection.settings_dict['NAME'] + target_database_name = self.get_test_db_clone_settings(suffix)['NAME'] + test_db_params = { + 'dbname': self._quote_name(target_database_name), + 'suffix': self._get_database_create_suffix(template=source_database_name), + } + with self._nodb_connection.cursor() as cursor: + try: + self._execute_create_test_db(cursor, test_db_params, keepdb) + except Exception as e: + try: + if verbosity >= 1: + print("Destroying old test database for alias %s..." % ( + self._get_database_display_str(verbosity, target_database_name), + )) + cursor.execute('DROP DATABASE %(dbname)s' % test_db_params) + self._execute_create_test_db(cursor, test_db_params, keepdb) + except Exception as e: + sys.stderr.write("Got an error cloning the test database: %s\n" % e) + sys.exit(2) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/features.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/features.py new file mode 100644 index 0000000000000000000000000000000000000000..0349493dae61484995a93cd8fc9ac2861452f314 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/features.py @@ -0,0 +1,75 @@ +from django.db.backends.base.features import BaseDatabaseFeatures +from django.db.utils import InterfaceError +from django.utils.functional import cached_property + + +class DatabaseFeatures(BaseDatabaseFeatures): + allows_group_by_selected_pks = True + can_return_id_from_insert = True + 
can_return_ids_from_bulk_insert = True + has_real_datatype = True + has_native_uuid_field = True + has_native_duration_field = True + can_defer_constraint_checks = True + has_select_for_update = True + has_select_for_update_nowait = True + has_select_for_update_of = True + uses_savepoints = True + can_release_savepoints = True + supports_tablespaces = True + supports_transactions = True + can_introspect_autofield = True + can_introspect_ip_address_field = True + can_introspect_small_integer_field = True + can_distinct_on_fields = True + can_rollback_ddl = True + supports_combined_alters = True + nulls_order_largest = True + closed_cursor_error_class = InterfaceError + has_case_insensitive_like = False + requires_sqlparse_for_splitting = False + greatest_least_ignores_nulls = True + can_clone_databases = True + supports_temporal_subtraction = True + supports_slicing_ordering_in_compound = True + create_test_procedure_without_params_sql = """ + CREATE FUNCTION test_procedure () RETURNS void AS $$ + DECLARE + V_I INTEGER; + BEGIN + V_I := 1; + END; + $$ LANGUAGE plpgsql;""" + create_test_procedure_with_int_param_sql = """ + CREATE FUNCTION test_procedure (P_I INTEGER) RETURNS void AS $$ + DECLARE + V_I INTEGER; + BEGIN + V_I := P_I; + END; + $$ LANGUAGE plpgsql;""" + supports_over_clause = True + + @cached_property + def supports_aggregate_filter_clause(self): + return self.connection.pg_version >= 90400 + + @cached_property + def has_select_for_update_skip_locked(self): + return self.connection.pg_version >= 90500 + + @cached_property + def has_brin_index_support(self): + return self.connection.pg_version >= 90500 + + @cached_property + def has_jsonb_datatype(self): + return self.connection.pg_version >= 90400 + + @cached_property + def has_jsonb_agg(self): + return self.connection.pg_version >= 90500 + + @cached_property + def has_gin_pending_list_limit(self): + return self.connection.pg_version >= 90500 diff --git 
a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/introspection.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/introspection.py new file mode 100644 index 0000000000000000000000000000000000000000..1e987d17793c377237809f089e455508257d8373 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/introspection.py @@ -0,0 +1,263 @@ +import warnings + +from django.db.backends.base.introspection import ( + BaseDatabaseIntrospection, FieldInfo, TableInfo, +) +from django.db.models.indexes import Index +from django.utils.deprecation import RemovedInDjango21Warning + + +class DatabaseIntrospection(BaseDatabaseIntrospection): + # Maps type codes to Django Field types. + data_types_reverse = { + 16: 'BooleanField', + 17: 'BinaryField', + 20: 'BigIntegerField', + 21: 'SmallIntegerField', + 23: 'IntegerField', + 25: 'TextField', + 700: 'FloatField', + 701: 'FloatField', + 869: 'GenericIPAddressField', + 1042: 'CharField', # blank-padded + 1043: 'CharField', + 1082: 'DateField', + 1083: 'TimeField', + 1114: 'DateTimeField', + 1184: 'DateTimeField', + 1266: 'TimeField', + 1700: 'DecimalField', + 2950: 'UUIDField', + } + + ignored_tables = [] + + _get_indexes_query = """ + SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary + FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, + pg_catalog.pg_index idx, pg_catalog.pg_attribute attr + WHERE c.oid = idx.indrelid + AND idx.indexrelid = c2.oid + AND attr.attrelid = c.oid + AND attr.attnum = idx.indkey[0] + AND c.relname = %s""" + + def get_field_type(self, data_type, description): + field_type = super().get_field_type(data_type, description) + if description.default and 'nextval' in description.default: + if field_type == 'IntegerField': + return 'AutoField' + elif field_type == 'BigIntegerField': + return 'BigAutoField' + return field_type + + def 
get_table_list(self, cursor): + """Return a list of table and view names in the current database.""" + cursor.execute(""" + SELECT c.relname, c.relkind + FROM pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE c.relkind IN ('r', 'v') + AND n.nspname NOT IN ('pg_catalog', 'pg_toast') + AND pg_catalog.pg_table_is_visible(c.oid)""") + return [TableInfo(row[0], {'r': 't', 'v': 'v'}.get(row[1])) + for row in cursor.fetchall() + if row[0] not in self.ignored_tables] + + def get_table_description(self, cursor, table_name): + """ + Return a description of the table with the DB-API cursor.description + interface. + """ + # As cursor.description does not return reliably the nullable property, + # we have to query the information_schema (#7783) + cursor.execute(""" + SELECT column_name, is_nullable, column_default + FROM information_schema.columns + WHERE table_name = %s""", [table_name]) + field_map = {line[0]: line[1:] for line in cursor.fetchall()} + cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name)) + return [ + FieldInfo(*(line[0:6] + (field_map[line.name][0] == 'YES', field_map[line.name][1]))) + for line in cursor.description + ] + + def get_sequences(self, cursor, table_name, table_fields=()): + sequences = [] + cursor.execute(""" + SELECT s.relname as sequence_name, col.attname + FROM pg_class s + JOIN pg_namespace sn ON sn.oid = s.relnamespace + JOIN pg_depend d ON d.refobjid = s.oid AND d.refclassid='pg_class'::regclass + JOIN pg_attrdef ad ON ad.oid = d.objid AND d.classid = 'pg_attrdef'::regclass + JOIN pg_attribute col ON col.attrelid = ad.adrelid AND col.attnum = ad.adnum + JOIN pg_class tbl ON tbl.oid = ad.adrelid + JOIN pg_namespace n ON n.oid = tbl.relnamespace + WHERE s.relkind = 'S' + AND d.deptype in ('a', 'n') + AND n.nspname = 'public' + AND tbl.relname = %s + """, [table_name]) + for row in cursor.fetchall(): + sequences.append({'name': row[0], 'table': table_name, 
'column': row[1]}) + return sequences + + def get_relations(self, cursor, table_name): + """ + Return a dictionary of {field_name: (field_name_other_table, other_table)} + representing all relationships to the given table. + """ + cursor.execute(""" + SELECT c2.relname, a1.attname, a2.attname + FROM pg_constraint con + LEFT JOIN pg_class c1 ON con.conrelid = c1.oid + LEFT JOIN pg_class c2 ON con.confrelid = c2.oid + LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1] + LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1] + WHERE c1.relname = %s + AND con.contype = 'f'""", [table_name]) + relations = {} + for row in cursor.fetchall(): + relations[row[1]] = (row[2], row[0]) + return relations + + def get_key_columns(self, cursor, table_name): + key_columns = [] + cursor.execute(""" + SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column + FROM information_schema.constraint_column_usage ccu + LEFT JOIN information_schema.key_column_usage kcu + ON ccu.constraint_catalog = kcu.constraint_catalog + AND ccu.constraint_schema = kcu.constraint_schema + AND ccu.constraint_name = kcu.constraint_name + LEFT JOIN information_schema.table_constraints tc + ON ccu.constraint_catalog = tc.constraint_catalog + AND ccu.constraint_schema = tc.constraint_schema + AND ccu.constraint_name = tc.constraint_name + WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""", [table_name]) + key_columns.extend(cursor.fetchall()) + return key_columns + + def get_indexes(self, cursor, table_name): + warnings.warn( + "get_indexes() is deprecated in favor of get_constraints().", + RemovedInDjango21Warning, stacklevel=2 + ) + # This query retrieves each index on the given table, including the + # first associated field name + cursor.execute(self._get_indexes_query, [table_name]) + indexes = {} + for row in cursor.fetchall(): + # row[1] (idx.indkey) is stored in the DB as an array. 
It comes out as + # a string of space-separated integers. This designates the field + # indexes (1-based) of the fields that have indexes on the table. + # Here, we skip any indexes across multiple fields. + if ' ' in row[1]: + continue + if row[0] not in indexes: + indexes[row[0]] = {'primary_key': False, 'unique': False} + # It's possible to have the unique and PK constraints in separate indexes. + if row[3]: + indexes[row[0]]['primary_key'] = True + if row[2]: + indexes[row[0]]['unique'] = True + return indexes + + def get_constraints(self, cursor, table_name): + """ + Retrieve any constraints or keys (unique, pk, fk, check, index) across + one or more columns. Also retrieve the definition of expression-based + indexes. + """ + constraints = {} + # Loop over the key table, collecting things as constraints. The column + # array must return column names in the same order in which they were + # created. + # The subquery containing generate_series can be replaced with + # "WITH ORDINALITY" when support for PostgreSQL 9.3 is dropped. + cursor.execute(""" + SELECT + c.conname, + array( + SELECT attname + FROM ( + SELECT unnest(c.conkey) AS colid, + generate_series(1, array_length(c.conkey, 1)) AS arridx + ) AS cols + JOIN pg_attribute AS ca ON cols.colid = ca.attnum + WHERE ca.attrelid = c.conrelid + ORDER BY cols.arridx + ), + c.contype, + (SELECT fkc.relname || '.' 
|| fka.attname + FROM pg_attribute AS fka + JOIN pg_class AS fkc ON fka.attrelid = fkc.oid + WHERE fka.attrelid = c.confrelid AND fka.attnum = c.confkey[1]), + cl.reloptions + FROM pg_constraint AS c + JOIN pg_class AS cl ON c.conrelid = cl.oid + JOIN pg_namespace AS ns ON cl.relnamespace = ns.oid + WHERE ns.nspname = %s AND cl.relname = %s + """, ["public", table_name]) + for constraint, columns, kind, used_cols, options in cursor.fetchall(): + constraints[constraint] = { + "columns": columns, + "primary_key": kind == "p", + "unique": kind in ["p", "u"], + "foreign_key": tuple(used_cols.split(".", 1)) if kind == "f" else None, + "check": kind == "c", + "index": False, + "definition": None, + "options": options, + } + # Now get indexes + # The row_number() function for ordering the index fields can be + # replaced by WITH ORDINALITY in the unnest() functions when support + # for PostgreSQL 9.3 is dropped. + cursor.execute(""" + SELECT + indexname, array_agg(attname ORDER BY rnum), indisunique, indisprimary, + array_agg(ordering ORDER BY rnum), amname, exprdef, s2.attoptions + FROM ( + SELECT + row_number() OVER () as rnum, c2.relname as indexname, + idx.*, attr.attname, am.amname, + CASE + WHEN idx.indexprs IS NOT NULL THEN + pg_get_indexdef(idx.indexrelid) + END AS exprdef, + CASE am.amname + WHEN 'btree' THEN + CASE (option & 1) + WHEN 1 THEN 'DESC' ELSE 'ASC' + END + END as ordering, + c2.reloptions as attoptions + FROM ( + SELECT + *, unnest(i.indkey) as key, unnest(i.indoption) as option + FROM pg_index i + ) idx + LEFT JOIN pg_class c ON idx.indrelid = c.oid + LEFT JOIN pg_class c2 ON idx.indexrelid = c2.oid + LEFT JOIN pg_am am ON c2.relam = am.oid + LEFT JOIN pg_attribute attr ON attr.attrelid = c.oid AND attr.attnum = idx.key + WHERE c.relname = %s + ) s2 + GROUP BY indexname, indisunique, indisprimary, amname, exprdef, attoptions; + """, [table_name]) + for index, columns, unique, primary, orders, type_, definition, options in cursor.fetchall(): + if 
index not in constraints: + constraints[index] = { + "columns": columns if columns != [None] else [], + "orders": orders if orders != [None] else [], + "primary_key": primary, + "unique": unique, + "foreign_key": None, + "check": False, + "index": True, + "type": Index.suffix if type_ == 'btree' else type_, + "definition": definition, + "options": options, + } + return constraints diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/operations.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/operations.py new file mode 100644 index 0000000000000000000000000000000000000000..c4a61f7070a12bf207f7ba370d4a0297f8737ab1 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/operations.py @@ -0,0 +1,261 @@ +from psycopg2.extras import Inet + +from django.conf import settings +from django.db import NotSupportedError +from django.db.backends.base.operations import BaseDatabaseOperations + + +class DatabaseOperations(BaseDatabaseOperations): + cast_char_field_without_max_length = 'varchar' + + def unification_cast_sql(self, output_field): + internal_type = output_field.get_internal_type() + if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"): + # PostgreSQL will resolve a union as type 'text' if input types are + # 'unknown'. + # https://www.postgresql.org/docs/current/static/typeconv-union-case.html + # These fields cannot be implicitly cast back in the default + # PostgreSQL configuration so we need to explicitly cast them. + # We must also remove components of the type within brackets: + # varchar(255) -> varchar. 
+ return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0] + return '%s' + + def date_extract_sql(self, lookup_type, field_name): + # https://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT + if lookup_type == 'week_day': + # For consistency across backends, we return Sunday=1, Saturday=7. + return "EXTRACT('dow' FROM %s) + 1" % field_name + else: + return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name) + + def date_trunc_sql(self, lookup_type, field_name): + # https://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC + return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) + + def _convert_field_to_tz(self, field_name, tzname): + if settings.USE_TZ: + field_name = "%s AT TIME ZONE '%s'" % (field_name, tzname) + return field_name + + def datetime_cast_date_sql(self, field_name, tzname): + field_name = self._convert_field_to_tz(field_name, tzname) + return '(%s)::date' % field_name + + def datetime_cast_time_sql(self, field_name, tzname): + field_name = self._convert_field_to_tz(field_name, tzname) + return '(%s)::time' % field_name + + def datetime_extract_sql(self, lookup_type, field_name, tzname): + field_name = self._convert_field_to_tz(field_name, tzname) + return self.date_extract_sql(lookup_type, field_name) + + def datetime_trunc_sql(self, lookup_type, field_name, tzname): + field_name = self._convert_field_to_tz(field_name, tzname) + # https://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC + return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) + + def time_trunc_sql(self, lookup_type, field_name): + return "DATE_TRUNC('%s', %s)::time" % (lookup_type, field_name) + + def deferrable_sql(self): + return " DEFERRABLE INITIALLY DEFERRED" + + def fetch_returned_insert_ids(self, cursor): + """ + Given a cursor object that has just performed an INSERT...RETURNING + statement into a table that has an 
auto-incrementing ID, return the + list of newly created IDs. + """ + return [item[0] for item in cursor.fetchall()] + + def lookup_cast(self, lookup_type, internal_type=None): + lookup = '%s' + + # Cast text lookups to text to allow things like filter(x__contains=4) + if lookup_type in ('iexact', 'contains', 'icontains', 'startswith', + 'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'): + if internal_type in ('IPAddressField', 'GenericIPAddressField'): + lookup = "HOST(%s)" + elif internal_type in ('CICharField', 'CIEmailField', 'CITextField'): + lookup = '%s::citext' + else: + lookup = "%s::text" + + # Use UPPER(x) for case-insensitive lookups; it's faster. + if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'): + lookup = 'UPPER(%s)' % lookup + + return lookup + + def no_limit_value(self): + return None + + def prepare_sql_script(self, sql): + return [sql] + + def quote_name(self, name): + if name.startswith('"') and name.endswith('"'): + return name # Quoting once is enough. + return '"%s"' % name + + def set_time_zone_sql(self): + return "SET TIME ZONE %s" + + def sql_flush(self, style, tables, sequences, allow_cascade=False): + if tables: + # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows + # us to truncate tables referenced by a foreign key in any other + # table. + tables_sql = ', '.join( + style.SQL_FIELD(self.quote_name(table)) for table in tables) + if allow_cascade: + sql = ['%s %s %s;' % ( + style.SQL_KEYWORD('TRUNCATE'), + tables_sql, + style.SQL_KEYWORD('CASCADE'), + )] + else: + sql = ['%s %s;' % ( + style.SQL_KEYWORD('TRUNCATE'), + tables_sql, + )] + sql.extend(self.sequence_reset_by_name_sql(style, sequences)) + return sql + else: + return [] + + def sequence_reset_by_name_sql(self, style, sequences): + # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... 
style SQL statements + # to reset sequence indices + sql = [] + for sequence_info in sequences: + table_name = sequence_info['table'] + column_name = sequence_info['column'] + if not (column_name and len(column_name) > 0): + # This will be the case if it's an m2m using an autogenerated + # intermediate table (see BaseDatabaseIntrospection.sequence_list) + column_name = 'id' + sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % ( + style.SQL_KEYWORD('SELECT'), + style.SQL_TABLE(self.quote_name(table_name)), + style.SQL_FIELD(column_name), + )) + return sql + + def tablespace_sql(self, tablespace, inline=False): + if inline: + return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) + else: + return "TABLESPACE %s" % self.quote_name(tablespace) + + def sequence_reset_sql(self, style, model_list): + from django.db import models + output = [] + qn = self.quote_name + for model in model_list: + # Use `coalesce` to set the sequence for each model to the max pk value if there are records, + # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true + # if there are records (as the max pk value is already in use), otherwise set it to false. + # Use pg_get_serial_sequence to get the underlying sequence name from the table name + # and column name (available since PostgreSQL 8) + + for f in model._meta.local_fields: + if isinstance(f, models.AutoField): + output.append( + "%s setval(pg_get_serial_sequence('%s','%s'), " + "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( + style.SQL_KEYWORD('SELECT'), + style.SQL_TABLE(qn(model._meta.db_table)), + style.SQL_FIELD(f.column), + style.SQL_FIELD(qn(f.column)), + style.SQL_FIELD(qn(f.column)), + style.SQL_KEYWORD('IS NOT'), + style.SQL_KEYWORD('FROM'), + style.SQL_TABLE(qn(model._meta.db_table)), + ) + ) + break # Only one AutoField is allowed per model, so don't bother continuing. 
+ for f in model._meta.many_to_many: + if not f.remote_field.through: + output.append( + "%s setval(pg_get_serial_sequence('%s','%s'), " + "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( + style.SQL_KEYWORD('SELECT'), + style.SQL_TABLE(qn(f.m2m_db_table())), + style.SQL_FIELD('id'), + style.SQL_FIELD(qn('id')), + style.SQL_FIELD(qn('id')), + style.SQL_KEYWORD('IS NOT'), + style.SQL_KEYWORD('FROM'), + style.SQL_TABLE(qn(f.m2m_db_table())) + ) + ) + return output + + def prep_for_iexact_query(self, x): + return x + + def max_name_length(self): + """ + Return the maximum length of an identifier. + + The maximum length of an identifier is 63 by default, but can be + changed by recompiling PostgreSQL after editing the NAMEDATALEN + macro in src/include/pg_config_manual.h. + + This implementation returns 63, but can be overridden by a custom + database backend that inherits most of its behavior from this one. + """ + return 63 + + def distinct_sql(self, fields): + if fields: + return 'DISTINCT ON (%s)' % ', '.join(fields) + else: + return 'DISTINCT' + + def last_executed_query(self, cursor, sql, params): + # http://initd.org/psycopg/docs/cursor.html#cursor.query + # The query attribute is a Psycopg extension to the DB API 2.0. 
+ if cursor.query is not None: + return cursor.query.decode() + return None + + def return_insert_id(self): + return "RETURNING %s", () + + def bulk_insert_sql(self, fields, placeholder_rows): + placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) + values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) + return "VALUES " + values_sql + + def adapt_datefield_value(self, value): + return value + + def adapt_datetimefield_value(self, value): + return value + + def adapt_timefield_value(self, value): + return value + + def adapt_ipaddressfield_value(self, value): + if value: + return Inet(value) + return None + + def subtract_temporals(self, internal_type, lhs, rhs): + if internal_type == 'DateField': + lhs_sql, lhs_params = lhs + rhs_sql, rhs_params = rhs + return "(interval '1 day' * (%s - %s))" % (lhs_sql, rhs_sql), lhs_params + rhs_params + return super().subtract_temporals(internal_type, lhs, rhs) + + def window_frame_range_start_end(self, start=None, end=None): + start_, end_ = super().window_frame_range_start_end(start, end) + if (start and start < 0) or (end and end > 0): + raise NotSupportedError( + 'PostgreSQL only supports UNBOUNDED together with PRECEDING ' + 'and FOLLOWING.' 
+ ) + return start_, end_ diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/schema.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..18388cc5237cd3bbc65a4ffcbebf3ca830bcc7ba --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/schema.py @@ -0,0 +1,134 @@ +import psycopg2 + +from django.db.backends.base.schema import BaseDatabaseSchemaEditor + + +class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): + + sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s USING %(column)s::%(type)s" + + sql_create_sequence = "CREATE SEQUENCE %(sequence)s" + sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE" + sql_set_sequence_max = "SELECT setval('%(sequence)s', MAX(%(column)s)) FROM %(table)s" + + sql_create_index = "CREATE INDEX %(name)s ON %(table)s%(using)s (%(columns)s)%(extra)s" + sql_create_varchar_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s varchar_pattern_ops)%(extra)s" + sql_create_text_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s text_pattern_ops)%(extra)s" + sql_delete_index = "DROP INDEX IF EXISTS %(name)s" + + # Setting the constraint to IMMEDIATE runs any deferred checks to allow + # dropping it in the same transaction. 
+ sql_delete_fk = "SET CONSTRAINTS %(name)s IMMEDIATE; ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" + + sql_delete_procedure = 'DROP FUNCTION %(procedure)s(%(param_types)s)' + + def quote_value(self, value): + return psycopg2.extensions.adapt(value) + + def _field_indexes_sql(self, model, field): + output = super()._field_indexes_sql(model, field) + like_index_statement = self._create_like_index_sql(model, field) + if like_index_statement is not None: + output.append(like_index_statement) + return output + + def _create_like_index_sql(self, model, field): + """ + Return the statement to create an index with varchar operator pattern + when the column type is 'varchar' or 'text', otherwise return None. + """ + db_type = field.db_type(connection=self.connection) + if db_type is not None and (field.db_index or field.unique): + # Fields with database column types of `varchar` and `text` need + # a second index that specifies their operator class, which is + # needed when performing correct LIKE queries outside the + # C locale. See #12234. + # + # The same doesn't apply to array fields such as varchar[size] + # and text[size], so skip them. 
+ if '[' in db_type: + return None + if db_type.startswith('varchar'): + return self._create_index_sql(model, [field], suffix='_like', sql=self.sql_create_varchar_index) + elif db_type.startswith('text'): + return self._create_index_sql(model, [field], suffix='_like', sql=self.sql_create_text_index) + return None + + def _alter_column_type_sql(self, model, old_field, new_field, new_type): + """Make ALTER TYPE with SERIAL make sense.""" + table = model._meta.db_table + if new_type.lower() in ("serial", "bigserial"): + column = new_field.column + sequence_name = "%s_%s_seq" % (table, column) + col_type = "integer" if new_type.lower() == "serial" else "bigint" + return ( + ( + self.sql_alter_column_type % { + "column": self.quote_name(column), + "type": col_type, + }, + [], + ), + [ + ( + self.sql_delete_sequence % { + "sequence": self.quote_name(sequence_name), + }, + [], + ), + ( + self.sql_create_sequence % { + "sequence": self.quote_name(sequence_name), + }, + [], + ), + ( + self.sql_alter_column % { + "table": self.quote_name(table), + "changes": self.sql_alter_column_default % { + "column": self.quote_name(column), + "default": "nextval('%s')" % self.quote_name(sequence_name), + } + }, + [], + ), + ( + self.sql_set_sequence_max % { + "table": self.quote_name(table), + "column": self.quote_name(column), + "sequence": self.quote_name(sequence_name), + }, + [], + ), + ], + ) + else: + return super()._alter_column_type_sql(model, old_field, new_field, new_type) + + def _alter_field(self, model, old_field, new_field, old_type, new_type, + old_db_params, new_db_params, strict=False): + # Drop indexes on varchar/text/citext columns that are changing to a + # different type. 
+ if (old_field.db_index or old_field.unique) and ( + (old_type.startswith('varchar') and not new_type.startswith('varchar')) or + (old_type.startswith('text') and not new_type.startswith('text')) or + (old_type.startswith('citext') and not new_type.startswith('citext')) + ): + index_name = self._create_index_name(model._meta.db_table, [old_field.column], suffix='_like') + self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_name)) + + super()._alter_field( + model, old_field, new_field, old_type, new_type, old_db_params, + new_db_params, strict, + ) + # Added an index? Create any PostgreSQL-specific indexes. + if ((not (old_field.db_index or old_field.unique) and new_field.db_index) or + (not old_field.unique and new_field.unique)): + like_index_statement = self._create_like_index_sql(model, new_field) + if like_index_statement is not None: + self.execute(like_index_statement) + + # Removed an index? Drop any PostgreSQL-specific indexes. + if old_field.unique and not (new_field.db_index or new_field.unique): + index_to_remove = self._create_index_name(model._meta.db_table, [old_field.column], suffix='_like') + self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_to_remove)) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/utils.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2c03ab36cdda75da468ef92808905362a0eb4a2a --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql/utils.py @@ -0,0 +1,7 @@ +from django.utils.timezone import utc + + +def utc_tzinfo_factory(offset): + if offset != 0: + raise AssertionError("database connection isn't set to UTC") + return utc diff --git 
a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/__init__.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..db97c5bcc9d42b24701f125ea7da9afb9a00e7bc --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/__init__.py @@ -0,0 +1,9 @@ +import warnings + +from django.utils.deprecation import RemovedInDjango30Warning + +warnings.warn( + "The django.db.backends.postgresql_psycopg2 module is deprecated in " + "favor of django.db.backends.postgresql.", + RemovedInDjango30Warning, stacklevel=2 +) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/base.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/base.py new file mode 100644 index 0000000000000000000000000000000000000000..967768434e819012fba68406e79a020ab1361ed9 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/base.py @@ -0,0 +1 @@ +from ..postgresql.base import * # NOQA diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/client.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/client.py new file mode 100644 index 0000000000000000000000000000000000000000..2bf134bcaf68e7c57bc5a8dfaf98b08fac053559 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/client.py @@ -0,0 +1 @@ +from ..postgresql.client import * # NOQA diff --git 
a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/creation.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/creation.py new file mode 100644 index 0000000000000000000000000000000000000000..aaec84e7a87bae30635470014a04ff668411de83 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/creation.py @@ -0,0 +1 @@ +from ..postgresql.creation import * # NOQA diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/features.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/features.py new file mode 100644 index 0000000000000000000000000000000000000000..3582a1754e9cb0942bef2b7ae7980ff5bac90b06 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/features.py @@ -0,0 +1 @@ +from ..postgresql.features import * # NOQA diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/introspection.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/introspection.py new file mode 100644 index 0000000000000000000000000000000000000000..1191bb2f5a9b643e4c5ce0d31c35a16e22e50d96 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/introspection.py @@ -0,0 +1 @@ +from ..postgresql.introspection import * # NOQA diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/operations.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/operations.py new file mode 100644 index 
0000000000000000000000000000000000000000..a45f5e5611abce949a7a279e326108e23bb1a2b4 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/operations.py @@ -0,0 +1 @@ +from ..postgresql.operations import * # NOQA diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/schema.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..b0263d15b79b9e38d796046ca66efd726c5ee625 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/schema.py @@ -0,0 +1 @@ +from ..postgresql.schema import * # NOQA diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/utils.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c3e6ffe254990002143d54c26ff400f594045627 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/postgresql_psycopg2/utils.py @@ -0,0 +1 @@ +from ..postgresql.utils import * # NOQA diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/signals.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/signals.py new file mode 100644 index 0000000000000000000000000000000000000000..c16a63f9f67f9e8417f1ee16608b422a9a015522 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/signals.py @@ -0,0 +1,3 @@ +from django.dispatch import Signal + +connection_created = Signal(providing_args=["connection"]) diff --git 
def decoder(conv_func):
    """
    Wrap *conv_func* so it accepts the bytestrings that Python's sqlite3
    interface hands to registered converters, decoding them to str first.
    """
    def convert(raw_bytes):
        # sqlite3 passes converter values as bytes; decode before parsing.
        return conv_func(raw_bytes.decode())
    return convert
+ operators = { + 'exact': '= %s', + 'iexact': "LIKE %s ESCAPE '\\'", + 'contains': "LIKE %s ESCAPE '\\'", + 'icontains': "LIKE %s ESCAPE '\\'", + 'regex': 'REGEXP %s', + 'iregex': "REGEXP '(?i)' || %s", + 'gt': '> %s', + 'gte': '>= %s', + 'lt': '< %s', + 'lte': '<= %s', + 'startswith': "LIKE %s ESCAPE '\\'", + 'endswith': "LIKE %s ESCAPE '\\'", + 'istartswith': "LIKE %s ESCAPE '\\'", + 'iendswith': "LIKE %s ESCAPE '\\'", + } + + # The patterns below are used to generate SQL pattern lookup clauses when + # the right-hand side of the lookup isn't a raw string (it might be an expression + # or the result of a bilateral transformation). + # In those cases, special characters for LIKE operators (e.g. \, *, _) should be + # escaped on database side. + # + # Note: we use str.format() here for readability as '%' is used as a wildcard for + # the LIKE operator. + pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" + pattern_ops = { + 'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'", + 'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'", + 'startswith': r"LIKE {} || '%%' ESCAPE '\'", + 'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'", + 'endswith': r"LIKE '%%' || {} ESCAPE '\'", + 'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'", + } + + Database = Database + SchemaEditorClass = DatabaseSchemaEditor + # Classes instantiated in __init__(). + client_class = DatabaseClient + creation_class = DatabaseCreation + features_class = DatabaseFeatures + introspection_class = DatabaseIntrospection + ops_class = DatabaseOperations + + def get_connection_params(self): + settings_dict = self.settings_dict + if not settings_dict['NAME']: + raise ImproperlyConfigured( + "settings.DATABASES is improperly configured. 
" + "Please supply the NAME value.") + kwargs = { + 'database': settings_dict['NAME'], + 'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES, + } + kwargs.update(settings_dict['OPTIONS']) + # Always allow the underlying SQLite connection to be shareable + # between multiple threads. The safe-guarding will be handled at a + # higher level by the `BaseDatabaseWrapper.allow_thread_sharing` + # property. This is necessary as the shareability is disabled by + # default in pysqlite and it cannot be changed once a connection is + # opened. + if 'check_same_thread' in kwargs and kwargs['check_same_thread']: + warnings.warn( + 'The `check_same_thread` option was provided and set to ' + 'True. It will be overridden with False. Use the ' + '`DatabaseWrapper.allow_thread_sharing` property instead ' + 'for controlling thread shareability.', + RuntimeWarning + ) + kwargs.update({'check_same_thread': False}) + if self.features.can_share_in_memory_db: + kwargs.update({'uri': True}) + return kwargs + + def get_new_connection(self, conn_params): + conn = Database.connect(**conn_params) + conn.create_function("django_date_extract", 2, _sqlite_date_extract) + conn.create_function("django_date_trunc", 2, _sqlite_date_trunc) + conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date) + conn.create_function("django_datetime_cast_time", 2, _sqlite_datetime_cast_time) + conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract) + conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc) + conn.create_function("django_time_extract", 2, _sqlite_time_extract) + conn.create_function("django_time_trunc", 2, _sqlite_time_trunc) + conn.create_function("django_time_diff", 2, _sqlite_time_diff) + conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff) + conn.create_function("regexp", 2, _sqlite_regexp) + conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta) + 
conn.create_function("django_power", 2, _sqlite_power) + conn.execute('PRAGMA foreign_keys = ON') + return conn + + def init_connection_state(self): + pass + + def create_cursor(self, name=None): + return self.connection.cursor(factory=SQLiteCursorWrapper) + + def close(self): + self.validate_thread_sharing() + # If database is in memory, closing the connection destroys the + # database. To prevent accidental data loss, ignore close requests on + # an in-memory db. + if not self.is_in_memory_db(): + BaseDatabaseWrapper.close(self) + + def _savepoint_allowed(self): + # Two conditions are required here: + # - A sufficiently recent version of SQLite to support savepoints, + # - Being in a transaction, which can only happen inside 'atomic'. + + # When 'isolation_level' is not None, sqlite3 commits before each + # savepoint; it's a bug. When it is None, savepoints don't make sense + # because autocommit is enabled. The only exception is inside 'atomic' + # blocks. To work around that bug, on SQLite, 'atomic' starts a + # transaction explicitly rather than simply disable autocommit. + return self.features.uses_savepoints and self.in_atomic_block + + def _set_autocommit(self, autocommit): + if autocommit: + level = None + else: + # sqlite3's internal default is ''. It's different from None. + # See Modules/_sqlite/connection.c. + level = '' + # 'isolation_level' is a misleading API. + # SQLite always runs at the SERIALIZABLE isolation level. + with self.wrap_database_errors: + self.connection.isolation_level = level + + def disable_constraint_checking(self): + if self.in_atomic_block: + # sqlite3 cannot disable constraint checking inside a transaction. + return False + self.cursor().execute('PRAGMA foreign_keys = OFF') + return True + + def enable_constraint_checking(self): + self.cursor().execute('PRAGMA foreign_keys = ON') + + def check_constraints(self, table_names=None): + """ + Check each table name in `table_names` for rows with invalid foreign + key references. 
This method is intended to be used in conjunction with + `disable_constraint_checking()` and `enable_constraint_checking()`, to + determine if rows with invalid references were entered while constraint + checks were off. + + Raise an IntegrityError on the first invalid foreign key reference + encountered (if any) and provide detailed information about the + invalid reference in the error message. + + Backends can override this method if they can more directly apply + constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE") + """ + cursor = self.cursor() + if table_names is None: + table_names = self.introspection.table_names(cursor) + for table_name in table_names: + primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name) + if not primary_key_column_name: + continue + key_columns = self.introspection.get_key_columns(cursor, table_name) + for column_name, referenced_table_name, referenced_column_name in key_columns: + cursor.execute( + """ + SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING + LEFT JOIN `%s` as REFERRED + ON (REFERRING.`%s` = REFERRED.`%s`) + WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL + """ + % ( + primary_key_column_name, column_name, table_name, + referenced_table_name, column_name, referenced_column_name, + column_name, referenced_column_name, + ) + ) + for bad_row in cursor.fetchall(): + raise utils.IntegrityError( + "The row in table '%s' with primary key '%s' has an " + "invalid foreign key: %s.%s contains a value '%s' that " + "does not have a corresponding value in %s.%s." % ( + table_name, bad_row[0], table_name, column_name, + bad_row[1], referenced_table_name, referenced_column_name, + ) + ) + + def is_usable(self): + return True + + def _start_transaction_under_autocommit(self): + """ + Start a transaction explicitly in autocommit mode. + + Staying in autocommit mode works around a bug of sqlite3 that breaks + savepoints when autocommit is disabled. 
+ """ + self.cursor().execute("BEGIN") + + def is_in_memory_db(self): + return self.creation.is_in_memory_db(self.settings_dict['NAME']) + + +FORMAT_QMARK_REGEX = re.compile(r'(?= 1: + print("Destroying old test database for alias %s..." % ( + self._get_database_display_str(verbosity, test_database_name), + )) + if os.access(test_database_name, os.F_OK): + if not autoclobber: + confirm = input( + "Type 'yes' if you would like to try deleting the test " + "database '%s', or 'no' to cancel: " % test_database_name + ) + if autoclobber or confirm == 'yes': + try: + os.remove(test_database_name) + except Exception as e: + sys.stderr.write("Got an error deleting the old test database: %s\n" % e) + sys.exit(2) + else: + print("Tests cancelled.") + sys.exit(1) + return test_database_name + + def get_test_db_clone_settings(self, suffix): + orig_settings_dict = self.connection.settings_dict + source_database_name = orig_settings_dict['NAME'] + if self.is_in_memory_db(source_database_name): + return orig_settings_dict + else: + new_settings_dict = orig_settings_dict.copy() + root, ext = os.path.splitext(orig_settings_dict['NAME']) + new_settings_dict['NAME'] = '{}_{}.{}'.format(root, suffix, ext) + return new_settings_dict + + def _clone_test_db(self, suffix, verbosity, keepdb=False): + source_database_name = self.connection.settings_dict['NAME'] + target_database_name = self.get_test_db_clone_settings(suffix)['NAME'] + # Forking automatically makes a copy of an in-memory database. + if not self.is_in_memory_db(source_database_name): + # Erase the old test database + if os.access(target_database_name, os.F_OK): + if keepdb: + return + if verbosity >= 1: + print("Destroying old test database for alias %s..." 
% ( + self._get_database_display_str(verbosity, target_database_name), + )) + try: + os.remove(target_database_name) + except Exception as e: + sys.stderr.write("Got an error deleting the old test database: %s\n" % e) + sys.exit(2) + try: + shutil.copy(source_database_name, target_database_name) + except Exception as e: + sys.stderr.write("Got an error cloning the test database: %s\n" % e) + sys.exit(2) + + def _destroy_test_db(self, test_database_name, verbosity): + if test_database_name and not self.is_in_memory_db(test_database_name): + # Remove the SQLite database file + os.remove(test_database_name) + + def test_db_signature(self): + """ + Return a tuple that uniquely identifies a test database. + + This takes into account the special cases of ":memory:" and "" for + SQLite since the databases will be distinct despite having the same + TEST NAME. See http://www.sqlite.org/inmemorydb.html + """ + test_database_name = self._get_test_db_name() + sig = [self.connection.settings_dict['NAME']] + if self.is_in_memory_db(test_database_name): + sig.append(self.connection.alias) + return tuple(sig) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/sqlite3/features.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/sqlite3/features.py new file mode 100644 index 0000000000000000000000000000000000000000..82c1a34d8936254d99b81499bfd1b110f1c81e01 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/sqlite3/features.py @@ -0,0 +1,59 @@ +from django.db import utils +from django.db.backends.base.features import BaseDatabaseFeatures +from django.utils.functional import cached_property + +from .base import Database + + +class DatabaseFeatures(BaseDatabaseFeatures): + # SQLite cannot handle us only partially reading from a cursor's result set + # and then writing the same rows to the database in another cursor. 
This + # setting ensures we always read result sets fully into memory all in one + # go. + can_use_chunked_reads = False + test_db_allows_multiple_connections = False + supports_unspecified_pk = True + supports_timezones = False + max_query_params = 999 + supports_mixed_date_datetime_comparisons = False + supports_column_check_constraints = False + autocommits_when_autocommit_is_off = True + can_introspect_decimal_field = False + can_introspect_positive_integer_field = True + can_introspect_small_integer_field = True + supports_transactions = True + atomic_transactions = False + can_rollback_ddl = True + supports_atomic_references_rename = False + supports_paramstyle_pyformat = False + supports_sequence_reset = False + can_clone_databases = True + supports_temporal_subtraction = True + ignores_table_name_case = True + supports_cast_with_precision = False + uses_savepoints = Database.sqlite_version_info >= (3, 6, 8) + supports_index_column_ordering = Database.sqlite_version_info >= (3, 3, 0) + can_release_savepoints = uses_savepoints + can_share_in_memory_db = ( + Database.__name__ == 'sqlite3.dbapi2' and + Database.sqlite_version_info >= (3, 7, 13) + ) + + @cached_property + def supports_stddev(self): + """ + Confirm support for STDDEV and related stats functions. + + SQLite supports STDDEV as an extension package; so + connection.ops.check_expression_support() can't unilaterally + rule out support for STDDEV. Manually check whether the call works. 
+ """ + with self.connection.cursor() as cursor: + cursor.execute('CREATE TABLE STDDEV_TEST (X INT)') + try: + cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST') + has_support = True + except utils.DatabaseError: + has_support = False + cursor.execute('DROP TABLE STDDEV_TEST') + return has_support diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/sqlite3/introspection.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/sqlite3/introspection.py new file mode 100644 index 0000000000000000000000000000000000000000..de6d6da46542123e05f72aeca7dd644f4bbcfc76 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/sqlite3/introspection.py @@ -0,0 +1,313 @@ +import re +import warnings + +from django.db.backends.base.introspection import ( + BaseDatabaseIntrospection, FieldInfo, TableInfo, +) +from django.db.models.indexes import Index +from django.utils.deprecation import RemovedInDjango21Warning + +field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$') + + +def get_field_size(name): + """ Extract the size number from a "varchar(11)" type name """ + m = field_size_re.search(name) + return int(m.group(1)) if m else None + + +# This light wrapper "fakes" a dictionary interface, because some SQLite data +# types include variables in them -- e.g. "varchar(30)" -- and can't be matched +# as a simple dictionary lookup. +class FlexibleFieldLookupDict: + # Maps SQL types to Django Field types. Some of the SQL types have multiple + # entries here because SQLite allows for anything and doesn't normalize the + # field type; it uses whatever was given. 
+ base_data_types_reverse = { + 'bool': 'BooleanField', + 'boolean': 'BooleanField', + 'smallint': 'SmallIntegerField', + 'smallint unsigned': 'PositiveSmallIntegerField', + 'smallinteger': 'SmallIntegerField', + 'int': 'IntegerField', + 'integer': 'IntegerField', + 'bigint': 'BigIntegerField', + 'integer unsigned': 'PositiveIntegerField', + 'decimal': 'DecimalField', + 'real': 'FloatField', + 'text': 'TextField', + 'char': 'CharField', + 'blob': 'BinaryField', + 'date': 'DateField', + 'datetime': 'DateTimeField', + 'time': 'TimeField', + } + + def __getitem__(self, key): + key = key.lower() + try: + return self.base_data_types_reverse[key] + except KeyError: + size = get_field_size(key) + if size is not None: + return ('CharField', {'max_length': size}) + raise KeyError + + +class DatabaseIntrospection(BaseDatabaseIntrospection): + data_types_reverse = FlexibleFieldLookupDict() + + def get_table_list(self, cursor): + """Return a list of table and view names in the current database.""" + # Skip the sqlite_sequence system table used for autoincrement key + # generation. + cursor.execute(""" + SELECT name, type FROM sqlite_master + WHERE type in ('table', 'view') AND NOT name='sqlite_sequence' + ORDER BY name""") + return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()] + + def get_table_description(self, cursor, table_name): + """ + Return a description of the table with the DB-API cursor.description + interface. + """ + return [ + FieldInfo( + info['name'], + info['type'], + None, + info['size'], + None, + None, + info['null_ok'], + info['default'], + ) for info in self._table_info(cursor, table_name) + ] + + def get_sequences(self, cursor, table_name, table_fields=()): + pk_col = self.get_primary_key_column(cursor, table_name) + return [{'table': table_name, 'column': pk_col}] + + def column_name_converter(self, name): + """ + SQLite will in some cases, e.g. 
when returning columns from views and + subselects, return column names in 'alias."column"' format instead of + simply 'column'. + + Affects SQLite < 3.7.15, fixed by http://www.sqlite.org/src/info/5526e0aa3c + """ + # TODO: remove when SQLite < 3.7.15 is sufficiently old. + # 3.7.13 ships in Debian stable as of 2014-03-21. + if self.connection.Database.sqlite_version_info < (3, 7, 15): + return name.split('.')[-1].strip('"') + else: + return name + + def get_relations(self, cursor, table_name): + """ + Return a dictionary of {field_name: (field_name_other_table, other_table)} + representing all relationships to the given table. + """ + # Dictionary of relations to return + relations = {} + + # Schema for this table + cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"]) + try: + results = cursor.fetchone()[0].strip() + except TypeError: + # It might be a view, then no results will be returned + return relations + results = results[results.index('(') + 1:results.rindex(')')] + + # Walk through and look for references to other tables. SQLite doesn't + # really have enforced references, but since it echoes out the SQL used + # to create the table we can look for REFERENCES statements used there. 
+ for field_desc in results.split(','): + field_desc = field_desc.strip() + if field_desc.startswith("UNIQUE"): + continue + + m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I) + if not m: + continue + table, column = [s.strip('"') for s in m.groups()] + + if field_desc.startswith("FOREIGN KEY"): + # Find name of the target FK field + m = re.match(r'FOREIGN KEY\s*\(([^\)]*)\).*', field_desc, re.I) + field_name = m.groups()[0].strip('"') + else: + field_name = field_desc.split()[0].strip('"') + + cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table]) + result = cursor.fetchall()[0] + other_table_results = result[0].strip() + li, ri = other_table_results.index('('), other_table_results.rindex(')') + other_table_results = other_table_results[li + 1:ri] + + for other_desc in other_table_results.split(','): + other_desc = other_desc.strip() + if other_desc.startswith('UNIQUE'): + continue + + other_name = other_desc.split(' ', 1)[0].strip('"') + if other_name == column: + relations[field_name] = (other_name, table) + break + + return relations + + def get_key_columns(self, cursor, table_name): + """ + Return a list of (column_name, referenced_table_name, referenced_column_name) + for all key columns in given table. + """ + key_columns = [] + + # Schema for this table + cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"]) + results = cursor.fetchone()[0].strip() + results = results[results.index('(') + 1:results.rindex(')')] + + # Walk through and look for references to other tables. SQLite doesn't + # really have enforced references, but since it echoes out the SQL used + # to create the table we can look for REFERENCES statements used there. 
+ for field_index, field_desc in enumerate(results.split(',')): + field_desc = field_desc.strip() + if field_desc.startswith("UNIQUE"): + continue + + m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I) + if not m: + continue + + # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns + key_columns.append(tuple(s.strip('"') for s in m.groups())) + + return key_columns + + def get_indexes(self, cursor, table_name): + warnings.warn( + "get_indexes() is deprecated in favor of get_constraints().", + RemovedInDjango21Warning, stacklevel=2 + ) + indexes = {} + for info in self._table_info(cursor, table_name): + if info['pk'] != 0: + indexes[info['name']] = {'primary_key': True, + 'unique': False} + cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name)) + # seq, name, unique + for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]: + cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index)) + info = cursor.fetchall() + # Skip indexes across multiple fields + if len(info) != 1: + continue + name = info[0][2] # seqno, cid, name + indexes[name] = {'primary_key': indexes.get(name, {}).get("primary_key", False), + 'unique': unique} + return indexes + + def get_primary_key_column(self, cursor, table_name): + """Return the column name of the primary key for the given table.""" + # Don't use PRAGMA because that causes issues with some transactions + cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"]) + row = cursor.fetchone() + if row is None: + raise ValueError("Table %s does not exist" % table_name) + results = row[0].strip() + results = results[results.index('(') + 1:results.rindex(')')] + for field_desc in results.split(','): + field_desc = field_desc.strip() + m = re.search('"(.*)".*PRIMARY KEY( AUTOINCREMENT)?', field_desc) + if m: + return m.groups()[0] + return None + + def 
_table_info(self, cursor, name): + cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name)) + # cid, name, type, notnull, default_value, pk + return [{ + 'name': field[1], + 'type': field[2], + 'size': get_field_size(field[2]), + 'null_ok': not field[3], + 'default': field[4], + 'pk': field[5], # undocumented + } for field in cursor.fetchall()] + + def _get_foreign_key_constraints(self, cursor, table_name): + constraints = {} + cursor.execute('PRAGMA foreign_key_list(%s)' % self.connection.ops.quote_name(table_name)) + for row in cursor.fetchall(): + # Remaining on_update/on_delete/match values are of no interest. + id_, _, table, from_, to = row[:5] + constraints['fk_%d' % id_] = { + 'columns': [from_], + 'primary_key': False, + 'unique': False, + 'foreign_key': (table, to), + 'check': False, + 'index': False, + } + return constraints + + def get_constraints(self, cursor, table_name): + """ + Retrieve any constraints or keys (unique, pk, fk, check, index) across + one or more columns. + """ + constraints = {} + # Get the index info + cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name)) + for row in cursor.fetchall(): + # Sqlite3 3.8.9+ has 5 columns, however older versions only give 3 + # columns. Discard last 2 columns if there. 
+ number, index, unique = row[:3] + # Get the index info for that index + cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index)) + for index_rank, column_rank, column in cursor.fetchall(): + if index not in constraints: + constraints[index] = { + "columns": [], + "primary_key": False, + "unique": bool(unique), + "foreign_key": False, + "check": False, + "index": True, + } + constraints[index]['columns'].append(column) + # Add type and column orders for indexes + if constraints[index]['index'] and not constraints[index]['unique']: + # SQLite doesn't support any index type other than b-tree + constraints[index]['type'] = Index.suffix + cursor.execute( + "SELECT sql FROM sqlite_master " + "WHERE type='index' AND name=%s" % self.connection.ops.quote_name(index) + ) + orders = [] + # There would be only 1 row to loop over + for sql, in cursor.fetchall(): + order_info = sql.split('(')[-1].split(')')[0].split(',') + orders = ['DESC' if info.endswith('DESC') else 'ASC' for info in order_info] + constraints[index]['orders'] = orders + # Get the PK + pk_column = self.get_primary_key_column(cursor, table_name) + if pk_column: + # SQLite doesn't actually give a name to the PK constraint, + # so we invent one. This is fine, as the SQLite backend never + # deletes PK constraints by name, as you can't delete constraints + # in SQLite; we remake the table with a new PK instead. + constraints["__primary__"] = { + "columns": [pk_column], + "primary_key": True, + "unique": False, # It's not actually a unique constraint. 
+ "foreign_key": False, + "check": False, + "index": False, + } + constraints.update(self._get_foreign_key_constraints(cursor, table_name)) + return constraints diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/sqlite3/operations.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/sqlite3/operations.py new file mode 100644 index 0000000000000000000000000000000000000000..054e3afbd095dadbe72df3305b61130b5ba56654 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/sqlite3/operations.py @@ -0,0 +1,283 @@ +import datetime +import uuid +from decimal import Decimal + +from django.conf import settings +from django.core.exceptions import FieldError +from django.db import utils +from django.db.backends.base.operations import BaseDatabaseOperations +from django.db.models import aggregates, fields +from django.utils import timezone +from django.utils.dateparse import parse_date, parse_datetime, parse_time +from django.utils.duration import duration_string + + +class DatabaseOperations(BaseDatabaseOperations): + cast_char_field_without_max_length = 'text' + + def bulk_batch_size(self, fields, objs): + """ + SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of + 999 variables per query. + + If there's only a single field to insert, the limit is 500 + (SQLITE_MAX_COMPOUND_SELECT). 
+ """ + if len(fields) == 1: + return 500 + elif len(fields) > 1: + return self.connection.features.max_query_params // len(fields) + else: + return len(objs) + + def check_expression_support(self, expression): + bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField) + bad_aggregates = (aggregates.Sum, aggregates.Avg, aggregates.Variance, aggregates.StdDev) + if isinstance(expression, bad_aggregates): + for expr in expression.get_source_expressions(): + try: + output_field = expr.output_field + except FieldError: + # Not every subexpression has an output_field which is fine + # to ignore. + pass + else: + if isinstance(output_field, bad_fields): + raise NotImplementedError( + 'You cannot use Sum, Avg, StdDev, and Variance ' + 'aggregations on date/time fields in sqlite3 ' + 'since date/time is saved as text.' + ) + + def date_extract_sql(self, lookup_type, field_name): + """ + Support EXTRACT with a user-defined function django_date_extract() + that's registered in connect(). Use single quotes because this is a + string and could otherwise cause a collision with a field name. 
+ """ + return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name) + + def date_interval_sql(self, timedelta): + return "'%s'" % duration_string(timedelta) + + def format_for_duration_arithmetic(self, sql): + """Do nothing since formatting is handled in the custom function.""" + return sql + + def date_trunc_sql(self, lookup_type, field_name): + return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name) + + def time_trunc_sql(self, lookup_type, field_name): + return "django_time_trunc('%s', %s)" % (lookup_type.lower(), field_name) + + def _convert_tzname_to_sql(self, tzname): + return "'%s'" % tzname if settings.USE_TZ else 'NULL' + + def datetime_cast_date_sql(self, field_name, tzname): + return "django_datetime_cast_date(%s, %s)" % ( + field_name, self._convert_tzname_to_sql(tzname), + ) + + def datetime_cast_time_sql(self, field_name, tzname): + return "django_datetime_cast_time(%s, %s)" % ( + field_name, self._convert_tzname_to_sql(tzname), + ) + + def datetime_extract_sql(self, lookup_type, field_name, tzname): + return "django_datetime_extract('%s', %s, %s)" % ( + lookup_type.lower(), field_name, self._convert_tzname_to_sql(tzname), + ) + + def datetime_trunc_sql(self, lookup_type, field_name, tzname): + return "django_datetime_trunc('%s', %s, %s)" % ( + lookup_type.lower(), field_name, self._convert_tzname_to_sql(tzname), + ) + + def time_extract_sql(self, lookup_type, field_name): + return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name) + + def pk_default_value(self): + return "NULL" + + def _quote_params_for_last_executed_query(self, params): + """ + Only for last_executed_query! Don't use this to execute SQL queries! + """ + # This function is limited both by SQLITE_LIMIT_VARIABLE_NUMBER (the + # number of parameters, default = 999) and SQLITE_MAX_COLUMN (the + # number of return values, default = 2000). 
Since Python's sqlite3 + # module doesn't expose the get_limit() C API, assume the default + # limits are in effect and split the work in batches if needed. + BATCH_SIZE = 999 + if len(params) > BATCH_SIZE: + results = () + for index in range(0, len(params), BATCH_SIZE): + chunk = params[index:index + BATCH_SIZE] + results += self._quote_params_for_last_executed_query(chunk) + return results + + sql = 'SELECT ' + ', '.join(['QUOTE(?)'] * len(params)) + # Bypass Django's wrappers and use the underlying sqlite3 connection + # to avoid logging this query - it would trigger infinite recursion. + cursor = self.connection.connection.cursor() + # Native sqlite3 cursors cannot be used as context managers. + try: + return cursor.execute(sql, params).fetchone() + finally: + cursor.close() + + def last_executed_query(self, cursor, sql, params): + # Python substitutes parameters in Modules/_sqlite/cursor.c with: + # pysqlite_statement_bind_parameters(self->statement, parameters, allow_8bit_chars); + # Unfortunately there is no way to reach self->statement from Python, + # so we quote and substitute parameters manually. + if params: + if isinstance(params, (list, tuple)): + params = self._quote_params_for_last_executed_query(params) + else: + values = tuple(params.values()) + values = self._quote_params_for_last_executed_query(values) + params = dict(zip(params, values)) + return sql % params + # For consistency with SQLiteCursorWrapper.execute(), just return sql + # when there are no parameters. See #13648 and #17158. + else: + return sql + + def quote_name(self, name): + if name.startswith('"') and name.endswith('"'): + return name # Quoting once is enough. 
+ return '"%s"' % name + + def no_limit_value(self): + return -1 + + def sql_flush(self, style, tables, sequences, allow_cascade=False): + sql = ['%s %s %s;' % ( + style.SQL_KEYWORD('DELETE'), + style.SQL_KEYWORD('FROM'), + style.SQL_FIELD(self.quote_name(table)) + ) for table in tables] + # Note: No requirement for reset of auto-incremented indices (cf. other + # sql_flush() implementations). Just return SQL at this point + return sql + + def execute_sql_flush(self, using, sql_list): + # To prevent possible violation of foreign key constraints, deactivate + # constraints outside of the transaction created in super(). + with self.connection.constraint_checks_disabled(): + super().execute_sql_flush(using, sql_list) + + def adapt_datetimefield_value(self, value): + if value is None: + return None + + # Expression values are adapted by the database. + if hasattr(value, 'resolve_expression'): + return value + + # SQLite doesn't support tz-aware datetimes + if timezone.is_aware(value): + if settings.USE_TZ: + value = timezone.make_naive(value, self.connection.timezone) + else: + raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.") + + return str(value) + + def adapt_timefield_value(self, value): + if value is None: + return None + + # Expression values are adapted by the database. 
+ if hasattr(value, 'resolve_expression'): + return value + + # SQLite doesn't support tz-aware datetimes + if timezone.is_aware(value): + raise ValueError("SQLite backend does not support timezone-aware times.") + + return str(value) + + def get_db_converters(self, expression): + converters = super().get_db_converters(expression) + internal_type = expression.output_field.get_internal_type() + if internal_type == 'DateTimeField': + converters.append(self.convert_datetimefield_value) + elif internal_type == 'DateField': + converters.append(self.convert_datefield_value) + elif internal_type == 'TimeField': + converters.append(self.convert_timefield_value) + elif internal_type == 'DecimalField': + converters.append(self.convert_decimalfield_value) + elif internal_type == 'UUIDField': + converters.append(self.convert_uuidfield_value) + elif internal_type in ('NullBooleanField', 'BooleanField'): + converters.append(self.convert_booleanfield_value) + return converters + + def convert_datetimefield_value(self, value, expression, connection): + if value is not None: + if not isinstance(value, datetime.datetime): + value = parse_datetime(value) + if settings.USE_TZ and not timezone.is_aware(value): + value = timezone.make_aware(value, self.connection.timezone) + return value + + def convert_datefield_value(self, value, expression, connection): + if value is not None: + if not isinstance(value, datetime.date): + value = parse_date(value) + return value + + def convert_timefield_value(self, value, expression, connection): + if value is not None: + if not isinstance(value, datetime.time): + value = parse_time(value) + return value + + def convert_decimalfield_value(self, value, expression, connection): + if value is not None: + value = expression.output_field.format_number(value) + value = Decimal(value) + return value + + def convert_uuidfield_value(self, value, expression, connection): + if value is not None: + value = uuid.UUID(value) + return value + + def 
convert_booleanfield_value(self, value, expression, connection): + return bool(value) if value in (1, 0) else value + + def bulk_insert_sql(self, fields, placeholder_rows): + return " UNION ALL ".join( + "SELECT %s" % ", ".join(row) + for row in placeholder_rows + ) + + def combine_expression(self, connector, sub_expressions): + # SQLite doesn't have a power function, so we fake it with a + # user-defined function django_power that's registered in connect(). + if connector == '^': + return 'django_power(%s)' % ','.join(sub_expressions) + return super().combine_expression(connector, sub_expressions) + + def combine_duration_expression(self, connector, sub_expressions): + if connector not in ['+', '-']: + raise utils.DatabaseError('Invalid connector for timedelta: %s.' % connector) + fn_params = ["'%s'" % connector] + sub_expressions + if len(fn_params) > 3: + raise ValueError('Too many params for timedelta operations.') + return "django_format_dtdelta(%s)" % ', '.join(fn_params) + + def integer_field_range(self, internal_type): + # SQLite doesn't enforce any integer constraints + return (None, None) + + def subtract_temporals(self, internal_type, lhs, rhs): + lhs_sql, lhs_params = lhs + rhs_sql, rhs_params = rhs + if internal_type == 'TimeField': + return "django_time_diff(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params + return "django_timestamp_diff(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/sqlite3/schema.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/sqlite3/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..add05843c34ac59876101353eef67c610f686054 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/sqlite3/schema.py @@ -0,0 +1,370 @@ +import codecs +import contextlib +import copy +from decimal import Decimal + 
+from django.apps.registry import Apps +from django.db.backends.base.schema import BaseDatabaseSchemaEditor +from django.db.backends.ddl_references import Statement +from django.db.transaction import atomic +from django.db.utils import NotSupportedError + + +class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): + + sql_delete_table = "DROP TABLE %(table)s" + sql_create_fk = None + sql_create_inline_fk = "REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED" + sql_create_unique = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)" + sql_delete_unique = "DROP INDEX %(name)s" + + def __enter__(self): + # Some SQLite schema alterations need foreign key constraints to be + # disabled. Enforce it here for the duration of the transaction. + self.connection.disable_constraint_checking() + return super().__enter__() + + def __exit__(self, exc_type, exc_value, traceback): + super().__exit__(exc_type, exc_value, traceback) + self.connection.enable_constraint_checking() + + def quote_value(self, value): + # The backend "mostly works" without this function and there are use + # cases for compiling Python without the sqlite3 libraries (e.g. + # security hardening). 
+ try: + import sqlite3 + value = sqlite3.adapt(value) + except ImportError: + pass + except sqlite3.ProgrammingError: + pass + # Manual emulation of SQLite parameter quoting + if isinstance(value, type(True)): + return str(int(value)) + elif isinstance(value, (Decimal, float, int)): + return str(value) + elif isinstance(value, str): + return "'%s'" % value.replace("\'", "\'\'") + elif value is None: + return "NULL" + elif isinstance(value, (bytes, bytearray, memoryview)): + # Bytes are only allowed for BLOB fields, encoded as string + # literals containing hexadecimal data and preceded by a single "X" + # character: + # value = b'\x01\x02' => value_hex = b'0102' => return X'0102' + value = bytes(value) + hex_encoder = codecs.getencoder('hex_codec') + value_hex, _length = hex_encoder(value) + # Use 'ascii' encoding for b'01' => '01', no need to use force_text here. + return "X'%s'" % value_hex.decode('ascii') + else: + raise ValueError("Cannot quote parameter value %r of type %s" % (value, type(value))) + + def _is_referenced_by_fk_constraint(self, table_name, column_name=None, ignore_self=False): + """ + Return whether or not the provided table name is referenced by another + one. If `column_name` is specified, only references pointing to that + column are considered. If `ignore_self` is True, self-referential + constraints are ignored. 
+ """ + with self.connection.cursor() as cursor: + for other_table in self.connection.introspection.get_table_list(cursor): + if ignore_self and other_table.name == table_name: + continue + constraints = self.connection.introspection._get_foreign_key_constraints(cursor, other_table.name) + for constraint in constraints.values(): + constraint_table, constraint_column = constraint['foreign_key'] + if (constraint_table == table_name and + (column_name is None or constraint_column == column_name)): + return True + return False + + def alter_db_table(self, model, old_db_table, new_db_table, disable_constraints=True): + if disable_constraints and self._is_referenced_by_fk_constraint(old_db_table): + if self.connection.in_atomic_block: + raise NotSupportedError(( + 'Renaming the %r table while in a transaction is not ' + 'supported on SQLite because it would break referential ' + 'integrity. Try adding `atomic = False` to the Migration class.' + ) % old_db_table) + self.connection.enable_constraint_checking() + super().alter_db_table(model, old_db_table, new_db_table) + self.connection.disable_constraint_checking() + else: + super().alter_db_table(model, old_db_table, new_db_table) + + def alter_field(self, model, old_field, new_field, strict=False): + old_field_name = old_field.name + table_name = model._meta.db_table + _, old_column_name = old_field.get_attname_column() + if (new_field.name != old_field_name and + self._is_referenced_by_fk_constraint(table_name, old_column_name, ignore_self=True)): + if self.connection.in_atomic_block: + raise NotSupportedError(( + 'Renaming the %r.%r column while in a transaction is not ' + 'supported on SQLite because it would break referential ' + 'integrity. Try adding `atomic = False` to the Migration class.' 
+ ) % (model._meta.db_table, old_field_name)) + with atomic(self.connection.alias): + super().alter_field(model, old_field, new_field, strict=strict) + # Follow SQLite's documented procedure for performing changes + # that don't affect the on-disk content. + # https://sqlite.org/lang_altertable.html#otheralter + with self.connection.cursor() as cursor: + schema_version = cursor.execute('PRAGMA schema_version').fetchone()[0] + cursor.execute('PRAGMA writable_schema = 1') + references_template = ' REFERENCES "%s" ("%%s") ' % table_name + new_column_name = new_field.get_attname_column()[1] + search = references_template % old_column_name + replacement = references_template % new_column_name + cursor.execute('UPDATE sqlite_master SET sql = replace(sql, %s, %s)', (search, replacement)) + cursor.execute('PRAGMA schema_version = %d' % (schema_version + 1)) + cursor.execute('PRAGMA writable_schema = 0') + # The integrity check will raise an exception and rollback + # the transaction if the sqlite_master updates corrupt the + # database. + cursor.execute('PRAGMA integrity_check') + # Perform a VACUUM to refresh the database representation from + # the sqlite_master table. + with self.connection.cursor() as cursor: + cursor.execute('VACUUM') + else: + super().alter_field(model, old_field, new_field, strict=strict) + + def _remake_table(self, model, create_field=None, delete_field=None, alter_field=None): + """ + Shortcut to transform a model from old_model into new_model + + The essential steps are: + 1. rename the model's existing table, e.g. "app_model" to "app_model__old" + 2. create a table with the updated definition called "app_model" + 3. copy the data from the old renamed table to the new table + 4. delete the "app_model__old" table + """ + # Self-referential fields must be recreated rather than copied from + # the old model to ensure their remote_field.field_name doesn't refer + # to an altered field. 
+ def is_self_referential(f): + return f.is_relation and f.remote_field.model is model + # Work out the new fields dict / mapping + body = { + f.name: f.clone() if is_self_referential(f) else f + for f in model._meta.local_concrete_fields + } + # Since mapping might mix column names and default values, + # its values must be already quoted. + mapping = {f.column: self.quote_name(f.column) for f in model._meta.local_concrete_fields} + # This maps field names (not columns) for things like unique_together + rename_mapping = {} + # If any of the new or altered fields is introducing a new PK, + # remove the old one + restore_pk_field = None + if getattr(create_field, 'primary_key', False) or ( + alter_field and getattr(alter_field[1], 'primary_key', False)): + for name, field in list(body.items()): + if field.primary_key: + field.primary_key = False + restore_pk_field = field + if field.auto_created: + del body[name] + del mapping[field.column] + # Add in any created fields + if create_field: + body[create_field.name] = create_field + # Choose a default and insert it into the copy map + if not create_field.many_to_many and create_field.concrete: + mapping[create_field.column] = self.quote_value( + self.effective_default(create_field) + ) + # Add in any altered fields + if alter_field: + old_field, new_field = alter_field + body.pop(old_field.name, None) + mapping.pop(old_field.column, None) + body[new_field.name] = new_field + if old_field.null and not new_field.null: + case_sql = "coalesce(%(col)s, %(default)s)" % { + 'col': self.quote_name(old_field.column), + 'default': self.quote_value(self.effective_default(new_field)) + } + mapping[new_field.column] = case_sql + else: + mapping[new_field.column] = self.quote_name(old_field.column) + rename_mapping[old_field.name] = new_field.name + # Remove any deleted fields + if delete_field: + del body[delete_field.name] + del mapping[delete_field.column] + # Remove any implicit M2M tables + if delete_field.many_to_many and 
delete_field.remote_field.through._meta.auto_created: + return self.delete_model(delete_field.remote_field.through) + # Work inside a new app registry + apps = Apps() + + # Provide isolated instances of the fields to the new model body so + # that the existing model's internals aren't interfered with when + # the dummy model is constructed. + body = copy.deepcopy(body) + + # Work out the new value of unique_together, taking renames into + # account + unique_together = [ + [rename_mapping.get(n, n) for n in unique] + for unique in model._meta.unique_together + ] + + # Work out the new value for index_together, taking renames into + # account + index_together = [ + [rename_mapping.get(n, n) for n in index] + for index in model._meta.index_together + ] + + indexes = model._meta.indexes + if delete_field: + indexes = [ + index for index in indexes + if delete_field.name not in index.fields + ] + + # Construct a new model for the new state + meta_contents = { + 'app_label': model._meta.app_label, + 'db_table': model._meta.db_table, + 'unique_together': unique_together, + 'index_together': index_together, + 'indexes': indexes, + 'apps': apps, + } + meta = type("Meta", (), meta_contents) + body['Meta'] = meta + body['__module__'] = model.__module__ + + temp_model = type(model._meta.object_name, model.__bases__, body) + + # We need to modify model._meta.db_table, but everything explodes + # if the change isn't reversed before the end of this method. This + # context manager helps us avoid that situation. 
+ @contextlib.contextmanager + def altered_table_name(model, temporary_table_name): + original_table_name = model._meta.db_table + model._meta.db_table = temporary_table_name + yield + model._meta.db_table = original_table_name + + with altered_table_name(model, model._meta.db_table + "__old"): + # Rename the old table to make way for the new + self.alter_db_table( + model, temp_model._meta.db_table, model._meta.db_table, + disable_constraints=False, + ) + # Create a new table with the updated schema. + self.create_model(temp_model) + + # Copy data from the old table into the new table + field_maps = list(mapping.items()) + self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % ( + self.quote_name(temp_model._meta.db_table), + ', '.join(self.quote_name(x) for x, y in field_maps), + ', '.join(y for x, y in field_maps), + self.quote_name(model._meta.db_table), + )) + + # Delete the old table + self.delete_model(model, handle_autom2m=False) + + # Run deferred SQL on correct table + for sql in self.deferred_sql: + self.execute(sql) + self.deferred_sql = [] + # Fix any PK-removed field + if restore_pk_field: + restore_pk_field.primary_key = True + + def delete_model(self, model, handle_autom2m=True): + if handle_autom2m: + super().delete_model(model) + else: + # Delete the table (and only that) + self.execute(self.sql_delete_table % { + "table": self.quote_name(model._meta.db_table), + }) + # Remove all deferred statements referencing the deleted table. + for sql in list(self.deferred_sql): + if isinstance(sql, Statement) and sql.references_table(model._meta.db_table): + self.deferred_sql.remove(sql) + + def add_field(self, model, field): + """ + Create a field on a model. Usually involves adding a column, but may + involve adding a table instead (for M2M fields). 
+ """ + # Special-case implicit M2M tables + if field.many_to_many and field.remote_field.through._meta.auto_created: + return self.create_model(field.remote_field.through) + self._remake_table(model, create_field=field) + + def remove_field(self, model, field): + """ + Remove a field from a model. Usually involves deleting a column, + but for M2Ms may involve deleting a table. + """ + # M2M fields are a special case + if field.many_to_many: + # For implicit M2M tables, delete the auto-created table + if field.remote_field.through._meta.auto_created: + self.delete_model(field.remote_field.through) + # For explicit "through" M2M fields, do nothing + # For everything else, remake. + else: + # It might not actually have a column behind it + if field.db_parameters(connection=self.connection)['type'] is None: + return + self._remake_table(model, delete_field=field) + + def _alter_field(self, model, old_field, new_field, old_type, new_type, + old_db_params, new_db_params, strict=False): + """Perform a "physical" (non-ManyToMany) field update.""" + # Alter by remaking table + self._remake_table(model, alter_field=(old_field, new_field)) + # Rebuild tables with FKs pointing to this field if the PK type changed. + if old_field.primary_key and new_field.primary_key and old_type != new_type: + for rel in new_field.model._meta.related_objects: + if not rel.many_to_many: + self._remake_table(rel.related_model) + + def _alter_many_to_many(self, model, old_field, new_field, strict): + """Alter M2Ms to repoint their to= endpoints.""" + if old_field.remote_field.through._meta.db_table == new_field.remote_field.through._meta.db_table: + # The field name didn't change, but some options did; we have to propagate this altering. 
+ self._remake_table( + old_field.remote_field.through, + alter_field=( + # We need the field that points to the target model, so we can tell alter_field to change it - + # this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model) + old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()), + new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()), + ), + ) + return + + # Make a new through table + self.create_model(new_field.remote_field.through) + # Copy the data across + self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % ( + self.quote_name(new_field.remote_field.through._meta.db_table), + ', '.join([ + "id", + new_field.m2m_column_name(), + new_field.m2m_reverse_name(), + ]), + ', '.join([ + "id", + old_field.m2m_column_name(), + old_field.m2m_reverse_name(), + ]), + self.quote_name(old_field.remote_field.through._meta.db_table), + )) + # Delete the old through table + self.delete_model(old_field.remote_field.through) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/utils.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f4641e3db8b055b080d16f4885bef6a641c26459 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/backends/utils.py @@ -0,0 +1,256 @@ +import datetime +import decimal +import functools +import hashlib +import logging +from time import time + +from django.conf import settings +from django.db.utils import NotSupportedError +from django.utils.encoding import force_bytes +from django.utils.timezone import utc + +logger = logging.getLogger('django.db.backends') + + +class CursorWrapper: + def __init__(self, cursor, db): + self.cursor = cursor + self.db = db + + WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset']) + + def 
__getattr__(self, attr): + cursor_attr = getattr(self.cursor, attr) + if attr in CursorWrapper.WRAP_ERROR_ATTRS: + return self.db.wrap_database_errors(cursor_attr) + else: + return cursor_attr + + def __iter__(self): + with self.db.wrap_database_errors: + yield from self.cursor + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + # Close instead of passing through to avoid backend-specific behavior + # (#17671). Catch errors liberally because errors in cleanup code + # aren't useful. + try: + self.close() + except self.db.Database.Error: + pass + + # The following methods cannot be implemented in __getattr__, because the + # code must run when the method is invoked, not just when it is accessed. + + def callproc(self, procname, params=None, kparams=None): + # Keyword parameters for callproc aren't supported in PEP 249, but the + # database driver may support them (e.g. cx_Oracle). + if kparams is not None and not self.db.features.supports_callproc_kwargs: + raise NotSupportedError( + 'Keyword parameters for callproc are not supported on this ' + 'database backend.' 
+ ) + self.db.validate_no_broken_transaction() + with self.db.wrap_database_errors: + if params is None and kparams is None: + return self.cursor.callproc(procname) + elif kparams is None: + return self.cursor.callproc(procname, params) + else: + params = params or () + return self.cursor.callproc(procname, params, kparams) + + def execute(self, sql, params=None): + return self._execute_with_wrappers(sql, params, many=False, executor=self._execute) + + def executemany(self, sql, param_list): + return self._execute_with_wrappers(sql, param_list, many=True, executor=self._executemany) + + def _execute_with_wrappers(self, sql, params, many, executor): + context = {'connection': self.db, 'cursor': self} + for wrapper in reversed(self.db.execute_wrappers): + executor = functools.partial(wrapper, executor) + return executor(sql, params, many, context) + + def _execute(self, sql, params, *ignored_wrapper_args): + self.db.validate_no_broken_transaction() + with self.db.wrap_database_errors: + if params is None: + return self.cursor.execute(sql) + else: + return self.cursor.execute(sql, params) + + def _executemany(self, sql, param_list, *ignored_wrapper_args): + self.db.validate_no_broken_transaction() + with self.db.wrap_database_errors: + return self.cursor.executemany(sql, param_list) + + +class CursorDebugWrapper(CursorWrapper): + + # XXX callproc isn't instrumented at this time. 
+ + def execute(self, sql, params=None): + start = time() + try: + return super().execute(sql, params) + finally: + stop = time() + duration = stop - start + sql = self.db.ops.last_executed_query(self.cursor, sql, params) + self.db.queries_log.append({ + 'sql': sql, + 'time': "%.3f" % duration, + }) + logger.debug( + '(%.3f) %s; args=%s', duration, sql, params, + extra={'duration': duration, 'sql': sql, 'params': params} + ) + + def executemany(self, sql, param_list): + start = time() + try: + return super().executemany(sql, param_list) + finally: + stop = time() + duration = stop - start + try: + times = len(param_list) + except TypeError: # param_list could be an iterator + times = '?' + self.db.queries_log.append({ + 'sql': '%s times: %s' % (times, sql), + 'time': "%.3f" % duration, + }) + logger.debug( + '(%.3f) %s; args=%s', duration, sql, param_list, + extra={'duration': duration, 'sql': sql, 'params': param_list} + ) + + +############################################### +# Converters from database (string) to Python # +############################################### + +def typecast_date(s): + return datetime.date(*map(int, s.split('-'))) if s else None # return None if s is null + + +def typecast_time(s): # does NOT store time zone information + if not s: + return None + hour, minutes, seconds = s.split(':') + if '.' in seconds: # check whether seconds have a fractional part + seconds, microseconds = seconds.split('.') + else: + microseconds = '0' + return datetime.time(int(hour), int(minutes), int(seconds), int((microseconds + '000000')[:6])) + + +def typecast_timestamp(s): # does NOT store time zone information + # "2005-07-29 15:48:00.590358-05" + # "2005-07-29 09:56:00-05" + if not s: + return None + if ' ' not in s: + return typecast_date(s) + d, t = s.split() + # Extract timezone information, if it exists. Currently it's ignored. 
+ if '-' in t: + t, tz = t.split('-', 1) + tz = '-' + tz + elif '+' in t: + t, tz = t.split('+', 1) + tz = '+' + tz + else: + tz = '' + dates = d.split('-') + times = t.split(':') + seconds = times[2] + if '.' in seconds: # check whether seconds have a fractional part + seconds, microseconds = seconds.split('.') + else: + microseconds = '0' + tzinfo = utc if settings.USE_TZ else None + return datetime.datetime( + int(dates[0]), int(dates[1]), int(dates[2]), + int(times[0]), int(times[1]), int(seconds), + int((microseconds + '000000')[:6]), tzinfo + ) + + +############################################### +# Converters from Python to database (string) # +############################################### + +def rev_typecast_decimal(d): + if d is None: + return None + return str(d) + + +def split_identifier(identifier): + """ + Split a SQL identifier into a two element tuple of (namespace, name). + + The identifier could be a table, column, or sequence name might be prefixed + by a namespace. + """ + try: + namespace, name = identifier.split('"."') + except ValueError: + namespace, name = '', identifier + return namespace.strip('"'), name.strip('"') + + +def truncate_name(identifier, length=None, hash_len=4): + """ + Shorten a SQL identifier to a repeatable mangled version with the given + length. + + If a quote stripped name contains a namespace, e.g. USERNAME"."TABLE, + truncate the table portion only. + """ + namespace, name = split_identifier(identifier) + + if length is None or len(name) <= length: + return identifier + + digest = hashlib.md5(force_bytes(name)).hexdigest()[:hash_len] + return '%s%s%s' % ('%s"."' % namespace if namespace else '', name[:length - hash_len], digest) + + +def format_number(value, max_digits, decimal_places): + """ + Format a number into a string with the requisite number of digits and + decimal places. 
+ """ + if value is None: + return None + if isinstance(value, decimal.Decimal): + context = decimal.getcontext().copy() + if max_digits is not None: + context.prec = max_digits + if decimal_places is not None: + value = value.quantize(decimal.Decimal(".1") ** decimal_places, context=context) + else: + context.traps[decimal.Rounded] = 1 + value = context.create_decimal(value) + return "{:f}".format(value) + if decimal_places is not None: + return "%.*f" % (decimal_places, value) + return "{:f}".format(value) + + +def strip_quotes(table_name): + """ + Strip quotes off of quoted table names to make them safe for use in index + names, sequence names, etc. For example '"USER"."TABLE"' (an Oracle naming + scheme) becomes 'USER"."TABLE'. + """ + has_quotes = table_name.startswith('"') and table_name.endswith('"') + return table_name[1:-1] if has_quotes else table_name diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/__init__.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e7f3d2d7689a49f599b6c4868c5addb0cb9bff84 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/__init__.py @@ -0,0 +1,2 @@ +from .migration import Migration, swappable_dependency # NOQA +from .operations import * # NOQA diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/autodetector.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/autodetector.py new file mode 100644 index 0000000000000000000000000000000000000000..ece58b9babd1ccca7e21ed47edf8a1d09a3e3851 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/autodetector.py @@ -0,0 +1,1236 @@ +import functools +import re +from itertools import chain + +from 
django.conf import settings +from django.db import models +from django.db.migrations import operations +from django.db.migrations.migration import Migration +from django.db.migrations.operations.models import AlterModelOptions +from django.db.migrations.optimizer import MigrationOptimizer +from django.db.migrations.questioner import MigrationQuestioner +from django.db.migrations.utils import ( + COMPILED_REGEX_TYPE, RegexObject, get_migration_name_timestamp, +) + +from .topological_sort import stable_topological_sort + + +class MigrationAutodetector: + """ + Take a pair of ProjectStates and compare them to see what the first would + need doing to make it match the second (the second usually being the + project's current state). + + Note that this naturally operates on entire projects at a time, + as it's likely that changes interact (for example, you can't + add a ForeignKey without having a migration to add the table it + depends on first). A user interface may offer single-app usage + if it wishes, with the caveat that it may not always be possible. + """ + + def __init__(self, from_state, to_state, questioner=None): + self.from_state = from_state + self.to_state = to_state + self.questioner = questioner or MigrationQuestioner() + self.existing_apps = {app for app, model in from_state.models} + + def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None): + """ + Main entry point to produce a list of applicable changes. + Take a graph to base names on and an optional set of apps + to try and restrict to (restriction is not guaranteed) + """ + changes = self._detect_changes(convert_apps, graph) + changes = self.arrange_for_graph(changes, graph, migration_name) + if trim_to_apps: + changes = self._trim_to_apps(changes, trim_to_apps) + return changes + + def deep_deconstruct(self, obj): + """ + Recursive deconstruction for a field and its arguments. 
+ Used for full comparison for rename/alter; sometimes a single-level + deconstruction will not compare correctly. + """ + if isinstance(obj, list): + return [self.deep_deconstruct(value) for value in obj] + elif isinstance(obj, tuple): + return tuple(self.deep_deconstruct(value) for value in obj) + elif isinstance(obj, dict): + return { + key: self.deep_deconstruct(value) + for key, value in obj.items() + } + elif isinstance(obj, functools.partial): + return (obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords)) + elif isinstance(obj, COMPILED_REGEX_TYPE): + return RegexObject(obj) + elif isinstance(obj, type): + # If this is a type that implements 'deconstruct' as an instance method, + # avoid treating this as being deconstructible itself - see #22951 + return obj + elif hasattr(obj, 'deconstruct'): + deconstructed = obj.deconstruct() + if isinstance(obj, models.Field): + # we have a field which also returns a name + deconstructed = deconstructed[1:] + path, args, kwargs = deconstructed + return ( + path, + [self.deep_deconstruct(value) for value in args], + { + key: self.deep_deconstruct(value) + for key, value in kwargs.items() + }, + ) + else: + return obj + + def only_relation_agnostic_fields(self, fields): + """ + Return a definition of the fields that ignores field names and + what related fields actually relate to. Used for detecting renames (as, + of course, the related fields change during renames). + """ + fields_def = [] + for name, field in sorted(fields): + deconstruction = self.deep_deconstruct(field) + if field.remote_field and field.remote_field.model: + del deconstruction[2]['to'] + fields_def.append(deconstruction) + return fields_def + + def _detect_changes(self, convert_apps=None, graph=None): + """ + Return a dict of migration plans which will achieve the + change from from_state to to_state. The dict has app labels + as keys and a list of migrations as values. 
+ + The resulting migrations aren't specially named, but the names + do matter for dependencies inside the set. + + convert_apps is the list of apps to convert to use migrations + (i.e. to make initial migrations for, in the usual case) + + graph is an optional argument that, if provided, can help improve + dependency generation and avoid potential circular dependencies. + """ + # The first phase is generating all the operations for each app + # and gathering them into a big per-app list. + # Then go through that list, order it, and split into migrations to + # resolve dependencies caused by M2Ms and FKs. + self.generated_operations = {} + self.altered_indexes = {} + + # Prepare some old/new state and model lists, separating + # proxy models and ignoring unmigrated apps. + self.old_apps = self.from_state.concrete_apps + self.new_apps = self.to_state.apps + self.old_model_keys = set() + self.old_proxy_keys = set() + self.old_unmanaged_keys = set() + self.new_model_keys = set() + self.new_proxy_keys = set() + self.new_unmanaged_keys = set() + for al, mn in self.from_state.models: + model = self.old_apps.get_model(al, mn) + if not model._meta.managed: + self.old_unmanaged_keys.add((al, mn)) + elif al not in self.from_state.real_apps: + if model._meta.proxy: + self.old_proxy_keys.add((al, mn)) + else: + self.old_model_keys.add((al, mn)) + + for al, mn in self.to_state.models: + model = self.new_apps.get_model(al, mn) + if not model._meta.managed: + self.new_unmanaged_keys.add((al, mn)) + elif ( + al not in self.from_state.real_apps or + (convert_apps and al in convert_apps) + ): + if model._meta.proxy: + self.new_proxy_keys.add((al, mn)) + else: + self.new_model_keys.add((al, mn)) + + # Renames have to come first + self.generate_renamed_models() + + # Prepare lists of fields and generate through model map + self._prepare_field_lists() + self._generate_through_model_map() + + # Generate non-rename model operations + self.generate_deleted_models() + 
self.generate_created_models() + self.generate_deleted_proxies() + self.generate_created_proxies() + self.generate_altered_options() + self.generate_altered_managers() + + # Create the altered indexes and store them in self.altered_indexes. + # This avoids the same computation in generate_removed_indexes() + # and generate_added_indexes(). + self.create_altered_indexes() + # Generate index removal operations before field is removed + self.generate_removed_indexes() + # Generate field operations + self.generate_renamed_fields() + self.generate_removed_fields() + self.generate_added_fields() + self.generate_altered_fields() + self.generate_altered_unique_together() + self.generate_altered_index_together() + self.generate_added_indexes() + self.generate_altered_db_table() + self.generate_altered_order_with_respect_to() + + self._sort_migrations() + self._build_migration_list(graph) + self._optimize_migrations() + + return self.migrations + + def _prepare_field_lists(self): + """ + Prepare field lists and a list of the fields that used through models + in the old state so dependencies can be made from the through model + deletion to the field that uses it. 
+ """ + self.kept_model_keys = self.old_model_keys & self.new_model_keys + self.kept_proxy_keys = self.old_proxy_keys & self.new_proxy_keys + self.kept_unmanaged_keys = self.old_unmanaged_keys & self.new_unmanaged_keys + self.through_users = {} + self.old_field_keys = { + (app_label, model_name, x) + for app_label, model_name in self.kept_model_keys + for x, y in self.from_state.models[ + app_label, + self.renamed_models.get((app_label, model_name), model_name) + ].fields + } + self.new_field_keys = { + (app_label, model_name, x) + for app_label, model_name in self.kept_model_keys + for x, y in self.to_state.models[app_label, model_name].fields + } + + def _generate_through_model_map(self): + """Through model map generation.""" + for app_label, model_name in sorted(self.old_model_keys): + old_model_name = self.renamed_models.get((app_label, model_name), model_name) + old_model_state = self.from_state.models[app_label, old_model_name] + for field_name, field in old_model_state.fields: + old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name) + if (hasattr(old_field, "remote_field") and getattr(old_field.remote_field, "through", None) and + not old_field.remote_field.through._meta.auto_created): + through_key = ( + old_field.remote_field.through._meta.app_label, + old_field.remote_field.through._meta.model_name, + ) + self.through_users[through_key] = (app_label, old_model_name, field_name) + + def _build_migration_list(self, graph=None): + """ + Chop the lists of operations up into migrations with dependencies on + each other. Do this by going through an app's list of operations until + one is found that has an outgoing dependency that isn't in another + app's migration yet (hasn't been chopped off its list). Then chop off + the operations before it into a migration and move onto the next app. 
+ If the loops completes without doing anything, there's a circular + dependency (which _should_ be impossible as the operations are + all split at this point so they can't depend and be depended on). + """ + self.migrations = {} + num_ops = sum(len(x) for x in self.generated_operations.values()) + chop_mode = False + while num_ops: + # On every iteration, we step through all the apps and see if there + # is a completed set of operations. + # If we find that a subset of the operations are complete we can + # try to chop it off from the rest and continue, but we only + # do this if we've already been through the list once before + # without any chopping and nothing has changed. + for app_label in sorted(self.generated_operations): + chopped = [] + dependencies = set() + for operation in list(self.generated_operations[app_label]): + deps_satisfied = True + operation_dependencies = set() + for dep in operation._auto_deps: + is_swappable_dep = False + if dep[0] == "__setting__": + # We need to temporarily resolve the swappable dependency to prevent + # circular references. While keeping the dependency checks on the + # resolved model we still add the swappable dependencies. + # See #23322 + resolved_app_label, resolved_object_name = getattr(settings, dep[1]).split('.') + original_dep = dep + dep = (resolved_app_label, resolved_object_name.lower(), dep[2], dep[3]) + is_swappable_dep = True + if dep[0] != app_label and dep[0] != "__setting__": + # External app dependency. See if it's not yet + # satisfied. 
+ for other_operation in self.generated_operations.get(dep[0], []): + if self.check_dependency(other_operation, dep): + deps_satisfied = False + break + if not deps_satisfied: + break + else: + if is_swappable_dep: + operation_dependencies.add((original_dep[0], original_dep[1])) + elif dep[0] in self.migrations: + operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name)) + else: + # If we can't find the other app, we add a first/last dependency, + # but only if we've already been through once and checked everything + if chop_mode: + # If the app already exists, we add a dependency on the last migration, + # as we don't know which migration contains the target field. + # If it's not yet migrated or has no migrations, we use __first__ + if graph and graph.leaf_nodes(dep[0]): + operation_dependencies.add(graph.leaf_nodes(dep[0])[0]) + else: + operation_dependencies.add((dep[0], "__first__")) + else: + deps_satisfied = False + if deps_satisfied: + chopped.append(operation) + dependencies.update(operation_dependencies) + self.generated_operations[app_label] = self.generated_operations[app_label][1:] + else: + break + # Make a migration! 
Well, only if there's stuff to put in it + if dependencies or chopped: + if not self.generated_operations[app_label] or chop_mode: + subclass = type("Migration", (Migration,), {"operations": [], "dependencies": []}) + instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label) + instance.dependencies = list(dependencies) + instance.operations = chopped + instance.initial = app_label not in self.existing_apps + self.migrations.setdefault(app_label, []).append(instance) + chop_mode = False + else: + self.generated_operations[app_label] = chopped + self.generated_operations[app_label] + new_num_ops = sum(len(x) for x in self.generated_operations.values()) + if new_num_ops == num_ops: + if not chop_mode: + chop_mode = True + else: + raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations) + num_ops = new_num_ops + + def _sort_migrations(self): + """ + Reorder to make things possible. Reordering may be needed so FKs work + nicely inside the same app. 
+ """ + for app_label, ops in sorted(self.generated_operations.items()): + # construct a dependency graph for intra-app dependencies + dependency_graph = {op: set() for op in ops} + for op in ops: + for dep in op._auto_deps: + if dep[0] == app_label: + for op2 in ops: + if self.check_dependency(op2, dep): + dependency_graph[op].add(op2) + + # we use a stable sort for deterministic tests & general behavior + self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph) + + def _optimize_migrations(self): + # Add in internal dependencies among the migrations + for app_label, migrations in self.migrations.items(): + for m1, m2 in zip(migrations, migrations[1:]): + m2.dependencies.append((app_label, m1.name)) + + # De-dupe dependencies + for app_label, migrations in self.migrations.items(): + for migration in migrations: + migration.dependencies = list(set(migration.dependencies)) + + # Optimize migrations + for app_label, migrations in self.migrations.items(): + for migration in migrations: + migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label) + + def check_dependency(self, operation, dependency): + """ + Return True if the given operation depends on the given dependency, + False otherwise. 
+ """ + # Created model + if dependency[2] is None and dependency[3] is True: + return ( + isinstance(operation, operations.CreateModel) and + operation.name_lower == dependency[1].lower() + ) + # Created field + elif dependency[2] is not None and dependency[3] is True: + return ( + ( + isinstance(operation, operations.CreateModel) and + operation.name_lower == dependency[1].lower() and + any(dependency[2] == x for x, y in operation.fields) + ) or + ( + isinstance(operation, operations.AddField) and + operation.model_name_lower == dependency[1].lower() and + operation.name_lower == dependency[2].lower() + ) + ) + # Removed field + elif dependency[2] is not None and dependency[3] is False: + return ( + isinstance(operation, operations.RemoveField) and + operation.model_name_lower == dependency[1].lower() and + operation.name_lower == dependency[2].lower() + ) + # Removed model + elif dependency[2] is None and dependency[3] is False: + return ( + isinstance(operation, operations.DeleteModel) and + operation.name_lower == dependency[1].lower() + ) + # Field being altered + elif dependency[2] is not None and dependency[3] == "alter": + return ( + isinstance(operation, operations.AlterField) and + operation.model_name_lower == dependency[1].lower() and + operation.name_lower == dependency[2].lower() + ) + # order_with_respect_to being unset for a field + elif dependency[2] is not None and dependency[3] == "order_wrt_unset": + return ( + isinstance(operation, operations.AlterOrderWithRespectTo) and + operation.name_lower == dependency[1].lower() and + (operation.order_with_respect_to or "").lower() != dependency[2].lower() + ) + # Field is removed and part of an index/unique_together + elif dependency[2] is not None and dependency[3] == "foo_together_change": + return ( + isinstance(operation, (operations.AlterUniqueTogether, + operations.AlterIndexTogether)) and + operation.name_lower == dependency[1].lower() + ) + # Unknown dependency. Raise an error. 
+ else: + raise ValueError("Can't handle dependency %r" % (dependency, )) + + def add_operation(self, app_label, operation, dependencies=None, beginning=False): + # Dependencies are (app_label, model_name, field_name, create/delete as True/False) + operation._auto_deps = dependencies or [] + if beginning: + self.generated_operations.setdefault(app_label, []).insert(0, operation) + else: + self.generated_operations.setdefault(app_label, []).append(operation) + + def swappable_first_key(self, item): + """ + Place potential swappable models first in lists of created models (only + real way to solve #22783). + """ + try: + model = self.new_apps.get_model(item[0], item[1]) + base_names = [base.__name__ for base in model.__bases__] + string_version = "%s.%s" % (item[0], item[1]) + if ( + model._meta.swappable or + "AbstractUser" in base_names or + "AbstractBaseUser" in base_names or + settings.AUTH_USER_MODEL.lower() == string_version.lower() + ): + return ("___" + item[0], "___" + item[1]) + except LookupError: + pass + return item + + def generate_renamed_models(self): + """ + Find any renamed models, generate the operations for them, and remove + the old entry from the model lists. Must be run before other + model-level generation. 
+ """ + self.renamed_models = {} + self.renamed_models_rel = {} + added_models = self.new_model_keys - self.old_model_keys + for app_label, model_name in sorted(added_models): + model_state = self.to_state.models[app_label, model_name] + model_fields_def = self.only_relation_agnostic_fields(model_state.fields) + + removed_models = self.old_model_keys - self.new_model_keys + for rem_app_label, rem_model_name in removed_models: + if rem_app_label == app_label: + rem_model_state = self.from_state.models[rem_app_label, rem_model_name] + rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields) + if model_fields_def == rem_model_fields_def: + if self.questioner.ask_rename_model(rem_model_state, model_state): + model_opts = self.new_apps.get_model(app_label, model_name)._meta + dependencies = [] + for field in model_opts.get_fields(): + if field.is_relation: + dependencies.extend(self._get_dependencies_for_foreign_key(field)) + self.add_operation( + app_label, + operations.RenameModel( + old_name=rem_model_state.name, + new_name=model_state.name, + ), + dependencies=dependencies, + ) + self.renamed_models[app_label, model_name] = rem_model_name + renamed_models_rel_key = '%s.%s' % (rem_model_state.app_label, rem_model_state.name) + self.renamed_models_rel[renamed_models_rel_key] = '%s.%s' % ( + model_state.app_label, + model_state.name, + ) + self.old_model_keys.remove((rem_app_label, rem_model_name)) + self.old_model_keys.add((app_label, model_name)) + break + + def generate_created_models(self): + """ + Find all new models (both managed and unmanaged) and make create + operations for them as well as separate operations to create any + foreign key or M2M relationships (these are optimized later, if + possible). + + Defer any model options that refer to collections of fields that might + be deferred (e.g. unique_together, index_together). 
+ """ + old_keys = self.old_model_keys | self.old_unmanaged_keys + added_models = self.new_model_keys - old_keys + added_unmanaged_models = self.new_unmanaged_keys - old_keys + all_added_models = chain( + sorted(added_models, key=self.swappable_first_key, reverse=True), + sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True) + ) + for app_label, model_name in all_added_models: + model_state = self.to_state.models[app_label, model_name] + model_opts = self.new_apps.get_model(app_label, model_name)._meta + # Gather related fields + related_fields = {} + primary_key_rel = None + for field in model_opts.local_fields: + if field.remote_field: + if field.remote_field.model: + if field.primary_key: + primary_key_rel = field.remote_field.model + elif not field.remote_field.parent_link: + related_fields[field.name] = field + # through will be none on M2Ms on swapped-out models; + # we can treat lack of through as auto_created=True, though. + if (getattr(field.remote_field, "through", None) and + not field.remote_field.through._meta.auto_created): + related_fields[field.name] = field + for field in model_opts.local_many_to_many: + if field.remote_field.model: + related_fields[field.name] = field + if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created: + related_fields[field.name] = field + # Are there indexes/unique|index_together to defer? + indexes = model_state.options.pop('indexes') + unique_together = model_state.options.pop('unique_together', None) + index_together = model_state.options.pop('index_together', None) + order_with_respect_to = model_state.options.pop('order_with_respect_to', None) + # Depend on the deletion of any possible proxy version of us + dependencies = [ + (app_label, model_name, None, False), + ] + # Depend on all bases + for base in model_state.bases: + if isinstance(base, str) and "." 
in base: + base_app_label, base_name = base.split(".", 1) + dependencies.append((base_app_label, base_name, None, True)) + # Depend on the other end of the primary key if it's a relation + if primary_key_rel: + dependencies.append(( + primary_key_rel._meta.app_label, + primary_key_rel._meta.object_name, + None, + True + )) + # Generate creation operation + self.add_operation( + app_label, + operations.CreateModel( + name=model_state.name, + fields=[d for d in model_state.fields if d[0] not in related_fields], + options=model_state.options, + bases=model_state.bases, + managers=model_state.managers, + ), + dependencies=dependencies, + beginning=True, + ) + + # Don't add operations which modify the database for unmanaged models + if not model_opts.managed: + continue + + # Generate operations for each related field + for name, field in sorted(related_fields.items()): + dependencies = self._get_dependencies_for_foreign_key(field) + # Depend on our own model being created + dependencies.append((app_label, model_name, None, True)) + # Make operation + self.add_operation( + app_label, + operations.AddField( + model_name=model_name, + name=name, + field=field, + ), + dependencies=list(set(dependencies)), + ) + # Generate other opns + related_dependencies = [ + (app_label, model_name, name, True) + for name, field in sorted(related_fields.items()) + ] + related_dependencies.append((app_label, model_name, None, True)) + for index in indexes: + self.add_operation( + app_label, + operations.AddIndex( + model_name=model_name, + index=index, + ), + dependencies=related_dependencies, + ) + if unique_together: + self.add_operation( + app_label, + operations.AlterUniqueTogether( + name=model_name, + unique_together=unique_together, + ), + dependencies=related_dependencies + ) + if index_together: + self.add_operation( + app_label, + operations.AlterIndexTogether( + name=model_name, + index_together=index_together, + ), + dependencies=related_dependencies + ) + if 
order_with_respect_to: + self.add_operation( + app_label, + operations.AlterOrderWithRespectTo( + name=model_name, + order_with_respect_to=order_with_respect_to, + ), + dependencies=[ + (app_label, model_name, order_with_respect_to, True), + (app_label, model_name, None, True), + ] + ) + + # Fix relationships if the model changed from a proxy model to a + # concrete model. + if (app_label, model_name) in self.old_proxy_keys: + for related_object in model_opts.related_objects: + self.add_operation( + related_object.related_model._meta.app_label, + operations.AlterField( + model_name=related_object.related_model._meta.object_name, + name=related_object.field.name, + field=related_object.field, + ), + dependencies=[(app_label, model_name, None, True)], + ) + + def generate_created_proxies(self): + """ + Make CreateModel statements for proxy models. Use the same statements + as that way there's less code duplication, but of course for proxy + models it's safe to skip all the pointless field stuff and just chuck + out an operation. + """ + added = self.new_proxy_keys - self.old_proxy_keys + for app_label, model_name in sorted(added): + model_state = self.to_state.models[app_label, model_name] + assert model_state.options.get("proxy") + # Depend on the deletion of any possible non-proxy version of us + dependencies = [ + (app_label, model_name, None, False), + ] + # Depend on all bases + for base in model_state.bases: + if isinstance(base, str) and "." 
in base: + base_app_label, base_name = base.split(".", 1) + dependencies.append((base_app_label, base_name, None, True)) + # Generate creation operation + self.add_operation( + app_label, + operations.CreateModel( + name=model_state.name, + fields=[], + options=model_state.options, + bases=model_state.bases, + managers=model_state.managers, + ), + # Depend on the deletion of any possible non-proxy version of us + dependencies=dependencies, + ) + + def generate_deleted_models(self): + """ + Find all deleted models (managed and unmanaged) and make delete + operations for them as well as separate operations to delete any + foreign key or M2M relationships (these are optimized later, if + possible). + + Also bring forward removal of any model options that refer to + collections of fields - the inverse of generate_created_models(). + """ + new_keys = self.new_model_keys | self.new_unmanaged_keys + deleted_models = self.old_model_keys - new_keys + deleted_unmanaged_models = self.old_unmanaged_keys - new_keys + all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models)) + for app_label, model_name in all_deleted_models: + model_state = self.from_state.models[app_label, model_name] + model = self.old_apps.get_model(app_label, model_name) + if not model._meta.managed: + # Skip here, no need to handle fields for unmanaged models + continue + + # Gather related fields + related_fields = {} + for field in model._meta.local_fields: + if field.remote_field: + if field.remote_field.model: + related_fields[field.name] = field + # through will be none on M2Ms on swapped-out models; + # we can treat lack of through as auto_created=True, though. 
+ if (getattr(field.remote_field, "through", None) and + not field.remote_field.through._meta.auto_created): + related_fields[field.name] = field + for field in model._meta.local_many_to_many: + if field.remote_field.model: + related_fields[field.name] = field + if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created: + related_fields[field.name] = field + # Generate option removal first + unique_together = model_state.options.pop('unique_together', None) + index_together = model_state.options.pop('index_together', None) + if unique_together: + self.add_operation( + app_label, + operations.AlterUniqueTogether( + name=model_name, + unique_together=None, + ) + ) + if index_together: + self.add_operation( + app_label, + operations.AlterIndexTogether( + name=model_name, + index_together=None, + ) + ) + # Then remove each related field + for name, field in sorted(related_fields.items()): + self.add_operation( + app_label, + operations.RemoveField( + model_name=model_name, + name=name, + ) + ) + # Finally, remove the model. + # This depends on both the removal/alteration of all incoming fields + # and the removal of all its own related fields, and if it's + # a through model the field that references it. 
+ dependencies = [] + for related_object in model._meta.related_objects: + related_object_app_label = related_object.related_model._meta.app_label + object_name = related_object.related_model._meta.object_name + field_name = related_object.field.name + dependencies.append((related_object_app_label, object_name, field_name, False)) + if not related_object.many_to_many: + dependencies.append((related_object_app_label, object_name, field_name, "alter")) + + for name, field in sorted(related_fields.items()): + dependencies.append((app_label, model_name, name, False)) + # We're referenced in another field's through= + through_user = self.through_users.get((app_label, model_state.name_lower)) + if through_user: + dependencies.append((through_user[0], through_user[1], through_user[2], False)) + # Finally, make the operation, deduping any dependencies + self.add_operation( + app_label, + operations.DeleteModel( + name=model_state.name, + ), + dependencies=list(set(dependencies)), + ) + + def generate_deleted_proxies(self): + """Make DeleteModel options for proxy models.""" + deleted = self.old_proxy_keys - self.new_proxy_keys + for app_label, model_name in sorted(deleted): + model_state = self.from_state.models[app_label, model_name] + assert model_state.options.get("proxy") + self.add_operation( + app_label, + operations.DeleteModel( + name=model_state.name, + ), + ) + + def generate_renamed_fields(self): + """Work out renamed fields.""" + self.renamed_fields = {} + for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys): + old_model_name = self.renamed_models.get((app_label, model_name), model_name) + old_model_state = self.from_state.models[app_label, old_model_name] + field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name) + # Scan to see if this is actually a rename! 
+ field_dec = self.deep_deconstruct(field) + for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys): + if rem_app_label == app_label and rem_model_name == model_name: + old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name)) + if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]: + old_rel_to = old_field_dec[2]['to'] + if old_rel_to in self.renamed_models_rel: + old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to] + if old_field_dec == field_dec: + if self.questioner.ask_rename(model_name, rem_field_name, field_name, field): + self.add_operation( + app_label, + operations.RenameField( + model_name=model_name, + old_name=rem_field_name, + new_name=field_name, + ) + ) + self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name)) + self.old_field_keys.add((app_label, model_name, field_name)) + self.renamed_fields[app_label, model_name, field_name] = rem_field_name + break + + def generate_added_fields(self): + """Make AddField operations.""" + for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys): + self._generate_added_field(app_label, model_name, field_name) + + def _generate_added_field(self, app_label, model_name, field_name): + field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name) + # Fields that are foreignkeys/m2ms depend on stuff + dependencies = [] + if field.remote_field and field.remote_field.model: + dependencies.extend(self._get_dependencies_for_foreign_key(field)) + # You can't just add NOT NULL fields with no default or fields + # which don't allow empty strings as default. 
+ preserve_default = True + time_fields = (models.DateField, models.DateTimeField, models.TimeField) + if (not field.null and not field.has_default() and + not field.many_to_many and + not (field.blank and field.empty_strings_allowed) and + not (isinstance(field, time_fields) and field.auto_now)): + field = field.clone() + if isinstance(field, time_fields) and field.auto_now_add: + field.default = self.questioner.ask_auto_now_add_addition(field_name, model_name) + else: + field.default = self.questioner.ask_not_null_addition(field_name, model_name) + preserve_default = False + self.add_operation( + app_label, + operations.AddField( + model_name=model_name, + name=field_name, + field=field, + preserve_default=preserve_default, + ), + dependencies=dependencies, + ) + + def generate_removed_fields(self): + """Make RemoveField operations.""" + for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys): + self._generate_removed_field(app_label, model_name, field_name) + + def _generate_removed_field(self, app_label, model_name, field_name): + self.add_operation( + app_label, + operations.RemoveField( + model_name=model_name, + name=field_name, + ), + # We might need to depend on the removal of an + # order_with_respect_to or index/unique_together operation; + # this is safely ignored if there isn't one + dependencies=[ + (app_label, model_name, field_name, "order_wrt_unset"), + (app_label, model_name, field_name, "foo_together_change"), + ], + ) + + def generate_altered_fields(self): + """ + Make AlterField operations, or possibly RemovedField/AddField if alter + isn's possible. + """ + for app_label, model_name, field_name in sorted(self.old_field_keys & self.new_field_keys): + # Did the field change? 
+ old_model_name = self.renamed_models.get((app_label, model_name), model_name) + old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name) + old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name) + new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name) + # Implement any model renames on relations; these are handled by RenameModel + # so we need to exclude them from the comparison + if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "model", None): + rename_key = ( + new_field.remote_field.model._meta.app_label, + new_field.remote_field.model._meta.model_name, + ) + if rename_key in self.renamed_models: + new_field.remote_field.model = old_field.remote_field.model + if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "through", None): + rename_key = ( + new_field.remote_field.through._meta.app_label, + new_field.remote_field.through._meta.model_name, + ) + if rename_key in self.renamed_models: + new_field.remote_field.through = old_field.remote_field.through + old_field_dec = self.deep_deconstruct(old_field) + new_field_dec = self.deep_deconstruct(new_field) + if old_field_dec != new_field_dec: + both_m2m = old_field.many_to_many and new_field.many_to_many + neither_m2m = not old_field.many_to_many and not new_field.many_to_many + if both_m2m or neither_m2m: + # Either both fields are m2m or neither is + preserve_default = True + if (old_field.null and not new_field.null and not new_field.has_default() and + not new_field.many_to_many): + field = new_field.clone() + new_default = self.questioner.ask_not_null_alteration(field_name, model_name) + if new_default is not models.NOT_PROVIDED: + field.default = new_default + preserve_default = False + else: + field = new_field + self.add_operation( + app_label, + operations.AlterField( + model_name=model_name, + name=field_name, + field=field, + 
preserve_default=preserve_default, + ) + ) + else: + # We cannot alter between m2m and concrete fields + self._generate_removed_field(app_label, model_name, field_name) + self._generate_added_field(app_label, model_name, field_name) + + def create_altered_indexes(self): + option_name = operations.AddIndex.option_name + for app_label, model_name in sorted(self.kept_model_keys): + old_model_name = self.renamed_models.get((app_label, model_name), model_name) + old_model_state = self.from_state.models[app_label, old_model_name] + new_model_state = self.to_state.models[app_label, model_name] + + old_indexes = old_model_state.options[option_name] + new_indexes = new_model_state.options[option_name] + add_idx = [idx for idx in new_indexes if idx not in old_indexes] + rem_idx = [idx for idx in old_indexes if idx not in new_indexes] + + self.altered_indexes.update({ + (app_label, model_name): { + 'added_indexes': add_idx, 'removed_indexes': rem_idx, + } + }) + + def generate_added_indexes(self): + for (app_label, model_name), alt_indexes in self.altered_indexes.items(): + for index in alt_indexes['added_indexes']: + self.add_operation( + app_label, + operations.AddIndex( + model_name=model_name, + index=index, + ) + ) + + def generate_removed_indexes(self): + for (app_label, model_name), alt_indexes in self.altered_indexes.items(): + for index in alt_indexes['removed_indexes']: + self.add_operation( + app_label, + operations.RemoveIndex( + model_name=model_name, + name=index.name, + ) + ) + + def _get_dependencies_for_foreign_key(self, field): + # Account for FKs to swappable models + swappable_setting = getattr(field, 'swappable_setting', None) + if swappable_setting is not None: + dep_app_label = "__setting__" + dep_object_name = swappable_setting + else: + dep_app_label = field.remote_field.model._meta.app_label + dep_object_name = field.remote_field.model._meta.object_name + dependencies = [(dep_app_label, dep_object_name, None, True)] + if getattr(field.remote_field, 
"through", None) and not field.remote_field.through._meta.auto_created: + dependencies.append(( + field.remote_field.through._meta.app_label, + field.remote_field.through._meta.object_name, + None, + True, + )) + return dependencies + + def _generate_altered_foo_together(self, operation): + option_name = operation.option_name + for app_label, model_name in sorted(self.kept_model_keys): + old_model_name = self.renamed_models.get((app_label, model_name), model_name) + old_model_state = self.from_state.models[app_label, old_model_name] + new_model_state = self.to_state.models[app_label, model_name] + + # We run the old version through the field renames to account for those + old_value = old_model_state.options.get(option_name) + old_value = { + tuple( + self.renamed_fields.get((app_label, model_name, n), n) + for n in unique + ) + for unique in old_value + } if old_value else set() + + new_value = new_model_state.options.get(option_name) + new_value = set(new_value) if new_value else set() + + if old_value != new_value: + dependencies = [] + for foo_togethers in new_value: + for field_name in foo_togethers: + field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name) + if field.remote_field and field.remote_field.model: + dependencies.extend(self._get_dependencies_for_foreign_key(field)) + + self.add_operation( + app_label, + operation( + name=model_name, + **{option_name: new_value} + ), + dependencies=dependencies, + ) + + def generate_altered_unique_together(self): + self._generate_altered_foo_together(operations.AlterUniqueTogether) + + def generate_altered_index_together(self): + self._generate_altered_foo_together(operations.AlterIndexTogether) + + def generate_altered_db_table(self): + models_to_check = self.kept_model_keys.union(self.kept_proxy_keys, self.kept_unmanaged_keys) + for app_label, model_name in sorted(models_to_check): + old_model_name = self.renamed_models.get((app_label, model_name), model_name) + old_model_state = 
self.from_state.models[app_label, old_model_name] + new_model_state = self.to_state.models[app_label, model_name] + old_db_table_name = old_model_state.options.get('db_table') + new_db_table_name = new_model_state.options.get('db_table') + if old_db_table_name != new_db_table_name: + self.add_operation( + app_label, + operations.AlterModelTable( + name=model_name, + table=new_db_table_name, + ) + ) + + def generate_altered_options(self): + """ + Work out if any non-schema-affecting options have changed and make an + operation to represent them in state changes (in case Python code in + migrations needs them). + """ + models_to_check = self.kept_model_keys.union( + self.kept_proxy_keys, + self.kept_unmanaged_keys, + # unmanaged converted to managed + self.old_unmanaged_keys & self.new_model_keys, + # managed converted to unmanaged + self.old_model_keys & self.new_unmanaged_keys, + ) + + for app_label, model_name in sorted(models_to_check): + old_model_name = self.renamed_models.get((app_label, model_name), model_name) + old_model_state = self.from_state.models[app_label, old_model_name] + new_model_state = self.to_state.models[app_label, model_name] + old_options = { + key: value for key, value in old_model_state.options.items() + if key in AlterModelOptions.ALTER_OPTION_KEYS + } + new_options = { + key: value for key, value in new_model_state.options.items() + if key in AlterModelOptions.ALTER_OPTION_KEYS + } + if old_options != new_options: + self.add_operation( + app_label, + operations.AlterModelOptions( + name=model_name, + options=new_options, + ) + ) + + def generate_altered_order_with_respect_to(self): + for app_label, model_name in sorted(self.kept_model_keys): + old_model_name = self.renamed_models.get((app_label, model_name), model_name) + old_model_state = self.from_state.models[app_label, old_model_name] + new_model_state = self.to_state.models[app_label, model_name] + if (old_model_state.options.get("order_with_respect_to") != + 
new_model_state.options.get("order_with_respect_to")): + # Make sure it comes second if we're adding + # (removal dependency is part of RemoveField) + dependencies = [] + if new_model_state.options.get("order_with_respect_to"): + dependencies.append(( + app_label, + model_name, + new_model_state.options["order_with_respect_to"], + True, + )) + # Actually generate the operation + self.add_operation( + app_label, + operations.AlterOrderWithRespectTo( + name=model_name, + order_with_respect_to=new_model_state.options.get('order_with_respect_to'), + ), + dependencies=dependencies, + ) + + def generate_altered_managers(self): + for app_label, model_name in sorted(self.kept_model_keys): + old_model_name = self.renamed_models.get((app_label, model_name), model_name) + old_model_state = self.from_state.models[app_label, old_model_name] + new_model_state = self.to_state.models[app_label, model_name] + if old_model_state.managers != new_model_state.managers: + self.add_operation( + app_label, + operations.AlterModelManagers( + name=model_name, + managers=new_model_state.managers, + ) + ) + + def arrange_for_graph(self, changes, graph, migration_name=None): + """ + Take a result from changes() and a MigrationGraph, and fix the names + and dependencies of the changes so they extend the graph from the leaf + nodes for each app. + """ + leaves = graph.leaf_nodes() + name_map = {} + for app_label, migrations in list(changes.items()): + if not migrations: + continue + # Find the app label's current leaf node + app_leaf = None + for leaf in leaves: + if leaf[0] == app_label: + app_leaf = leaf + break + # Do they want an initial migration for this app? + if app_leaf is None and not self.questioner.ask_initial(app_label): + # They don't. 
+ for migration in migrations: + name_map[(app_label, migration.name)] = (app_label, "__first__") + del changes[app_label] + continue + # Work out the next number in the sequence + if app_leaf is None: + next_number = 1 + else: + next_number = (self.parse_number(app_leaf[1]) or 0) + 1 + # Name each migration + for i, migration in enumerate(migrations): + if i == 0 and app_leaf: + migration.dependencies.append(app_leaf) + if i == 0 and not app_leaf: + new_name = "0001_%s" % migration_name if migration_name else "0001_initial" + else: + new_name = "%04i_%s" % ( + next_number, + migration_name or self.suggest_name(migration.operations)[:100], + ) + name_map[(app_label, migration.name)] = (app_label, new_name) + next_number += 1 + migration.name = new_name + # Now fix dependencies + for app_label, migrations in changes.items(): + for migration in migrations: + migration.dependencies = [name_map.get(d, d) for d in migration.dependencies] + return changes + + def _trim_to_apps(self, changes, app_labels): + """ + Take changes from arrange_for_graph() and set of app labels, and return + a modified set of changes which trims out as many migrations that are + not in app_labels as possible. Note that some other migrations may + still be present as they may be required dependencies. 
+ """ + # Gather other app dependencies in a first pass + app_dependencies = {} + for app_label, migrations in changes.items(): + for migration in migrations: + for dep_app_label, name in migration.dependencies: + app_dependencies.setdefault(app_label, set()).add(dep_app_label) + required_apps = set(app_labels) + # Keep resolving till there's no change + old_required_apps = None + while old_required_apps != required_apps: + old_required_apps = set(required_apps) + required_apps.update(*[app_dependencies.get(app_label, ()) for app_label in required_apps]) + # Remove all migrations that aren't needed + for app_label in list(changes): + if app_label not in required_apps: + del changes[app_label] + return changes + + @classmethod + def suggest_name(cls, ops): + """ + Given a set of operations, suggest a name for the migration they might + represent. Names are not guaranteed to be unique, but put some effort + into the fallback name to avoid VCS conflicts if possible. + """ + if len(ops) == 1: + if isinstance(ops[0], operations.CreateModel): + return ops[0].name_lower + elif isinstance(ops[0], operations.DeleteModel): + return "delete_%s" % ops[0].name_lower + elif isinstance(ops[0], operations.AddField): + return "%s_%s" % (ops[0].model_name_lower, ops[0].name_lower) + elif isinstance(ops[0], operations.RemoveField): + return "remove_%s_%s" % (ops[0].model_name_lower, ops[0].name_lower) + elif len(ops) > 1: + if all(isinstance(o, operations.CreateModel) for o in ops): + return "_".join(sorted(o.name_lower for o in ops)) + return "auto_%s" % get_migration_name_timestamp() + + @classmethod + def parse_number(cls, name): + """ + Given a migration name, try to extract a number from the beginning of + it. If no number is found, return None. 
+ """ + match = re.match(r'^\d+', name) + if match: + return int(match.group()) + return None diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/exceptions.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..c2e9ceb28c5c20839b983afa56eb407d111909f3 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/exceptions.py @@ -0,0 +1,54 @@ +from django.db.utils import DatabaseError + + +class AmbiguityError(Exception): + """More than one migration matches a name prefix.""" + pass + + +class BadMigrationError(Exception): + """There's a bad migration (unreadable/bad format/etc.).""" + pass + + +class CircularDependencyError(Exception): + """There's an impossible-to-resolve circular dependency.""" + pass + + +class InconsistentMigrationHistory(Exception): + """An applied migration has some of its dependencies not applied.""" + pass + + +class InvalidBasesError(ValueError): + """A model's base classes can't be resolved.""" + pass + + +class IrreversibleError(RuntimeError): + """An irreversible migration is about to be reversed.""" + pass + + +class NodeNotFoundError(LookupError): + """An attempt on a node is made that is not available in the graph.""" + + def __init__(self, message, node, origin=None): + self.message = message + self.origin = origin + self.node = node + + def __str__(self): + return self.message + + def __repr__(self): + return "NodeNotFoundError(%r)" % (self.node, ) + + +class MigrationSchemaMissing(DatabaseError): + pass + + +class InvalidMigrationPlan(ValueError): + pass diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/executor.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/executor.py new file mode 100644 index 
0000000000000000000000000000000000000000..ea7bc70db35d9bc10aec7be6e0860e9526d214ac --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/executor.py @@ -0,0 +1,368 @@ +from django.apps.registry import apps as global_apps +from django.db import migrations, router + +from .exceptions import InvalidMigrationPlan +from .loader import MigrationLoader +from .recorder import MigrationRecorder +from .state import ProjectState + + +class MigrationExecutor: + """ + End-to-end migration execution - load migrations and run them up or down + to a specified set of targets. + """ + + def __init__(self, connection, progress_callback=None): + self.connection = connection + self.loader = MigrationLoader(self.connection) + self.recorder = MigrationRecorder(self.connection) + self.progress_callback = progress_callback + + def migration_plan(self, targets, clean_start=False): + """ + Given a set of targets, return a list of (Migration instance, backwards?). + """ + plan = [] + if clean_start: + applied = set() + else: + applied = set(self.loader.applied_migrations) + for target in targets: + # If the target is (app_label, None), that means unmigrate everything + if target[1] is None: + for root in self.loader.graph.root_nodes(): + if root[0] == target[0]: + for migration in self.loader.graph.backwards_plan(root): + if migration in applied: + plan.append((self.loader.graph.nodes[migration], True)) + applied.remove(migration) + # If the migration is already applied, do backwards mode, + # otherwise do forwards mode. + elif target in applied: + # Don't migrate backwards all the way to the target node (that + # may roll back dependencies in other apps that don't need to + # be rolled back); instead roll back through target's immediate + # child(ren) in the same app, and no further. 
+ next_in_app = sorted( + n for n in + self.loader.graph.node_map[target].children + if n[0] == target[0] + ) + for node in next_in_app: + for migration in self.loader.graph.backwards_plan(node): + if migration in applied: + plan.append((self.loader.graph.nodes[migration], True)) + applied.remove(migration) + else: + for migration in self.loader.graph.forwards_plan(target): + if migration not in applied: + plan.append((self.loader.graph.nodes[migration], False)) + applied.add(migration) + return plan + + def _create_project_state(self, with_applied_migrations=False): + """ + Create a project state including all the applications without + migrations and applied migrations if with_applied_migrations=True. + """ + state = ProjectState(real_apps=list(self.loader.unmigrated_apps)) + if with_applied_migrations: + # Create the forwards plan Django would follow on an empty database + full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True) + applied_migrations = { + self.loader.graph.nodes[key] for key in self.loader.applied_migrations + if key in self.loader.graph.nodes + } + for migration, _ in full_plan: + if migration in applied_migrations: + migration.mutate_state(state, preserve=False) + return state + + def migrate(self, targets, plan=None, state=None, fake=False, fake_initial=False): + """ + Migrate the database up to the given targets. + + Django first needs to create all project states before a migration is + (un)applied and in a second step run all the database operations. + """ + # The django_migrations table must be present to record applied + # migrations. 
+ self.recorder.ensure_schema() + + if plan is None: + plan = self.migration_plan(targets) + # Create the forwards plan Django would follow on an empty database + full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True) + + all_forwards = all(not backwards for mig, backwards in plan) + all_backwards = all(backwards for mig, backwards in plan) + + if not plan: + if state is None: + # The resulting state should include applied migrations. + state = self._create_project_state(with_applied_migrations=True) + elif all_forwards == all_backwards: + # This should only happen if there's a mixed plan + raise InvalidMigrationPlan( + "Migration plans with both forwards and backwards migrations " + "are not supported. Please split your migration process into " + "separate plans of only forwards OR backwards migrations.", + plan + ) + elif all_forwards: + if state is None: + # The resulting state should still include applied migrations. + state = self._create_project_state(with_applied_migrations=True) + state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial) + else: + # No need to check for `elif all_backwards` here, as that condition + # would always evaluate to true. + state = self._migrate_all_backwards(plan, full_plan, fake=fake) + + self.check_replacements() + + return state + + def _migrate_all_forwards(self, state, plan, full_plan, fake, fake_initial): + """ + Take a list of 2-tuples of the form (migration instance, False) and + apply them in the order they occur in the full_plan. + """ + migrations_to_run = {m[0] for m in plan} + for migration, _ in full_plan: + if not migrations_to_run: + # We remove every migration that we applied from these sets so + # that we can bail out once the last migration has been applied + # and don't always run until the very end of the migration + # process. 
+ break + if migration in migrations_to_run: + if 'apps' not in state.__dict__: + if self.progress_callback: + self.progress_callback("render_start") + state.apps # Render all -- performance critical + if self.progress_callback: + self.progress_callback("render_success") + state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial) + migrations_to_run.remove(migration) + + return state + + def _migrate_all_backwards(self, plan, full_plan, fake): + """ + Take a list of 2-tuples of the form (migration instance, True) and + unapply them in reverse order they occur in the full_plan. + + Since unapplying a migration requires the project state prior to that + migration, Django will compute the migration states before each of them + in a first run over the plan and then unapply them in a second run over + the plan. + """ + migrations_to_run = {m[0] for m in plan} + # Holds all migration states prior to the migrations being unapplied + states = {} + state = self._create_project_state() + applied_migrations = { + self.loader.graph.nodes[key] for key in self.loader.applied_migrations + if key in self.loader.graph.nodes + } + if self.progress_callback: + self.progress_callback("render_start") + for migration, _ in full_plan: + if not migrations_to_run: + # We remove every migration that we applied from this set so + # that we can bail out once the last migration has been applied + # and don't always run until the very end of the migration + # process. 
+ break + if migration in migrations_to_run: + if 'apps' not in state.__dict__: + state.apps # Render all -- performance critical + # The state before this migration + states[migration] = state + # The old state keeps as-is, we continue with the new state + state = migration.mutate_state(state, preserve=True) + migrations_to_run.remove(migration) + elif migration in applied_migrations: + # Only mutate the state if the migration is actually applied + # to make sure the resulting state doesn't include changes + # from unrelated migrations. + migration.mutate_state(state, preserve=False) + if self.progress_callback: + self.progress_callback("render_success") + + for migration, _ in plan: + self.unapply_migration(states[migration], migration, fake=fake) + applied_migrations.remove(migration) + + # Generate the post migration state by starting from the state before + # the last migration is unapplied and mutating it to include all the + # remaining applied migrations. + last_unapplied_migration = plan[-1][0] + state = states[last_unapplied_migration] + for index, (migration, _) in enumerate(full_plan): + if migration == last_unapplied_migration: + for migration, _ in full_plan[index:]: + if migration in applied_migrations: + migration.mutate_state(state, preserve=False) + break + + return state + + def collect_sql(self, plan): + """ + Take a migration plan and return a list of collected SQL statements + that represent the best-efforts version of that plan. 
+ """ + statements = [] + state = None + for migration, backwards in plan: + with self.connection.schema_editor(collect_sql=True, atomic=migration.atomic) as schema_editor: + if state is None: + state = self.loader.project_state((migration.app_label, migration.name), at_end=False) + if not backwards: + state = migration.apply(state, schema_editor, collect_sql=True) + else: + state = migration.unapply(state, schema_editor, collect_sql=True) + statements.extend(schema_editor.collected_sql) + return statements + + def apply_migration(self, state, migration, fake=False, fake_initial=False): + """Run a migration forwards.""" + if self.progress_callback: + self.progress_callback("apply_start", migration, fake) + if not fake: + if fake_initial: + # Test to see if this is an already-applied initial migration + applied, state = self.detect_soft_applied(state, migration) + if applied: + fake = True + if not fake: + # Alright, do it normally + with self.connection.schema_editor(atomic=migration.atomic) as schema_editor: + state = migration.apply(state, schema_editor) + # For replacement migrations, record individual statuses + if migration.replaces: + for app_label, name in migration.replaces: + self.recorder.record_applied(app_label, name) + else: + self.recorder.record_applied(migration.app_label, migration.name) + # Report progress + if self.progress_callback: + self.progress_callback("apply_success", migration, fake) + return state + + def unapply_migration(self, state, migration, fake=False): + """Run a migration backwards.""" + if self.progress_callback: + self.progress_callback("unapply_start", migration, fake) + if not fake: + with self.connection.schema_editor(atomic=migration.atomic) as schema_editor: + state = migration.unapply(state, schema_editor) + # For replacement migrations, record individual statuses + if migration.replaces: + for app_label, name in migration.replaces: + self.recorder.record_unapplied(app_label, name) + else: + 
self.recorder.record_unapplied(migration.app_label, migration.name) + # Report progress + if self.progress_callback: + self.progress_callback("unapply_success", migration, fake) + return state + + def check_replacements(self): + """ + Mark replacement migrations applied if their replaced set all are. + + Do this unconditionally on every migrate, rather than just when + migrations are applied or unapplied, to correctly handle the case + when a new squash migration is pushed to a deployment that already had + all its replaced migrations applied. In this case no new migration will + be applied, but the applied state of the squashed migration must be + maintained. + """ + applied = self.recorder.applied_migrations() + for key, migration in self.loader.replacements.items(): + all_applied = all(m in applied for m in migration.replaces) + if all_applied and key not in applied: + self.recorder.record_applied(*key) + + def detect_soft_applied(self, project_state, migration): + """ + Test whether a migration has been implicitly applied - that the + tables or columns it would create exist. This is intended only for use + on initial migrations (as it only looks for CreateModel and AddField). + """ + def should_skip_detecting_model(migration, model): + """ + No need to detect tables for proxy models, unmanaged models, or + models that can't be migrated on the current database. 
+ """ + return ( + model._meta.proxy or not model._meta.managed or not + router.allow_migrate( + self.connection.alias, migration.app_label, + model_name=model._meta.model_name, + ) + ) + + if migration.initial is None: + # Bail if the migration isn't the first one in its app + if any(app == migration.app_label for app, name in migration.dependencies): + return False, project_state + elif migration.initial is False: + # Bail if it's NOT an initial migration + return False, project_state + + if project_state is None: + after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True) + else: + after_state = migration.mutate_state(project_state) + apps = after_state.apps + found_create_model_migration = False + found_add_field_migration = False + existing_table_names = self.connection.introspection.table_names(self.connection.cursor()) + # Make sure all create model and add field operations are done + for operation in migration.operations: + if isinstance(operation, migrations.CreateModel): + model = apps.get_model(migration.app_label, operation.name) + if model._meta.swapped: + # We have to fetch the model to test with from the + # main app cache, as it's not a direct dependency. + model = global_apps.get_model(model._meta.swapped) + if should_skip_detecting_model(migration, model): + continue + if model._meta.db_table not in existing_table_names: + return False, project_state + found_create_model_migration = True + elif isinstance(operation, migrations.AddField): + model = apps.get_model(migration.app_label, operation.model_name) + if model._meta.swapped: + # We have to fetch the model to test with from the + # main app cache, as it's not a direct dependency. + model = global_apps.get_model(model._meta.swapped) + if should_skip_detecting_model(migration, model): + continue + + table = model._meta.db_table + field = model._meta.get_field(operation.name) + + # Handle implicit many-to-many tables created by AddField. 
+ if field.many_to_many: + if field.remote_field.through._meta.db_table not in existing_table_names: + return False, project_state + else: + found_add_field_migration = True + continue + + column_names = [ + column.name for column in + self.connection.introspection.get_table_description(self.connection.cursor(), table) + ] + if field.column not in column_names: + return False, project_state + found_add_field_migration = True + # If we get this far and we found at least one CreateModel or AddField migration, + # the migration is considered implicitly applied. + return (found_create_model_migration or found_add_field_migration), after_state diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/graph.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/graph.py new file mode 100644 index 0000000000000000000000000000000000000000..687a9b3905bede96c8efedca831ddc73558b0e4d --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/graph.py @@ -0,0 +1,380 @@ +import warnings +from functools import total_ordering + +from django.db.migrations.state import ProjectState +from django.utils.datastructures import OrderedSet + +from .exceptions import CircularDependencyError, NodeNotFoundError + +RECURSION_DEPTH_WARNING = ( + "Maximum recursion depth exceeded while generating migration graph, " + "falling back to iterative approach. If you're experiencing performance issues, " + "consider squashing migrations as described at " + "https://docs.djangoproject.com/en/dev/topics/migrations/#squashing-migrations." +) + + +@total_ordering +class Node: + """ + A single node in the migration graph. Contains direct links to adjacent + nodes in either direction. 
+ """ + def __init__(self, key): + self.key = key + self.children = set() + self.parents = set() + + def __eq__(self, other): + return self.key == other + + def __lt__(self, other): + return self.key < other + + def __hash__(self): + return hash(self.key) + + def __getitem__(self, item): + return self.key[item] + + def __str__(self): + return str(self.key) + + def __repr__(self): + return '<%s: (%r, %r)>' % (self.__class__.__name__, self.key[0], self.key[1]) + + def add_child(self, child): + self.children.add(child) + + def add_parent(self, parent): + self.parents.add(parent) + + # Use manual caching, @cached_property effectively doubles the + # recursion depth for each recursion. + def ancestors(self): + # Use self.key instead of self to speed up the frequent hashing + # when constructing an OrderedSet. + if '_ancestors' not in self.__dict__: + ancestors = [] + for parent in sorted(self.parents, reverse=True): + ancestors += parent.ancestors() + ancestors.append(self.key) + self.__dict__['_ancestors'] = list(OrderedSet(ancestors)) + return self.__dict__['_ancestors'] + + # Use manual caching, @cached_property effectively doubles the + # recursion depth for each recursion. + def descendants(self): + # Use self.key instead of self to speed up the frequent hashing + # when constructing an OrderedSet. + if '_descendants' not in self.__dict__: + descendants = [] + for child in sorted(self.children, reverse=True): + descendants += child.descendants() + descendants.append(self.key) + self.__dict__['_descendants'] = list(OrderedSet(descendants)) + return self.__dict__['_descendants'] + + +class DummyNode(Node): + def __init__(self, key, origin, error_message): + super().__init__(key) + self.origin = origin + self.error_message = error_message + + def promote(self): + """ + Transition dummy to a normal node and clean off excess attribs. + Creating a Node object from scratch would be too much of a + hassle as many dependendies would need to be remapped. 
+ """ + del self.origin + del self.error_message + self.__class__ = Node + + def raise_error(self): + raise NodeNotFoundError(self.error_message, self.key, origin=self.origin) + + +class MigrationGraph: + """ + Represent the digraph of all migrations in a project. + + Each migration is a node, and each dependency is an edge. There are + no implicit dependencies between numbered migrations - the numbering is + merely a convention to aid file listing. Every new numbered migration + has a declared dependency to the previous number, meaning that VCS + branch merges can be detected and resolved. + + Migrations files can be marked as replacing another set of migrations - + this is to support the "squash" feature. The graph handler isn't responsible + for these; instead, the code to load them in here should examine the + migration files and if the replaced migrations are all either unapplied + or not present, it should ignore the replaced ones, load in just the + replacing migration, and repoint any dependencies that pointed to the + replaced migrations to point to the replacing one. + + A node should be a tuple: (app_path, migration_name). The tree special-cases + things within an app - namely, root nodes and leaf nodes ignore dependencies + to other apps. + """ + + def __init__(self): + self.node_map = {} + self.nodes = {} + self.cached = False + + def add_node(self, key, migration): + # If the key already exists, then it must be a dummy node. + dummy_node = self.node_map.get(key) + if dummy_node: + # Promote DummyNode to Node. + dummy_node.promote() + else: + node = Node(key) + self.node_map[key] = node + self.nodes[key] = migration + self.clear_cache() + + def add_dummy_node(self, key, origin, error_message): + node = DummyNode(key, origin, error_message) + self.node_map[key] = node + self.nodes[key] = None + + def add_dependency(self, migration, child, parent, skip_validation=False): + """ + This may create dummy nodes if they don't yet exist. 
If + `skip_validation=True`, validate_consistency() should be called + afterwards. + """ + if child not in self.nodes: + error_message = ( + "Migration %s dependencies reference nonexistent" + " child node %r" % (migration, child) + ) + self.add_dummy_node(child, migration, error_message) + if parent not in self.nodes: + error_message = ( + "Migration %s dependencies reference nonexistent" + " parent node %r" % (migration, parent) + ) + self.add_dummy_node(parent, migration, error_message) + self.node_map[child].add_parent(self.node_map[parent]) + self.node_map[parent].add_child(self.node_map[child]) + if not skip_validation: + self.validate_consistency() + self.clear_cache() + + def remove_replaced_nodes(self, replacement, replaced): + """ + Remove each of the `replaced` nodes (when they exist). Any + dependencies that were referencing them are changed to reference the + `replacement` node instead. + """ + # Cast list of replaced keys to set to speed up lookup later. + replaced = set(replaced) + try: + replacement_node = self.node_map[replacement] + except KeyError as err: + raise NodeNotFoundError( + "Unable to find replacement node %r. It was either never added" + " to the migration graph, or has been removed." % (replacement, ), + replacement + ) from err + for replaced_key in replaced: + self.nodes.pop(replaced_key, None) + replaced_node = self.node_map.pop(replaced_key, None) + if replaced_node: + for child in replaced_node.children: + child.parents.remove(replaced_node) + # We don't want to create dependencies between the replaced + # node and the replacement node as this would lead to + # self-referencing on the replacement node at a later iteration. + if child.key not in replaced: + replacement_node.add_child(child) + child.add_parent(replacement_node) + for parent in replaced_node.parents: + parent.children.remove(replaced_node) + # Again, to avoid self-referencing. 
+ if parent.key not in replaced: + replacement_node.add_parent(parent) + parent.add_child(replacement_node) + self.clear_cache() + + def remove_replacement_node(self, replacement, replaced): + """ + The inverse operation to `remove_replaced_nodes`. Almost. Remove the + replacement node `replacement` and remap its child nodes to `replaced` + - the list of nodes it would have replaced. Don't remap its parent + nodes as they are expected to be correct already. + """ + self.nodes.pop(replacement, None) + try: + replacement_node = self.node_map.pop(replacement) + except KeyError as err: + raise NodeNotFoundError( + "Unable to remove replacement node %r. It was either never added" + " to the migration graph, or has been removed already." % (replacement, ), + replacement + ) from err + replaced_nodes = set() + replaced_nodes_parents = set() + for key in replaced: + replaced_node = self.node_map.get(key) + if replaced_node: + replaced_nodes.add(replaced_node) + replaced_nodes_parents |= replaced_node.parents + # We're only interested in the latest replaced node, so filter out + # replaced nodes that are parents of other replaced nodes. + replaced_nodes -= replaced_nodes_parents + for child in replacement_node.children: + child.parents.remove(replacement_node) + for replaced_node in replaced_nodes: + replaced_node.add_child(child) + child.add_parent(replaced_node) + for parent in replacement_node.parents: + parent.children.remove(replacement_node) + # NOTE: There is no need to remap parent dependencies as we can + # assume the replaced nodes already have the correct ancestry. 
+ self.clear_cache() + + def validate_consistency(self): + """Ensure there are no dummy nodes remaining in the graph.""" + [n.raise_error() for n in self.node_map.values() if isinstance(n, DummyNode)] + + def clear_cache(self): + if self.cached: + for node in self.nodes: + self.node_map[node].__dict__.pop('_ancestors', None) + self.node_map[node].__dict__.pop('_descendants', None) + self.cached = False + + def forwards_plan(self, target): + """ + Given a node, return a list of which previous nodes (dependencies) must + be applied, ending with the node itself. This is the list you would + follow if applying the migrations to a database. + """ + if target not in self.nodes: + raise NodeNotFoundError("Node %r not a valid node" % (target, ), target) + # Use parent.key instead of parent to speed up the frequent hashing in ensure_not_cyclic + self.ensure_not_cyclic(target, lambda x: (parent.key for parent in self.node_map[x].parents)) + self.cached = True + node = self.node_map[target] + try: + return node.ancestors() + except RuntimeError: + # fallback to iterative dfs + warnings.warn(RECURSION_DEPTH_WARNING, RuntimeWarning) + return self.iterative_dfs(node) + + def backwards_plan(self, target): + """ + Given a node, return a list of which dependent nodes (dependencies) + must be unapplied, ending with the node itself. This is the list you + would follow if removing the migrations from a database. 
+ """ + if target not in self.nodes: + raise NodeNotFoundError("Node %r not a valid node" % (target, ), target) + # Use child.key instead of child to speed up the frequent hashing in ensure_not_cyclic + self.ensure_not_cyclic(target, lambda x: (child.key for child in self.node_map[x].children)) + self.cached = True + node = self.node_map[target] + try: + return node.descendants() + except RuntimeError: + # fallback to iterative dfs + warnings.warn(RECURSION_DEPTH_WARNING, RuntimeWarning) + return self.iterative_dfs(node, forwards=False) + + def iterative_dfs(self, start, forwards=True): + """Iterative depth-first search for finding dependencies.""" + visited = [] + stack = [start] + while stack: + node = stack.pop() + visited.append(node) + stack += sorted(node.parents if forwards else node.children) + return list(OrderedSet(reversed(visited))) + + def root_nodes(self, app=None): + """ + Return all root nodes - that is, nodes with no dependencies inside + their app. These are the starting point for an app. + """ + roots = set() + for node in self.nodes: + if not any(key[0] == node[0] for key in self.node_map[node].parents) and (not app or app == node[0]): + roots.add(node) + return sorted(roots) + + def leaf_nodes(self, app=None): + """ + Return all leaf nodes - that is, nodes with no dependents in their app. + These are the "most current" version of an app's schema. + Having more than one per app is technically an error, but one that + gets handled further up, in the interactive command - it's usually the + result of a VCS merge and needs some user input. 
+ """ + leaves = set() + for node in self.nodes: + if not any(key[0] == node[0] for key in self.node_map[node].children) and (not app or app == node[0]): + leaves.add(node) + return sorted(leaves) + + def ensure_not_cyclic(self, start, get_children): + # Algo from GvR: + # http://neopythonic.blogspot.co.uk/2009/01/detecting-cycles-in-directed-graph.html + todo = set(self.nodes) + while todo: + node = todo.pop() + stack = [node] + while stack: + top = stack[-1] + for node in get_children(top): + if node in stack: + cycle = stack[stack.index(node):] + raise CircularDependencyError(", ".join("%s.%s" % n for n in cycle)) + if node in todo: + stack.append(node) + todo.remove(node) + break + else: + node = stack.pop() + + def __str__(self): + return 'Graph: %s nodes, %s edges' % self._nodes_and_edges() + + def __repr__(self): + nodes, edges = self._nodes_and_edges() + return '<%s: nodes=%s, edges=%s>' % (self.__class__.__name__, nodes, edges) + + def _nodes_and_edges(self): + return len(self.nodes), sum(len(node.parents) for node in self.node_map.values()) + + def make_state(self, nodes=None, at_end=True, real_apps=None): + """ + Given a migration node or nodes, return a complete ProjectState for it. + If at_end is False, return the state before the migration has run. + If nodes is not provided, return the overall most current project state. 
+ """ + if nodes is None: + nodes = list(self.leaf_nodes()) + if len(nodes) == 0: + return ProjectState() + if not isinstance(nodes[0], tuple): + nodes = [nodes] + plan = [] + for node in nodes: + for migration in self.forwards_plan(node): + if migration not in plan: + if not at_end and migration in nodes: + continue + plan.append(migration) + project_state = ProjectState(real_apps=real_apps) + for node in plan: + project_state = self.nodes[node].mutate_state(project_state, preserve=False) + return project_state + + def __contains__(self, node): + return node in self.nodes diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/loader.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/loader.py new file mode 100644 index 0000000000000000000000000000000000000000..a6fe1afc15fb2e3670895b845a8b39c414222fbf --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/loader.py @@ -0,0 +1,317 @@ +import os +import sys +from importlib import import_module, reload + +from django.apps import apps +from django.conf import settings +from django.db.migrations.graph import MigrationGraph +from django.db.migrations.recorder import MigrationRecorder + +from .exceptions import ( + AmbiguityError, BadMigrationError, InconsistentMigrationHistory, + NodeNotFoundError, +) + +MIGRATIONS_MODULE_NAME = 'migrations' + + +class MigrationLoader: + """ + Load migration files from disk and their status from the database. + + Migration files are expected to live in the "migrations" directory of + an app. Their names are entirely unimportant from a code perspective, + but will probably follow the 1234_name.py convention. + + On initialization, this class will scan those directories, and open and + read the python files, looking for a class called Migration, which should + inherit from django.db.migrations.Migration. 
See + django.db.migrations.migration for what that looks like. + + Some migrations will be marked as "replacing" another set of migrations. + These are loaded into a separate set of migrations away from the main ones. + If all the migrations they replace are either unapplied or missing from + disk, then they are injected into the main set, replacing the named migrations. + Any dependency pointers to the replaced migrations are re-pointed to the + new migration. + + This does mean that this class MUST also talk to the database as well as + to disk, but this is probably fine. We're already not just operating + in memory. + """ + + def __init__(self, connection, load=True, ignore_no_migrations=False): + self.connection = connection + self.disk_migrations = None + self.applied_migrations = None + self.ignore_no_migrations = ignore_no_migrations + if load: + self.build_graph() + + @classmethod + def migrations_module(cls, app_label): + """ + Return the path to the migrations module for the specified app_label + and a boolean indicating if the module is specified in + settings.MIGRATION_MODULE. + """ + if app_label in settings.MIGRATION_MODULES: + return settings.MIGRATION_MODULES[app_label], True + else: + app_package_name = apps.get_app_config(app_label).name + return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME), False + + def load_disk(self): + """Load the migrations from all INSTALLED_APPS from disk.""" + self.disk_migrations = {} + self.unmigrated_apps = set() + self.migrated_apps = set() + for app_config in apps.get_app_configs(): + # Get the migrations module directory + module_name, explicit = self.migrations_module(app_config.label) + if module_name is None: + self.unmigrated_apps.add(app_config.label) + continue + was_loaded = module_name in sys.modules + try: + module = import_module(module_name) + except ImportError as e: + # I hate doing this, but I don't want to squash other import errors. + # Might be better to try a directory check directly. 
+ if ((explicit and self.ignore_no_migrations) or ( + not explicit and "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e))): + self.unmigrated_apps.add(app_config.label) + continue + raise + else: + # Empty directories are namespaces. + # getattr() needed on PY36 and older (replace w/attribute access). + if getattr(module, '__file__', None) is None: + self.unmigrated_apps.add(app_config.label) + continue + # Module is not a package (e.g. migrations.py). + if not hasattr(module, '__path__'): + self.unmigrated_apps.add(app_config.label) + continue + # Force a reload if it's already loaded (tests need this) + if was_loaded: + reload(module) + self.migrated_apps.add(app_config.label) + directory = os.path.dirname(module.__file__) + # Scan for .py files + migration_names = set() + for name in os.listdir(directory): + if name.endswith(".py"): + import_name = name.rsplit(".", 1)[0] + if import_name[0] not in "_.~": + migration_names.add(import_name) + # Load them + for migration_name in migration_names: + migration_module = import_module("%s.%s" % (module_name, migration_name)) + if not hasattr(migration_module, "Migration"): + raise BadMigrationError( + "Migration %s in app %s has no Migration class" % (migration_name, app_config.label) + ) + self.disk_migrations[app_config.label, migration_name] = migration_module.Migration( + migration_name, + app_config.label, + ) + + def get_migration(self, app_label, name_prefix): + """Return the named migration or raise NodeNotFoundError.""" + return self.graph.nodes[app_label, name_prefix] + + def get_migration_by_prefix(self, app_label, name_prefix): + """ + Return the migration(s) which match the given app label and name_prefix. 
+ """ + # Do the search + results = [] + for migration_app_label, migration_name in self.disk_migrations: + if migration_app_label == app_label and migration_name.startswith(name_prefix): + results.append((migration_app_label, migration_name)) + if len(results) > 1: + raise AmbiguityError( + "There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix) + ) + elif len(results) == 0: + raise KeyError("There no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix)) + else: + return self.disk_migrations[results[0]] + + def check_key(self, key, current_app): + if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph: + return key + # Special-case __first__, which means "the first migration" for + # migrated apps, and is ignored for unmigrated apps. It allows + # makemigrations to declare dependencies on apps before they even have + # migrations. + if key[0] == current_app: + # Ignore __first__ references to the same app (#22325) + return + if key[0] in self.unmigrated_apps: + # This app isn't migrated, but something depends on it. + # The models will get auto-added into the state, though + # so we're fine. + return + if key[0] in self.migrated_apps: + try: + if key[1] == "__first__": + return self.graph.root_nodes(key[0])[0] + else: # "__latest__" + return self.graph.leaf_nodes(key[0])[0] + except IndexError: + if self.ignore_no_migrations: + return None + else: + raise ValueError("Dependency on app with no migrations: %s" % key[0]) + raise ValueError("Dependency on unknown app: %s" % key[0]) + + def add_internal_dependencies(self, key, migration): + """ + Internal dependencies need to be added first to ensure `__first__` + dependencies find the correct root node. + """ + for parent in migration.dependencies: + if parent[0] != key[0] or parent[1] == '__first__': + # Ignore __first__ references to the same app (#22325). 
+ continue + self.graph.add_dependency(migration, key, parent, skip_validation=True) + + def add_external_dependencies(self, key, migration): + for parent in migration.dependencies: + # Skip internal dependencies + if key[0] == parent[0]: + continue + parent = self.check_key(parent, key[0]) + if parent is not None: + self.graph.add_dependency(migration, key, parent, skip_validation=True) + for child in migration.run_before: + child = self.check_key(child, key[0]) + if child is not None: + self.graph.add_dependency(migration, child, key, skip_validation=True) + + def build_graph(self): + """ + Build a migration dependency graph using both the disk and database. + You'll need to rebuild the graph if you apply migrations. This isn't + usually a problem as generally migration stuff runs in a one-shot process. + """ + # Load disk data + self.load_disk() + # Load database data + if self.connection is None: + self.applied_migrations = set() + else: + recorder = MigrationRecorder(self.connection) + self.applied_migrations = recorder.applied_migrations() + # To start, populate the migration graph with nodes for ALL migrations + # and their dependencies. Also make note of replacing migrations at this step. + self.graph = MigrationGraph() + self.replacements = {} + for key, migration in self.disk_migrations.items(): + self.graph.add_node(key, migration) + # Internal (aka same-app) dependencies. + self.add_internal_dependencies(key, migration) + # Replacing migrations. + if migration.replaces: + self.replacements[key] = migration + # Add external dependencies now that the internal ones have been resolved. + for key, migration in self.disk_migrations.items(): + self.add_external_dependencies(key, migration) + # Carry out replacements where possible. + for key, migration in self.replacements.items(): + # Get applied status of each of this migration's replacement targets. 
+ applied_statuses = [(target in self.applied_migrations) for target in migration.replaces] + # Ensure the replacing migration is only marked as applied if all of + # its replacement targets are. + if all(applied_statuses): + self.applied_migrations.add(key) + else: + self.applied_migrations.discard(key) + # A replacing migration can be used if either all or none of its + # replacement targets have been applied. + if all(applied_statuses) or (not any(applied_statuses)): + self.graph.remove_replaced_nodes(key, migration.replaces) + else: + # This replacing migration cannot be used because it is partially applied. + # Remove it from the graph and remap dependencies to it (#25945). + self.graph.remove_replacement_node(key, migration.replaces) + # Ensure the graph is consistent. + try: + self.graph.validate_consistency() + except NodeNotFoundError as exc: + # Check if the missing node could have been replaced by any squash + # migration but wasn't because the squash migration was partially + # applied before. In that case raise a more understandable exception + # (#23556). + # Get reverse replacements. + reverse_replacements = {} + for key, migration in self.replacements.items(): + for replaced in migration.replaces: + reverse_replacements.setdefault(replaced, set()).add(key) + # Try to reraise exception with more detail. + if exc.node in reverse_replacements: + candidates = reverse_replacements.get(exc.node, set()) + is_replaced = any(candidate in self.graph.nodes for candidate in candidates) + if not is_replaced: + tries = ', '.join('%s.%s' % c for c in candidates) + raise NodeNotFoundError( + "Migration {0} depends on nonexistent node ('{1}', '{2}'). 
" + "Django tried to replace migration {1}.{2} with any of [{3}] " + "but wasn't able to because some of the replaced migrations " + "are already applied.".format( + exc.origin, exc.node[0], exc.node[1], tries + ), + exc.node + ) from exc + raise exc + + def check_consistent_history(self, connection): + """ + Raise InconsistentMigrationHistory if any applied migrations have + unapplied dependencies. + """ + recorder = MigrationRecorder(connection) + applied = recorder.applied_migrations() + for migration in applied: + # If the migration is unknown, skip it. + if migration not in self.graph.nodes: + continue + for parent in self.graph.node_map[migration].parents: + if parent not in applied: + # Skip unapplied squashed migrations that have all of their + # `replaces` applied. + if parent in self.replacements: + if all(m in applied for m in self.replacements[parent].replaces): + continue + raise InconsistentMigrationHistory( + "Migration {}.{} is applied before its dependency " + "{}.{} on database '{}'.".format( + migration[0], migration[1], parent[0], parent[1], + connection.alias, + ) + ) + + def detect_conflicts(self): + """ + Look through the loaded graph and detect any conflicts - apps + with more than one leaf migration. Return a dict of the app labels + that conflict with the migration names that conflict. + """ + seen_apps = {} + conflicting_apps = set() + for app_label, migration_name in self.graph.leaf_nodes(): + if app_label in seen_apps: + conflicting_apps.add(app_label) + seen_apps.setdefault(app_label, set()).add(migration_name) + return {app_label: seen_apps[app_label] for app_label in conflicting_apps} + + def project_state(self, nodes=None, at_end=True): + """ + Return a ProjectState object representing the most recent state + that the loaded migrations represent. + + See graph.make_state() for the meaning of "nodes" and "at_end". 
+ """ + return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps)) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/migration.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/migration.py new file mode 100644 index 0000000000000000000000000000000000000000..ffe0b1fb3dded69ebd81a23103d7d3bd54ff00c6 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/migration.py @@ -0,0 +1,191 @@ +from django.db.transaction import atomic + +from .exceptions import IrreversibleError + + +class Migration: + """ + The base class for all migrations. + + Migration files will import this from django.db.migrations.Migration + and subclass it as a class called Migration. It will have one or more + of the following attributes: + + - operations: A list of Operation instances, probably from django.db.migrations.operations + - dependencies: A list of tuples of (app_path, migration_name) + - run_before: A list of tuples of (app_path, migration_name) + - replaces: A list of migration_names + + Note that all migrations come out of migrations and into the Loader or + Graph as instances, having been initialized with their app label and name. + """ + + # Operations to apply during this migration, in order. + operations = [] + + # Other migrations that should be run before this migration. + # Should be a list of (app, migration_name). + dependencies = [] + + # Other migrations that should be run after this one (i.e. have + # this migration added to their dependencies). Useful to make third-party + # apps' migrations run after your AUTH_USER replacement, for example. + run_before = [] + + # Migration names in this app that this migration replaces. If this is + # non-empty, this migration will only be applied if all these migrations + # are not applied. + replaces = [] + + # Is this an initial migration? 
Initial migrations are skipped on + # --fake-initial if the table or fields already exist. If None, check if + # the migration has any dependencies to determine if there are dependencies + # to tell if db introspection needs to be done. If True, always perform + # introspection. If False, never perform introspection. + initial = None + + # Whether to wrap the whole migration in a transaction. Only has an effect + # on database backends which support transactional DDL. + atomic = True + + def __init__(self, name, app_label): + self.name = name + self.app_label = app_label + # Copy dependencies & other attrs as we might mutate them at runtime + self.operations = list(self.__class__.operations) + self.dependencies = list(self.__class__.dependencies) + self.run_before = list(self.__class__.run_before) + self.replaces = list(self.__class__.replaces) + + def __eq__(self, other): + if not isinstance(other, Migration): + return False + return (self.name == other.name) and (self.app_label == other.app_label) + + def __repr__(self): + return "" % (self.app_label, self.name) + + def __str__(self): + return "%s.%s" % (self.app_label, self.name) + + def __hash__(self): + return hash("%s.%s" % (self.app_label, self.name)) + + def mutate_state(self, project_state, preserve=True): + """ + Take a ProjectState and return a new one with the migration's + operations applied to it. Preserve the original object state by + default and return a mutated state from a copy. + """ + new_state = project_state + if preserve: + new_state = project_state.clone() + + for operation in self.operations: + operation.state_forwards(self.app_label, new_state) + return new_state + + def apply(self, project_state, schema_editor, collect_sql=False): + """ + Take a project_state representing all migrations prior to this one + and a schema_editor for a live database and apply the migration + in a forwards order. + + Return the resulting project state for efficient reuse by following + Migrations. 
+ """ + for operation in self.operations: + # If this operation cannot be represented as SQL, place a comment + # there instead + if collect_sql: + schema_editor.collected_sql.append("--") + if not operation.reduces_to_sql: + schema_editor.collected_sql.append( + "-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:" + ) + schema_editor.collected_sql.append("-- %s" % operation.describe()) + schema_editor.collected_sql.append("--") + if not operation.reduces_to_sql: + continue + # Save the state before the operation has run + old_state = project_state.clone() + operation.state_forwards(self.app_label, project_state) + # Run the operation + atomic_operation = operation.atomic or (self.atomic and operation.atomic is not False) + if not schema_editor.atomic_migration and atomic_operation: + # Force a transaction on a non-transactional-DDL backend or an + # atomic operation inside a non-atomic migration. + with atomic(schema_editor.connection.alias): + operation.database_forwards(self.app_label, schema_editor, old_state, project_state) + else: + # Normal behaviour + operation.database_forwards(self.app_label, schema_editor, old_state, project_state) + return project_state + + def unapply(self, project_state, schema_editor, collect_sql=False): + """ + Take a project_state representing all migrations prior to this one + and a schema_editor for a live database and apply the migration + in a reverse order. + + The backwards migration process consists of two phases: + + 1. The intermediate states from right before the first until right + after the last operation inside this migration are preserved. + 2. The operations are applied in reverse order using the states + recorded in step 1. 
+ """ + # Construct all the intermediate states we need for a reverse migration + to_run = [] + new_state = project_state + # Phase 1 + for operation in self.operations: + # If it's irreversible, error out + if not operation.reversible: + raise IrreversibleError("Operation %s in %s is not reversible" % (operation, self)) + # Preserve new state from previous run to not tamper the same state + # over all operations + new_state = new_state.clone() + old_state = new_state.clone() + operation.state_forwards(self.app_label, new_state) + to_run.insert(0, (operation, old_state, new_state)) + + # Phase 2 + for operation, to_state, from_state in to_run: + if collect_sql: + schema_editor.collected_sql.append("--") + if not operation.reduces_to_sql: + schema_editor.collected_sql.append( + "-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:" + ) + schema_editor.collected_sql.append("-- %s" % operation.describe()) + schema_editor.collected_sql.append("--") + if not operation.reduces_to_sql: + continue + atomic_operation = operation.atomic or (self.atomic and operation.atomic is not False) + if not schema_editor.atomic_migration and atomic_operation: + # Force a transaction on a non-transactional-DDL backend or an + # atomic operation inside a non-atomic migration. + with atomic(schema_editor.connection.alias): + operation.database_backwards(self.app_label, schema_editor, from_state, to_state) + else: + # Normal behaviour + operation.database_backwards(self.app_label, schema_editor, from_state, to_state) + return project_state + + +class SwappableTuple(tuple): + """ + Subclass of tuple so Django can tell this was originally a swappable + dependency when it reads the migration file. 
+ """ + + def __new__(cls, value, setting): + self = tuple.__new__(cls, value) + self.setting = setting + return self + + +def swappable_dependency(value): + """Turn a setting value into a dependency.""" + return SwappableTuple((value.split(".", 1)[0], "__first__"), value) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/operations/__init__.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/operations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..894f2ab9c54c351bb6a2cee4edb20a73787c67e1 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/operations/__init__.py @@ -0,0 +1,15 @@ +from .fields import AddField, AlterField, RemoveField, RenameField +from .models import ( + AddIndex, AlterIndexTogether, AlterModelManagers, AlterModelOptions, + AlterModelTable, AlterOrderWithRespectTo, AlterUniqueTogether, CreateModel, + DeleteModel, RemoveIndex, RenameModel, +) +from .special import RunPython, RunSQL, SeparateDatabaseAndState + +__all__ = [ + 'CreateModel', 'DeleteModel', 'AlterModelTable', 'AlterUniqueTogether', + 'RenameModel', 'AlterIndexTogether', 'AlterModelOptions', 'AddIndex', + 'RemoveIndex', 'AddField', 'RemoveField', 'AlterField', 'RenameField', + 'SeparateDatabaseAndState', 'RunSQL', 'RunPython', + 'AlterOrderWithRespectTo', 'AlterModelManagers', +] diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/operations/base.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/operations/base.py new file mode 100644 index 0000000000000000000000000000000000000000..6ecbfac40584ad1ce0dcb5095be7286bed465419 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/operations/base.py @@ -0,0 +1,132 @@ +from django.db import router + + 
class Operation:
    """
    Abstract base for a single migration operation.

    An operation plays two roles: it mutates the in-memory project state
    (see db/migrations/state.py) to describe its effect, and it applies
    that effect to a live database through a schema editor. Some
    operations touch only one of the two (e.g. data-copying operations
    leave the model state alone, and custom Python snippets may do
    anything the user specifies).

    Because deconstruct() simply replays the constructor arguments,
    instances should be treated as immutable.
    """

    # Whether this operation can be unapplied (deleting data, for
    # example, cannot be reversed).
    reversible = True

    # Whether the operation can be expressed as SQL (RunPython cannot).
    reduces_to_sql = True

    # Force the operation into a transaction even on backends without
    # DDL transaction support (i.e. it runs no DDL itself, like RunPython).
    atomic = False

    # May the optimizer drop this operation or optimize across it?
    elidable = False

    serialization_expand_args = []

    def __new__(cls, *args, **kwargs):
        # Record the constructor arguments so deconstruct() is trivial.
        instance = object.__new__(cls)
        instance._constructor_args = (args, kwargs)
        return instance

    def deconstruct(self):
        """
        Return a 3-tuple of class import path (or just name if it lives
        under django.db.migrations), positional arguments, and keyword
        arguments.
        """
        args, kwargs = self._constructor_args
        return self.__class__.__name__, args, kwargs

    def state_forwards(self, app_label, state):
        """
        Mutate ``state`` (the project state left by the previous
        migration) so it reflects what this operation performs.
        """
        raise NotImplementedError('subclasses of Operation must provide a state_forwards() method')

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        """Apply the schema mutation to the database (forwards)."""
        raise NotImplementedError('subclasses of Operation must provide a database_forwards() method')

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        """
        Undo the schema mutation on the database - e.g. if this were
        CreateModel, it would drop the model's table.
        """
        raise NotImplementedError('subclasses of Operation must provide a database_backwards() method')

    def describe(self):
        """Return a short, human-readable summary of the action."""
        return f"{self.__class__.__name__}: {self._constructor_args}"

    def references_model(self, name, app_label=None):
        """
        Return True if there is a chance this operation references the
        given model name (as a string), optionally narrowed by app label.

        Used by the optimizer: a false positive merely costs a missed
        optimization, while a false negative can produce an unusable
        optimized migration - so when in doubt, answer True.
        """
        return True

    def references_field(self, model_name, name, app_label=None):
        """
        Return True if there is a chance this operation references the
        given field name. Same accuracy contract as references_model().
        """
        return self.references_model(model_name, app_label)

    def allow_migrate_model(self, connection_alias, model):
        """
        Return whether ``model`` may be migrated on this connection.

        Thin wrapper around router.allow_migrate_model() that first
        rejects any model that cannot migrate at all (proxy, swapped
        out, or unmanaged).
        """
        if not model._meta.can_migrate(connection_alias):
            return False
        return router.allow_migrate_model(connection_alias, model)

    def reduce(self, operation, in_between, app_label=None):
        """
        Either return a list of operations this one should be replaced
        with when combined with ``operation``, or a boolean saying
        whether ``operation`` can be optimized across.
        """
        if self.elidable:
            return [operation]
        if operation.elidable:
            return [self]
        return False

    def __repr__(self):
        positional = ", ".join(repr(arg) for arg in self._constructor_args[0])
        keyword = ",".join(
            " %s=%r" % item for item in self._constructor_args[1].items()
        )
        return "<%s %s%s>" % (self.__class__.__name__, positional, keyword)


class FieldOperation(Operation):
    """Common base for operations acting on a single field of one model."""

    def __init__(self, model_name, name):
        self.model_name = model_name
        self.name = name

    @cached_property
    def model_name_lower(self):
        return self.model_name.lower()

    @cached_property
    def name_lower(self):
        return self.name.lower()

    def is_same_model_operation(self, operation):
        """True when ``operation`` targets the same model (case-insensitive)."""
        return self.model_name_lower == operation.model_name_lower

    def is_same_field_operation(self, operation):
        """True when ``operation`` targets the same model and field."""
        return self.is_same_model_operation(operation) and self.name_lower == operation.name_lower

    def references_model(self, name, app_label=None):
        return name.lower() == self.model_name_lower

    def references_field(self, model_name, name, app_label=None):
        return self.references_model(model_name) and name.lower() == self.name_lower

    def reduce(self, operation, in_between, app_label=None):
        return (
            super().reduce(operation, in_between, app_label=app_label) or
            not operation.references_field(self.model_name, self.name, app_label)
        )
class AddField(FieldOperation):
    """Add a field to a model."""

    def __init__(self, model_name, name, field, preserve_default=True):
        self.field = field
        self.preserve_default = preserve_default
        super().__init__(model_name, name)

    def deconstruct(self):
        """Return (name, args, kwargs) for serializing into a migration file."""
        kwargs = {
            'model_name': self.model_name,
            'name': self.name,
            'field': self.field,
        }
        if self.preserve_default is not True:
            kwargs['preserve_default'] = self.preserve_default
        return (
            self.__class__.__name__,
            [],
            kwargs
        )

    def state_forwards(self, app_label, state):
        # If preserve default is off, don't use the default for future state
        if not self.preserve_default:
            field = self.field.clone()
            field.default = NOT_PROVIDED
        else:
            field = self.field
        state.models[app_label, self.model_name_lower].fields.append((self.name, field))
        # Delay rendering of relationships if it's not a relational field
        delay = not field.is_relation
        state.reload_model(app_label, self.model_name_lower, delay=delay)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        """Add the column via the schema editor, temporarily restoring the
        original default so existing rows get a value."""
        to_model = to_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, to_model):
            from_model = from_state.apps.get_model(app_label, self.model_name)
            field = to_model._meta.get_field(self.name)
            if not self.preserve_default:
                field.default = self.field.default
            schema_editor.add_field(
                from_model,
                field,
            )
            if not self.preserve_default:
                # Put the sentinel back so later uses of the field don't see
                # the one-off default.
                field.default = NOT_PROVIDED

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        """Reverse of add: drop the column again."""
        from_model = from_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, from_model):
            schema_editor.remove_field(from_model, from_model._meta.get_field(self.name))

    def describe(self):
        return "Add field %s to %s" % (self.name, self.model_name)

    def reduce(self, operation, in_between, app_label=None):
        # Fold a later operation on the same field into this AddField:
        # AlterField replaces the field definition, RemoveField cancels the
        # add entirely, RenameField adds under the new name directly.
        if isinstance(operation, FieldOperation) and self.is_same_field_operation(operation):
            if isinstance(operation, AlterField):
                return [
                    AddField(
                        model_name=self.model_name,
                        name=operation.name,
                        field=operation.field,
                    ),
                ]
            elif isinstance(operation, RemoveField):
                return []
            elif isinstance(operation, RenameField):
                return [
                    AddField(
                        model_name=self.model_name,
                        name=operation.new_name,
                        field=self.field,
                    ),
                ]
        return super().reduce(operation, in_between, app_label=app_label)


class RemoveField(FieldOperation):
    """Remove a field from a model."""

    def deconstruct(self):
        """Return (name, args, kwargs) for serializing into a migration file."""
        kwargs = {
            'model_name': self.model_name,
            'name': self.name,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )

    def state_forwards(self, app_label, state):
        # Rebuild the field list without the removed field, keeping a handle
        # on the removed instance to decide whether reloading can be delayed.
        new_fields = []
        old_field = None
        for name, instance in state.models[app_label, self.model_name_lower].fields:
            if name != self.name:
                new_fields.append((name, instance))
            else:
                old_field = instance
        state.models[app_label, self.model_name_lower].fields = new_fields
        # Delay rendering of relationships if it's not a relational field
        delay = not old_field.is_relation
        state.reload_model(app_label, self.model_name_lower, delay=delay)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        from_model = from_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, from_model):
            schema_editor.remove_field(from_model, from_model._meta.get_field(self.name))

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        """Reverse of remove: re-add the field as it exists in ``to_state``."""
        to_model = to_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, to_model):
            from_model = from_state.apps.get_model(app_label, self.model_name)
            schema_editor.add_field(from_model, to_model._meta.get_field(self.name))

    def describe(self):
        return "Remove field %s from %s" % (self.name, self.model_name)


class AlterField(FieldOperation):
    """
    Alter a field's database column (e.g. null, max_length) to the provided
    new field.
    """

    def __init__(self, model_name, name, field, preserve_default=True):
        self.field = field
        self.preserve_default = preserve_default
        super().__init__(model_name, name)

    def deconstruct(self):
        """Return (name, args, kwargs) for serializing into a migration file."""
        kwargs = {
            'model_name': self.model_name,
            'name': self.name,
            'field': self.field,
        }
        if self.preserve_default is not True:
            kwargs['preserve_default'] = self.preserve_default
        return (
            self.__class__.__name__,
            [],
            kwargs
        )

    def state_forwards(self, app_label, state):
        if not self.preserve_default:
            field = self.field.clone()
            field.default = NOT_PROVIDED
        else:
            field = self.field
        # Swap the field in place, keeping the original ordering.
        state.models[app_label, self.model_name_lower].fields = [
            (n, field if n == self.name else f)
            for n, f in
            state.models[app_label, self.model_name_lower].fields
        ]
        # TODO: investigate if old relational fields must be reloaded or if it's
        # sufficient if the new field is (#27737).
        # Delay rendering of relationships if it's not a relational field and
        # not referenced by a foreign key.
        delay = (
            not field.is_relation and
            not is_referenced_by_foreign_key(state, self.model_name_lower, self.field, self.name)
        )
        state.reload_model(app_label, self.model_name_lower, delay=delay)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        to_model = to_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, to_model):
            from_model = from_state.apps.get_model(app_label, self.model_name)
            from_field = from_model._meta.get_field(self.name)
            to_field = to_model._meta.get_field(self.name)
            if not self.preserve_default:
                to_field.default = self.field.default
            schema_editor.alter_field(from_model, from_field, to_field)
            if not self.preserve_default:
                to_field.default = NOT_PROVIDED

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Altering is symmetric: applying forwards with the states swapped
        # by the caller performs the reverse alteration.
        self.database_forwards(app_label, schema_editor, from_state, to_state)

    def describe(self):
        return "Alter field %s on %s" % (self.name, self.model_name)

    def reduce(self, operation, in_between, app_label=None):
        # A later RemoveField makes this alter pointless; a later RenameField
        # must be followed by re-issuing the alter under the new name.
        if isinstance(operation, RemoveField) and self.is_same_field_operation(operation):
            return [operation]
        elif isinstance(operation, RenameField) and self.is_same_field_operation(operation):
            return [
                operation,
                AlterField(
                    model_name=self.model_name,
                    name=operation.new_name,
                    field=self.field,
                ),
            ]
        return super().reduce(operation, in_between, app_label=app_label)


class RenameField(FieldOperation):
    """Rename a field on the model. Might affect db_column too."""

    def __init__(self, model_name, old_name, new_name):
        self.old_name = old_name
        self.new_name = new_name
        # self.name (from FieldOperation) is the OLD field name.
        super().__init__(model_name, old_name)

    @cached_property
    def old_name_lower(self):
        return self.old_name.lower()

    @cached_property
    def new_name_lower(self):
        return self.new_name.lower()

    def deconstruct(self):
        """Return (name, args, kwargs) for serializing into a migration file."""
        kwargs = {
            'model_name': self.model_name,
            'old_name': self.old_name,
            'new_name': self.new_name,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )

    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.model_name_lower]
        # Rename the field
        fields = model_state.fields
        for index, (name, field) in enumerate(fields):
            if name == self.old_name:
                fields[index] = (self.new_name, field)
                # Delay rendering of relationships if it's not a relational
                # field and not referenced by a foreign key.
                delay = (
                    not field.is_relation and
                    not is_referenced_by_foreign_key(state, self.model_name_lower, field, self.name)
                )
                break
        else:
            raise FieldDoesNotExist(
                "%s.%s has no field named '%s'" % (app_label, self.model_name, self.old_name)
            )
        # Fix index/unique_together to refer to the new field
        options = model_state.options
        for option in ('index_together', 'unique_together'):
            if option in options:
                options[option] = [
                    [self.new_name if n == self.old_name else n for n in together]
                    for together in options[option]
                ]
        state.reload_model(app_label, self.model_name_lower, delay=delay)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        to_model = to_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, to_model):
            from_model = from_state.apps.get_model(app_label, self.model_name)
            schema_editor.alter_field(
                from_model,
                from_model._meta.get_field(self.old_name),
                to_model._meta.get_field(self.new_name),
            )

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Same alter_field call with old/new swapped relative to forwards.
        to_model = to_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, to_model):
            from_model = from_state.apps.get_model(app_label, self.model_name)
            schema_editor.alter_field(
                from_model,
                from_model._meta.get_field(self.new_name),
                to_model._meta.get_field(self.old_name),
            )

    def describe(self):
        return "Rename field %s on %s to %s" % (self.old_name, self.model_name, self.new_name)

    def references_field(self, model_name, name, app_label=None):
        # A rename references both the old and the new field name.
        return self.references_model(model_name) and (
            name.lower() == self.old_name_lower or
            name.lower() == self.new_name_lower
        )

    def reduce(self, operation, in_between, app_label=None):
        # Two chained renames collapse into a single old_name -> final name.
        if (isinstance(operation, RenameField) and
                self.is_same_model_operation(operation) and
                self.new_name_lower == operation.old_name_lower):
            return [
                RenameField(
                    self.model_name,
                    self.old_name,
                    operation.new_name,
                ),
            ]
        # Skip `FieldOperation.reduce` as we want to run `references_field`
        # against self.new_name.
        return (
            super(FieldOperation, self).reduce(operation, in_between, app_label=app_label) or
            not operation.references_field(self.model_name, self.new_name, app_label)
        )
def _check_for_duplicates(arg_name, objs):
    """Raise ValueError if ``objs`` yields any value twice (validates
    CreateModel's fields/bases/managers arguments)."""
    used_vals = set()
    for val in objs:
        if val in used_vals:
            raise ValueError(
                "Found duplicate value %s in CreateModel %s argument." % (val, arg_name)
            )
        used_vals.add(val)


class ModelOperation(Operation):
    """Common base for operations acting on a whole model."""

    def __init__(self, name):
        self.name = name

    @cached_property
    def name_lower(self):
        return self.name.lower()

    def references_model(self, name, app_label=None):
        return name.lower() == self.name_lower

    def reduce(self, operation, in_between, app_label=None):
        return (
            super().reduce(operation, in_between, app_label=app_label) or
            not operation.references_model(self.name, app_label)
        )


class CreateModel(ModelOperation):
    """Create a model's table."""

    serialization_expand_args = ['fields', 'options', 'managers']

    def __init__(self, name, fields, options=None, bases=None, managers=None):
        self.fields = fields
        self.options = options or {}
        self.bases = bases or (models.Model,)
        self.managers = managers or []
        super().__init__(name)
        # Sanity-check that there are no duplicated field names, bases, or
        # manager names
        _check_for_duplicates('fields', (name for name, _ in self.fields))
        _check_for_duplicates('bases', (
            base._meta.label_lower if hasattr(base, '_meta') else
            base.lower() if isinstance(base, str) else base
            for base in self.bases
        ))
        _check_for_duplicates('managers', (name for name, _ in self.managers))

    def deconstruct(self):
        """Return (name, args, kwargs), omitting options/bases/managers that
        equal their defaults."""
        kwargs = {
            'name': self.name,
            'fields': self.fields,
        }
        if self.options:
            kwargs['options'] = self.options
        if self.bases and self.bases != (models.Model,):
            kwargs['bases'] = self.bases
        if self.managers and self.managers != [('objects', models.Manager())]:
            kwargs['managers'] = self.managers
        return (
            self.__class__.__qualname__,
            [],
            kwargs
        )

    def state_forwards(self, app_label, state):
        state.add_model(ModelState(
            app_label,
            self.name,
            list(self.fields),
            dict(self.options),
            tuple(self.bases),
            list(self.managers),
        ))

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.create_model(model)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        """Reverse of create: drop the model's table."""
        model = from_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.delete_model(model)

    def describe(self):
        return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name)

    def references_model(self, name, app_label=None):
        name_lower = name.lower()
        if name_lower == self.name_lower:
            return True

        # Check we didn't inherit from the model
        models_to_check = [
            base for base in self.bases
            if base is not models.Model and isinstance(base, (models.base.ModelBase, str))
        ]
        # Check we have no FKs/M2Ms with it
        for fname, field in self.fields:
            if field.remote_field:
                models_to_check.append(field.remote_field.model)
        # Now go over all the models and check against them
        for model in models_to_check:
            model_app_label, model_name = self.model_to_key(model)
            if model_name.lower() == name_lower:
                if app_label is None or not model_app_label or model_app_label == app_label:
                    return True
        return False

    def model_to_key(self, model):
        """
        Take either a model class or an "app_label.ModelName" string
        and return (app_label, object_name).
        """
        if isinstance(model, str):
            return model.split(".", 1)
        else:
            return model._meta.app_label, model._meta.object_name

    def reduce(self, operation, in_between, app_label=None):
        # Fold later operations on the same model back into the CreateModel,
        # producing a single CreateModel with the final definition.
        if (isinstance(operation, DeleteModel) and
                self.name_lower == operation.name_lower and
                not self.options.get("proxy", False)):
            return []
        elif isinstance(operation, RenameModel) and self.name_lower == operation.old_name_lower:
            return [
                CreateModel(
                    operation.new_name,
                    fields=self.fields,
                    options=self.options,
                    bases=self.bases,
                    managers=self.managers,
                ),
            ]
        elif isinstance(operation, AlterModelOptions) and self.name_lower == operation.name_lower:
            new_options = self.options.copy()
            new_options.update(operation.options)
            return [
                CreateModel(
                    self.name,
                    fields=self.fields,
                    options=new_options,
                    bases=self.bases,
                    managers=self.managers,
                ),
            ]
        elif isinstance(operation, FieldOperation) and self.name_lower == operation.model_name_lower:
            if isinstance(operation, AddField):
                # Don't allow optimizations of FKs through models they reference
                if hasattr(operation.field, "remote_field") and operation.field.remote_field:
                    for between in in_between:
                        # NOTE: the app_label parameter is shadowed here by the
                        # unpacked key of the referenced model.
                        # Check that it doesn't point to the model
                        app_label, object_name = self.model_to_key(operation.field.remote_field.model)
                        if between.references_model(object_name, app_label):
                            return False
                        # Check that it's not through the model
                        if getattr(operation.field.remote_field, "through", None):
                            app_label, object_name = self.model_to_key(operation.field.remote_field.through)
                            if between.references_model(object_name, app_label):
                                return False
                return [
                    CreateModel(
                        self.name,
                        fields=self.fields + [(operation.name, operation.field)],
                        options=self.options,
                        bases=self.bases,
                        managers=self.managers,
                    ),
                ]
            elif isinstance(operation, AlterField):
                return [
                    CreateModel(
                        self.name,
                        fields=[
                            (n, operation.field if n == operation.name else v)
                            for n, v in self.fields
                        ],
                        options=self.options,
                        bases=self.bases,
                        managers=self.managers,
                    ),
                ]
            elif isinstance(operation, RemoveField):
                return [
                    CreateModel(
                        self.name,
                        fields=[
                            (n, v)
                            for n, v in self.fields
                            if n.lower() != operation.name_lower
                        ],
                        options=self.options,
                        bases=self.bases,
                        managers=self.managers,
                    ),
                ]
            elif isinstance(operation, RenameField):
                return [
                    CreateModel(
                        self.name,
                        fields=[
                            (operation.new_name if n == operation.old_name else n, v)
                            for n, v in self.fields
                        ],
                        options=self.options,
                        bases=self.bases,
                        managers=self.managers,
                    ),
                ]
        return super().reduce(operation, in_between, app_label=app_label)
class DeleteModel(ModelOperation):
    """Drop a model's table."""

    def deconstruct(self):
        """Return (name, args, kwargs) for serializing into a migration file."""
        kwargs = {
            'name': self.name,
        }
        return (
            self.__class__.__qualname__,
            [],
            kwargs
        )

    def state_forwards(self, app_label, state):
        state.remove_model(app_label, self.name_lower)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        model = from_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.delete_model(model)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        """Reverse of delete: re-create the model's table."""
        model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.create_model(model)

    def describe(self):
        return "Delete model %s" % (self.name, )


class RenameModel(ModelOperation):
    """Rename a model."""

    def __init__(self, old_name, new_name):
        self.old_name = old_name
        self.new_name = new_name
        # self.name (from ModelOperation) is the OLD model name.
        super().__init__(old_name)

    @cached_property
    def old_name_lower(self):
        return self.old_name.lower()

    @cached_property
    def new_name_lower(self):
        return self.new_name.lower()

    def deconstruct(self):
        """Return (name, args, kwargs) for serializing into a migration file."""
        kwargs = {
            'old_name': self.old_name,
            'new_name': self.new_name,
        }
        return (
            self.__class__.__qualname__,
            [],
            kwargs
        )

    def _get_model_tuple(self, remote_model, app_label, model_name):
        # Normalize a remote-model reference ("self", "app.Model", or a bare
        # model name) to a lowercase (app_label, model_name) tuple.
        if remote_model == RECURSIVE_RELATIONSHIP_CONSTANT:
            return app_label, model_name.lower()
        elif '.' in remote_model:
            return tuple(remote_model.lower().split('.'))
        else:
            return app_label, remote_model.lower()

    def state_forwards(self, app_label, state):
        # Add a new model.
        renamed_model = state.models[app_label, self.old_name_lower].clone()
        renamed_model.name = self.new_name
        state.models[app_label, self.new_name_lower] = renamed_model
        # Repoint all fields pointing to the old model to the new one.
        old_model_tuple = app_label, self.old_name_lower
        new_remote_model = '%s.%s' % (app_label, self.new_name)
        to_reload = []
        for (model_app_label, model_name), model_state in state.models.items():
            model_changed = False
            for index, (name, field) in enumerate(model_state.fields):
                changed_field = None
                remote_field = field.remote_field
                if remote_field:
                    remote_model_tuple = self._get_model_tuple(
                        remote_field.model, model_app_label, model_name
                    )
                    if remote_model_tuple == old_model_tuple:
                        changed_field = field.clone()
                        changed_field.remote_field.model = new_remote_model
                    through_model = getattr(remote_field, 'through', None)
                    if through_model:
                        through_model_tuple = self._get_model_tuple(
                            through_model, model_app_label, model_name
                        )
                        if through_model_tuple == old_model_tuple:
                            if changed_field is None:
                                changed_field = field.clone()
                            changed_field.remote_field.through = new_remote_model
                if changed_field:
                    model_state.fields[index] = name, changed_field
                    model_changed = True
            if model_changed:
                to_reload.append((model_app_label, model_name))
        # Reload models related to old model before removing the old model.
        state.reload_models(to_reload, delay=True)
        # Remove the old model.
        state.remove_model(app_label, self.old_name_lower)
        state.reload_model(app_label, self.new_name_lower, delay=True)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.new_name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.old_name)
            # Move the main table
            schema_editor.alter_db_table(
                new_model,
                old_model._meta.db_table,
                new_model._meta.db_table,
            )
            # Alter the fields pointing to us
            for related_object in old_model._meta.related_objects:
                if related_object.related_model == old_model:
                    # Self-referential relation: repoint at the new model.
                    model = new_model
                    related_key = (app_label, self.new_name_lower)
                else:
                    model = related_object.related_model
                    related_key = (
                        related_object.related_model._meta.app_label,
                        related_object.related_model._meta.model_name,
                    )
                to_field = to_state.apps.get_model(
                    *related_key
                )._meta.get_field(related_object.field.name)
                schema_editor.alter_field(
                    model,
                    related_object.field,
                    to_field,
                )
            # Rename M2M fields whose name is based on this model's name.
            fields = zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many)
            for (old_field, new_field) in fields:
                # Skip self-referential fields as these are renamed above.
                if new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created:
                    continue
                # Rename the M2M table that's based on this model's name.
                old_m2m_model = old_field.remote_field.through
                new_m2m_model = new_field.remote_field.through
                schema_editor.alter_db_table(
                    new_m2m_model,
                    old_m2m_model._meta.db_table,
                    new_m2m_model._meta.db_table,
                )
                # Rename the column in the M2M table that's based on this
                # model's name.
                schema_editor.alter_field(
                    new_m2m_model,
                    old_m2m_model._meta.get_field(old_model._meta.model_name),
                    new_m2m_model._meta.get_field(new_model._meta.model_name),
                )

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Reuse database_forwards() with old/new names swapped, then restore
        # them so the operation instance is left unchanged.
        self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
        self.new_name, self.old_name = self.old_name, self.new_name

        self.database_forwards(app_label, schema_editor, from_state, to_state)

        self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
        self.new_name, self.old_name = self.old_name, self.new_name

    def references_model(self, name, app_label=None):
        # A rename references both the old and the new model name.
        return (
            name.lower() == self.old_name_lower or
            name.lower() == self.new_name_lower
        )

    def describe(self):
        return "Rename model %s to %s" % (self.old_name, self.new_name)

    def reduce(self, operation, in_between, app_label=None):
        # Two chained renames collapse into a single old_name -> final name.
        if (isinstance(operation, RenameModel) and
                self.new_name_lower == operation.old_name_lower):
            return [
                RenameModel(
                    self.old_name,
                    operation.new_name,
                ),
            ]
        # Skip `ModelOperation.reduce` as we want to run `references_model`
        # against self.new_name.
        return (
            super(ModelOperation, self).reduce(operation, in_between, app_label=app_label) or
            not operation.references_model(self.new_name, app_label)
        )
class AlterModelTable(ModelOperation):
    """Rename a model's table."""

    def __init__(self, name, table):
        self.table = table
        super().__init__(name)

    def deconstruct(self):
        """Return (name, args, kwargs) for serializing into a migration file."""
        kwargs = {
            'name': self.name,
            'table': self.table,
        }
        return (
            self.__class__.__qualname__,
            [],
            kwargs
        )

    def state_forwards(self, app_label, state):
        state.models[app_label, self.name_lower].options["db_table"] = self.table
        state.reload_model(app_label, self.name_lower, delay=True)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.name)
            schema_editor.alter_db_table(
                new_model,
                old_model._meta.db_table,
                new_model._meta.db_table,
            )
            # Rename M2M fields whose name is based on this model's db_table
            for (old_field, new_field) in zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many):
                if new_field.remote_field.through._meta.auto_created:
                    schema_editor.alter_db_table(
                        new_field.remote_field.through,
                        old_field.remote_field.through._meta.db_table,
                        new_field.remote_field.through._meta.db_table,
                    )

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric: forwards with swapped states performs the reverse rename.
        return self.database_forwards(app_label, schema_editor, from_state, to_state)

    def describe(self):
        return "Rename table for %s to %s" % (
            self.name,
            self.table if self.table is not None else "(default)"
        )

    def reduce(self, operation, in_between, app_label=None):
        # A later table rename or model deletion supersedes this operation.
        if isinstance(operation, (AlterModelTable, DeleteModel)) and self.name_lower == operation.name_lower:
            return [operation]
        return super().reduce(operation, in_between, app_label=app_label)
class ModelOptionOperation(ModelOperation):
    """Base for operations that change a single Meta option of a model."""

    def reduce(self, operation, in_between, app_label=None):
        # A later operation of the same kind (or a model deletion) makes
        # this one redundant.
        if isinstance(operation, (self.__class__, DeleteModel)) and self.name_lower == operation.name_lower:
            return [operation]
        return super().reduce(operation, in_between, app_label=app_label)


class FieldRelatedOptionOperation(ModelOptionOperation):
    """Base for Meta-option operations whose option references field names."""

    def reduce(self, operation, in_between, app_label=None):
        # A field operation on a field this option does not mention can be
        # moved before this operation.
        if (isinstance(operation, FieldOperation) and
                self.name_lower == operation.model_name_lower and
                not self.references_field(operation.model_name, operation.name)):
            return [operation, self]
        return super().reduce(operation, in_between, app_label=app_label)


class AlterUniqueTogether(FieldRelatedOptionOperation):
    """
    Change the value of unique_together to the target one.
    Input value of unique_together must be a set of tuples.
    """
    option_name = "unique_together"

    def __init__(self, name, unique_together):
        unique_together = normalize_together(unique_together)
        self.unique_together = {tuple(cons) for cons in unique_together}
        super().__init__(name)

    def deconstruct(self):
        """Return (name, args, kwargs) for serializing into a migration file."""
        kwargs = {
            'name': self.name,
            'unique_together': self.unique_together,
        }
        return (
            self.__class__.__qualname__,
            [],
            kwargs
        )

    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        model_state.options[self.option_name] = self.unique_together
        state.reload_model(app_label, self.name_lower, delay=True)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.name)
            schema_editor.alter_unique_together(
                new_model,
                getattr(old_model._meta, self.option_name, set()),
                getattr(new_model._meta, self.option_name, set()),
            )

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric: forwards with swapped states reverses the change.
        return self.database_forwards(app_label, schema_editor, from_state, to_state)

    def references_field(self, model_name, name, app_label=None):
        # An empty unique_together still references the model as a whole.
        return (
            self.references_model(model_name, app_label) and
            (
                not self.unique_together or
                any((name in together) for together in self.unique_together)
            )
        )

    def describe(self):
        # ``or ''`` makes len() yield 0 when unique_together is empty.
        return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.unique_together or ''))
to_state): + return self.database_forwards(app_label, schema_editor, from_state, to_state) + + def references_field(self, model_name, name, app_label=None): + return ( + self.references_model(model_name, app_label) and + ( + not self.unique_together or + any((name in together) for together in self.unique_together) + ) + ) + + def describe(self): + return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.unique_together or '')) + + +class AlterIndexTogether(FieldRelatedOptionOperation): + """ + Change the value of index_together to the target one. + Input value of index_together must be a set of tuples. + """ + option_name = "index_together" + + def __init__(self, name, index_together): + index_together = normalize_together(index_together) + self.index_together = {tuple(cons) for cons in index_together} + super().__init__(name) + + def deconstruct(self): + kwargs = { + 'name': self.name, + 'index_together': self.index_together, + } + return ( + self.__class__.__qualname__, + [], + kwargs + ) + + def state_forwards(self, app_label, state): + model_state = state.models[app_label, self.name_lower] + model_state.options[self.option_name] = self.index_together + state.reload_model(app_label, self.name_lower, delay=True) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + new_model = to_state.apps.get_model(app_label, self.name) + if self.allow_migrate_model(schema_editor.connection.alias, new_model): + old_model = from_state.apps.get_model(app_label, self.name) + schema_editor.alter_index_together( + new_model, + getattr(old_model._meta, self.option_name, set()), + getattr(new_model._meta, self.option_name, set()), + ) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + return self.database_forwards(app_label, schema_editor, from_state, to_state) + + def references_field(self, model_name, name, app_label=None): + return ( + self.references_model(model_name, app_label) and + ( + not 
self.index_together or + any((name in together) for together in self.index_together) + ) + ) + + def describe(self): + return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.index_together or '')) + + +class AlterOrderWithRespectTo(FieldRelatedOptionOperation): + """Represent a change with the order_with_respect_to option.""" + + def __init__(self, name, order_with_respect_to): + self.order_with_respect_to = order_with_respect_to + super().__init__(name) + + def deconstruct(self): + kwargs = { + 'name': self.name, + 'order_with_respect_to': self.order_with_respect_to, + } + return ( + self.__class__.__qualname__, + [], + kwargs + ) + + def state_forwards(self, app_label, state): + model_state = state.models[app_label, self.name_lower] + model_state.options['order_with_respect_to'] = self.order_with_respect_to + state.reload_model(app_label, self.name_lower, delay=True) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + to_model = to_state.apps.get_model(app_label, self.name) + if self.allow_migrate_model(schema_editor.connection.alias, to_model): + from_model = from_state.apps.get_model(app_label, self.name) + # Remove a field if we need to + if from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to: + schema_editor.remove_field(from_model, from_model._meta.get_field("_order")) + # Add a field if we need to (altering the column is untouched as + # it's likely a rename) + elif to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to: + field = to_model._meta.get_field("_order") + if not field.has_default(): + field.default = 0 + schema_editor.add_field( + from_model, + field, + ) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + self.database_forwards(app_label, schema_editor, from_state, to_state) + + def references_field(self, model_name, name, app_label=None): + return ( + self.references_model(model_name, 
app_label) and + ( + self.order_with_respect_to is None or + name == self.order_with_respect_to + ) + ) + + def describe(self): + return "Set order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to) + + +class AlterModelOptions(ModelOptionOperation): + """ + Set new model options that don't directly affect the database schema + (like verbose_name, permissions, ordering). Python code in migrations + may still need them. + """ + + # Model options we want to compare and preserve in an AlterModelOptions op + ALTER_OPTION_KEYS = [ + "base_manager_name", + "default_manager_name", + "get_latest_by", + "managed", + "ordering", + "permissions", + "default_permissions", + "select_on_save", + "verbose_name", + "verbose_name_plural", + ] + + def __init__(self, name, options): + self.options = options + super().__init__(name) + + def deconstruct(self): + kwargs = { + 'name': self.name, + 'options': self.options, + } + return ( + self.__class__.__qualname__, + [], + kwargs + ) + + def state_forwards(self, app_label, state): + model_state = state.models[app_label, self.name_lower] + model_state.options = dict(model_state.options) + model_state.options.update(self.options) + for key in self.ALTER_OPTION_KEYS: + if key not in self.options and key in model_state.options: + del model_state.options[key] + state.reload_model(app_label, self.name_lower, delay=True) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + pass + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + pass + + def describe(self): + return "Change Meta options on %s" % (self.name, ) + + +class AlterModelManagers(ModelOptionOperation): + """Alter the model's managers.""" + + serialization_expand_args = ['managers'] + + def __init__(self, name, managers): + self.managers = managers + super().__init__(name) + + def deconstruct(self): + return ( + self.__class__.__qualname__, + [self.name, self.managers], + {} + ) + + def 
state_forwards(self, app_label, state): + model_state = state.models[app_label, self.name_lower] + model_state.managers = list(self.managers) + state.reload_model(app_label, self.name_lower, delay=True) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + pass + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + pass + + def describe(self): + return "Change managers on %s" % (self.name, ) + + +class IndexOperation(Operation): + option_name = 'indexes' + + @cached_property + def model_name_lower(self): + return self.model_name.lower() + + +class AddIndex(IndexOperation): + """Add an index on a model.""" + + def __init__(self, model_name, index): + self.model_name = model_name + if not index.name: + raise ValueError( + "Indexes passed to AddIndex operations require a name " + "argument. %r doesn't have one." % index + ) + self.index = index + + def state_forwards(self, app_label, state): + model_state = state.models[app_label, self.model_name_lower] + indexes = list(model_state.options[self.option_name]) + indexes.append(self.index.clone()) + model_state.options[self.option_name] = indexes + state.reload_model(app_label, self.model_name_lower, delay=True) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + model = to_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, model): + schema_editor.add_index(model, self.index) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + model = from_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, model): + schema_editor.remove_index(model, self.index) + + def deconstruct(self): + kwargs = { + 'model_name': self.model_name, + 'index': self.index, + } + return ( + self.__class__.__qualname__, + [], + kwargs, + ) + + def describe(self): + return 'Create index %s on field(s) %s of model %s' % ( 
+ self.index.name, + ', '.join(self.index.fields), + self.model_name, + ) + + +class RemoveIndex(IndexOperation): + """Remove an index from a model.""" + + def __init__(self, model_name, name): + self.model_name = model_name + self.name = name + + def state_forwards(self, app_label, state): + model_state = state.models[app_label, self.model_name_lower] + indexes = model_state.options[self.option_name] + model_state.options[self.option_name] = [idx for idx in indexes if idx.name != self.name] + state.reload_model(app_label, self.model_name_lower, delay=True) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + model = from_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, model): + from_model_state = from_state.models[app_label, self.model_name_lower] + index = from_model_state.get_index_by_name(self.name) + schema_editor.remove_index(model, index) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + model = to_state.apps.get_model(app_label, self.model_name) + if self.allow_migrate_model(schema_editor.connection.alias, model): + to_model_state = to_state.models[app_label, self.model_name_lower] + index = to_model_state.get_index_by_name(self.name) + schema_editor.add_index(model, index) + + def deconstruct(self): + kwargs = { + 'model_name': self.model_name, + 'name': self.name, + } + return ( + self.__class__.__qualname__, + [], + kwargs, + ) + + def describe(self): + return 'Remove index %s from %s' % (self.name, self.model_name) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/operations/special.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/operations/special.py new file mode 100644 index 0000000000000000000000000000000000000000..5a8510ec02890dea0aea2c4fcd97db4232b3d573 --- /dev/null +++ 
b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/operations/special.py @@ -0,0 +1,203 @@ +from django.db import router + +from .base import Operation + + +class SeparateDatabaseAndState(Operation): + """ + Take two lists of operations - ones that will be used for the database, + and ones that will be used for the state change. This allows operations + that don't support state change to have it applied, or have operations + that affect the state or not the database, or so on. + """ + + serialization_expand_args = ['database_operations', 'state_operations'] + + def __init__(self, database_operations=None, state_operations=None): + self.database_operations = database_operations or [] + self.state_operations = state_operations or [] + + def deconstruct(self): + kwargs = {} + if self.database_operations: + kwargs['database_operations'] = self.database_operations + if self.state_operations: + kwargs['state_operations'] = self.state_operations + return ( + self.__class__.__qualname__, + [], + kwargs + ) + + def state_forwards(self, app_label, state): + for state_operation in self.state_operations: + state_operation.state_forwards(app_label, state) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + # We calculate state separately in here since our state functions aren't useful + for database_operation in self.database_operations: + to_state = from_state.clone() + database_operation.state_forwards(app_label, to_state) + database_operation.database_forwards(app_label, schema_editor, from_state, to_state) + from_state = to_state + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + # We calculate state separately in here since our state functions aren't useful + to_states = {} + for dbop in self.database_operations: + to_states[dbop] = to_state + to_state = to_state.clone() + dbop.state_forwards(app_label, to_state) + # to_state now has the states of all the 
database_operations applied + # which is the from_state for the backwards migration of the last + # operation. + for database_operation in reversed(self.database_operations): + from_state = to_state + to_state = to_states[database_operation] + database_operation.database_backwards(app_label, schema_editor, from_state, to_state) + + def describe(self): + return "Custom state/database change combination" + + +class RunSQL(Operation): + """ + Run some raw SQL. A reverse SQL statement may be provided. + + Also accept a list of operations that represent the state change effected + by this SQL change, in case it's custom column/table creation/deletion. + """ + noop = '' + + def __init__(self, sql, reverse_sql=None, state_operations=None, hints=None, elidable=False): + self.sql = sql + self.reverse_sql = reverse_sql + self.state_operations = state_operations or [] + self.hints = hints or {} + self.elidable = elidable + + def deconstruct(self): + kwargs = { + 'sql': self.sql, + } + if self.reverse_sql is not None: + kwargs['reverse_sql'] = self.reverse_sql + if self.state_operations: + kwargs['state_operations'] = self.state_operations + if self.hints: + kwargs['hints'] = self.hints + return ( + self.__class__.__qualname__, + [], + kwargs + ) + + @property + def reversible(self): + return self.reverse_sql is not None + + def state_forwards(self, app_label, state): + for state_operation in self.state_operations: + state_operation.state_forwards(app_label, state) + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints): + self._run_sql(schema_editor, self.sql) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + if self.reverse_sql is None: + raise NotImplementedError("You cannot reverse this operation") + if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints): + self._run_sql(schema_editor, self.reverse_sql) + + 
def describe(self): + return "Raw SQL operation" + + def _run_sql(self, schema_editor, sqls): + if isinstance(sqls, (list, tuple)): + for sql in sqls: + params = None + if isinstance(sql, (list, tuple)): + elements = len(sql) + if elements == 2: + sql, params = sql + else: + raise ValueError("Expected a 2-tuple but got %d" % elements) + schema_editor.execute(sql, params=params) + elif sqls != RunSQL.noop: + statements = schema_editor.connection.ops.prepare_sql_script(sqls) + for statement in statements: + schema_editor.execute(statement, params=None) + + +class RunPython(Operation): + """ + Run Python code in a context suitable for doing versioned ORM operations. + """ + + reduces_to_sql = False + + def __init__(self, code, reverse_code=None, atomic=None, hints=None, elidable=False): + self.atomic = atomic + # Forwards code + if not callable(code): + raise ValueError("RunPython must be supplied with a callable") + self.code = code + # Reverse code + if reverse_code is None: + self.reverse_code = None + else: + if not callable(reverse_code): + raise ValueError("RunPython must be supplied with callable arguments") + self.reverse_code = reverse_code + self.hints = hints or {} + self.elidable = elidable + + def deconstruct(self): + kwargs = { + 'code': self.code, + } + if self.reverse_code is not None: + kwargs['reverse_code'] = self.reverse_code + if self.atomic is not None: + kwargs['atomic'] = self.atomic + if self.hints: + kwargs['hints'] = self.hints + return ( + self.__class__.__qualname__, + [], + kwargs + ) + + @property + def reversible(self): + return self.reverse_code is not None + + def state_forwards(self, app_label, state): + # RunPython objects have no state effect. To add some, combine this + # with SeparateDatabaseAndState. + pass + + def database_forwards(self, app_label, schema_editor, from_state, to_state): + # RunPython has access to all models. Ensure that all models are + # reloaded in case any are delayed. 
+ from_state.clear_delayed_apps_cache() + if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints): + # We now execute the Python code in a context that contains a 'models' + # object, representing the versioned models as an app registry. + # We could try to override the global cache, but then people will still + # use direct imports, so we go with a documentation approach instead. + self.code(from_state.apps, schema_editor) + + def database_backwards(self, app_label, schema_editor, from_state, to_state): + if self.reverse_code is None: + raise NotImplementedError("You cannot reverse this operation") + if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints): + self.reverse_code(from_state.apps, schema_editor) + + def describe(self): + return "Raw Python operation" + + @staticmethod + def noop(apps, schema_editor): + return None diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/operations/utils.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/operations/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..af23ea956346ba7e750f1a6c4ce82ff84b38902f --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/operations/utils.py @@ -0,0 +1,9 @@ +def is_referenced_by_foreign_key(state, model_name_lower, field, field_name): + for state_app_label, state_model in state.models: + for _, f in state.models[state_app_label, state_model].fields: + if (f.related_model and + '%s.%s' % (state_app_label, model_name_lower) == f.related_model.lower() and + hasattr(f, 'to_fields')): + if (f.to_fields[0] is None and field.primary_key) or field_name in f.to_fields: + return True + return False diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/optimizer.py 
b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..d31ab89d075cacc122d0c2e3bb09bfbfbecd2e4f --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/optimizer.py @@ -0,0 +1,61 @@ +class MigrationOptimizer: + """ + Power the optimization process, where you provide a list of Operations + and you are returned a list of equal or shorter length - operations + are merged into one if possible. + + For example, a CreateModel and an AddField can be optimized into a + new CreateModel, and CreateModel and DeleteModel can be optimized into + nothing. + """ + + def optimize(self, operations, app_label=None): + """ + Main optimization entry point. Pass in a list of Operation instances, + get out a new list of Operation instances. + + Unfortunately, due to the scope of the optimization (two combinable + operations might be separated by several hundred others), this can't be + done as a peephole optimization with checks/output implemented on + the Operations themselves; instead, the optimizer looks at each + individual operation and scans forwards in the list to see if there + are any matches, stopping at boundaries - operations which can't + be optimized over (RunSQL, operations on the same field/model, etc.) + + The inner loop is run until the starting list is the same as the result + list, and then the result is returned. This means that operation + optimization must be stable and always return an equal or shorter list. + + The app_label argument is optional, but if you pass it you'll get more + efficient optimization. 
+ """ + # Internal tracking variable for test assertions about # of loops + self._iterations = 0 + while True: + result = self.optimize_inner(operations, app_label) + self._iterations += 1 + if result == operations: + return result + operations = result + + def optimize_inner(self, operations, app_label=None): + """Inner optimization loop.""" + new_operations = [] + for i, operation in enumerate(operations): + # Compare it to each operation after it + for j, other in enumerate(operations[i + 1:]): + in_between = operations[i + 1:i + j + 1] + result = operation.reduce(other, in_between, app_label) + if isinstance(result, list): + # Optimize! Add result, then remaining others, then return + new_operations.extend(result) + new_operations.extend(in_between) + new_operations.extend(operations[i + j + 2:]) + return new_operations + if not result: + # We can't optimize across `other`. + new_operations.append(operation) + break + else: + new_operations.append(operation) + return new_operations diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/questioner.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/questioner.py new file mode 100644 index 0000000000000000000000000000000000000000..0fd377ec14a2f42b544fb9f50d62c976c8ae85a8 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/questioner.py @@ -0,0 +1,238 @@ +import importlib +import os +import sys + +from django.apps import apps +from django.db.models.fields import NOT_PROVIDED +from django.utils import datetime_safe, timezone + +from .loader import MigrationLoader + + +class MigrationQuestioner: + """ + Give the autodetector responses to questions it might have. + This base class has a built-in noninteractive mode, but the + interactive subclass is what the command-line arguments will use. 
+ """ + + def __init__(self, defaults=None, specified_apps=None, dry_run=None): + self.defaults = defaults or {} + self.specified_apps = specified_apps or set() + self.dry_run = dry_run + + def ask_initial(self, app_label): + """Should we create an initial migration for the app?""" + # If it was specified on the command line, definitely true + if app_label in self.specified_apps: + return True + # Otherwise, we look to see if it has a migrations module + # without any Python files in it, apart from __init__.py. + # Apps from the new app template will have these; the python + # file check will ensure we skip South ones. + try: + app_config = apps.get_app_config(app_label) + except LookupError: # It's a fake app. + return self.defaults.get("ask_initial", False) + migrations_import_path, _ = MigrationLoader.migrations_module(app_config.label) + if migrations_import_path is None: + # It's an application with migrations disabled. + return self.defaults.get("ask_initial", False) + try: + migrations_module = importlib.import_module(migrations_import_path) + except ImportError: + return self.defaults.get("ask_initial", False) + else: + # getattr() needed on PY36 and older (replace with attribute access). 
+ if getattr(migrations_module, "__file__", None): + filenames = os.listdir(os.path.dirname(migrations_module.__file__)) + elif hasattr(migrations_module, "__path__"): + if len(migrations_module.__path__) > 1: + return False + filenames = os.listdir(list(migrations_module.__path__)[0]) + return not any(x.endswith(".py") for x in filenames if x != "__init__.py") + + def ask_not_null_addition(self, field_name, model_name): + """Adding a NOT NULL field to a model.""" + # None means quit + return None + + def ask_not_null_alteration(self, field_name, model_name): + """Changing a NULL field to NOT NULL.""" + # None means quit + return None + + def ask_rename(self, model_name, old_name, new_name, field_instance): + """Was this field really renamed?""" + return self.defaults.get("ask_rename", False) + + def ask_rename_model(self, old_model_state, new_model_state): + """Was this model really renamed?""" + return self.defaults.get("ask_rename_model", False) + + def ask_merge(self, app_label): + """Do you really want to merge these migrations?""" + return self.defaults.get("ask_merge", False) + + def ask_auto_now_add_addition(self, field_name, model_name): + """Adding an auto_now_add field to a model.""" + # None means quit + return None + + +class InteractiveMigrationQuestioner(MigrationQuestioner): + + def _boolean_input(self, question, default=None): + result = input("%s " % question) + if not result and default is not None: + return default + while len(result) < 1 or result[0].lower() not in "yn": + result = input("Please answer yes or no: ") + return result[0].lower() == "y" + + def _choice_input(self, question, choices): + print(question) + for i, choice in enumerate(choices): + print(" %s) %s" % (i + 1, choice)) + result = input("Select an option: ") + while True: + try: + value = int(result) + except ValueError: + pass + else: + if 0 < value <= len(choices): + return value + result = input("Please select a valid option: ") + + def _ask_default(self, default=''): + 
""" + Prompt for a default value. + + The ``default`` argument allows providing a custom default value (as a + string) which will be shown to the user and used as the return value + if the user doesn't provide any other input. + """ + print("Please enter the default value now, as valid Python") + if default: + print( + "You can accept the default '{}' by pressing 'Enter' or you " + "can provide another value.".format(default) + ) + print("The datetime and django.utils.timezone modules are available, so you can do e.g. timezone.now") + print("Type 'exit' to exit this prompt") + while True: + if default: + prompt = "[default: {}] >>> ".format(default) + else: + prompt = ">>> " + code = input(prompt) + if not code and default: + code = default + if not code: + print("Please enter some code, or 'exit' (with no quotes) to exit.") + elif code == "exit": + sys.exit(1) + else: + try: + return eval(code, {}, {"datetime": datetime_safe, "timezone": timezone}) + except (SyntaxError, NameError) as e: + print("Invalid input: %s" % e) + + def ask_not_null_addition(self, field_name, model_name): + """Adding a NOT NULL field to a model.""" + if not self.dry_run: + choice = self._choice_input( + "You are trying to add a non-nullable field '%s' to %s without a default; " + "we can't do that (the database needs something to populate existing rows).\n" + "Please select a fix:" % (field_name, model_name), + [ + ("Provide a one-off default now (will be set on all existing " + "rows with a null value for this column)"), + "Quit, and let me add a default in models.py", + ] + ) + if choice == 2: + sys.exit(3) + else: + return self._ask_default() + return None + + def ask_not_null_alteration(self, field_name, model_name): + """Changing a NULL field to NOT NULL.""" + if not self.dry_run: + choice = self._choice_input( + "You are trying to change the nullable field '%s' on %s to non-nullable " + "without a default; we can't do that (the database needs something to " + "populate existing 
rows).\n" + "Please select a fix:" % (field_name, model_name), + [ + ("Provide a one-off default now (will be set on all existing " + "rows with a null value for this column)"), + ("Ignore for now, and let me handle existing rows with NULL myself " + "(e.g. because you added a RunPython or RunSQL operation to handle " + "NULL values in a previous data migration)"), + "Quit, and let me add a default in models.py", + ] + ) + if choice == 2: + return NOT_PROVIDED + elif choice == 3: + sys.exit(3) + else: + return self._ask_default() + return None + + def ask_rename(self, model_name, old_name, new_name, field_instance): + """Was this field really renamed?""" + msg = "Did you rename %s.%s to %s.%s (a %s)? [y/N]" + return self._boolean_input(msg % (model_name, old_name, model_name, new_name, + field_instance.__class__.__name__), False) + + def ask_rename_model(self, old_model_state, new_model_state): + """Was this model really renamed?""" + msg = "Did you rename the %s.%s model to %s? [y/N]" + return self._boolean_input(msg % (old_model_state.app_label, old_model_state.name, + new_model_state.name), False) + + def ask_merge(self, app_label): + return self._boolean_input( + "\nMerging will only work if the operations printed above do not conflict\n" + + "with each other (working on different fields or models)\n" + + "Do you want to merge these migration branches? 
[y/N]", + False, + ) + + def ask_auto_now_add_addition(self, field_name, model_name): + """Adding an auto_now_add field to a model.""" + if not self.dry_run: + choice = self._choice_input( + "You are trying to add the field '{}' with 'auto_now_add=True' " + "to {} without a default; the database needs something to " + "populate existing rows.\n".format(field_name, model_name), + [ + "Provide a one-off default now (will be set on all " + "existing rows)", + "Quit, and let me add a default in models.py", + ] + ) + if choice == 2: + sys.exit(3) + else: + return self._ask_default(default='timezone.now') + return None + + +class NonInteractiveMigrationQuestioner(MigrationQuestioner): + + def ask_not_null_addition(self, field_name, model_name): + # We can't ask the user, so act like the user aborted. + sys.exit(3) + + def ask_not_null_alteration(self, field_name, model_name): + # We can't ask the user, so set as not provided. + return NOT_PROVIDED + + def ask_auto_now_add_addition(self, field_name, model_name): + # We can't ask the user, so act like the user aborted. + sys.exit(3) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/recorder.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/recorder.py new file mode 100644 index 0000000000000000000000000000000000000000..3a972fe4c635218361b988807c3ed0468c20e5a2 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/recorder.py @@ -0,0 +1,80 @@ +from django.apps.registry import Apps +from django.db import models +from django.db.utils import DatabaseError +from django.utils.timezone import now + +from .exceptions import MigrationSchemaMissing + + +class MigrationRecorder: + """ + Deal with storing migration records in the database. + + Because this table is actually itself used for dealing with model + creation, it's the one thing we can't do normally via migrations. 
+ We manually handle table creation/schema updating (using schema backend) + and then have a floating model to do queries with. + + If a migration is unapplied its row is removed from the table. Having + a row in the table always means a migration is applied. + """ + + class Migration(models.Model): + app = models.CharField(max_length=255) + name = models.CharField(max_length=255) + applied = models.DateTimeField(default=now) + + class Meta: + apps = Apps() + app_label = "migrations" + db_table = "django_migrations" + + def __str__(self): + return "Migration %s for %s" % (self.name, self.app) + + def __init__(self, connection): + self.connection = connection + + @property + def migration_qs(self): + return self.Migration.objects.using(self.connection.alias) + + def has_table(self): + """Return True if the django_migrations table exists.""" + return self.Migration._meta.db_table in self.connection.introspection.table_names(self.connection.cursor()) + + def ensure_schema(self): + """Ensure the table exists and has the correct schema.""" + # If the table's there, that's fine - we've never changed its schema + # in the codebase. + if self.has_table(): + return + # Make the table + try: + with self.connection.schema_editor() as editor: + editor.create_model(self.Migration) + except DatabaseError as exc: + raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc) + + def applied_migrations(self): + """Return a set of (app, name) of applied migrations.""" + if self.has_table(): + return {tuple(x) for x in self.migration_qs.values_list('app', 'name')} + else: + # If the django_migrations table doesn't exist, then no migrations + # are applied. 
+ return set() + + def record_applied(self, app, name): + """Record that a migration was applied.""" + self.ensure_schema() + self.migration_qs.create(app=app, name=name) + + def record_unapplied(self, app, name): + """Record that a migration was unapplied.""" + self.ensure_schema() + self.migration_qs.filter(app=app, name=name).delete() + + def flush(self): + """Delete all migration records. Useful for testing migrations.""" + self.migration_qs.all().delete() diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/serializer.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/serializer.py new file mode 100644 index 0000000000000000000000000000000000000000..4df3776e543445127c587ca7c216a9d01973c222 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/serializer.py @@ -0,0 +1,371 @@ +import builtins +import collections +import datetime +import decimal +import enum +import functools +import math +import re +import types +import uuid + +from django.db import models +from django.db.migrations.operations.base import Operation +from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject +from django.utils import datetime_safe +from django.utils.functional import LazyObject, Promise +from django.utils.timezone import utc +from django.utils.version import get_docs_version + + +class BaseSerializer: + def __init__(self, value): + self.value = value + + def serialize(self): + raise NotImplementedError('Subclasses of BaseSerializer must implement the serialize() method.') + + +class BaseSequenceSerializer(BaseSerializer): + def _format(self): + raise NotImplementedError('Subclasses of BaseSequenceSerializer must implement the _format() method.') + + def serialize(self): + imports = set() + strings = [] + for item in self.value: + item_string, item_imports = serializer_factory(item).serialize() + 
imports.update(item_imports) + strings.append(item_string) + value = self._format() + return value % (", ".join(strings)), imports + + +class BaseSimpleSerializer(BaseSerializer): + def serialize(self): + return repr(self.value), set() + + +class ByteTypeSerializer(BaseSerializer): + def serialize(self): + return repr(self.value), set() + + +class DatetimeSerializer(BaseSerializer): + def serialize(self): + if self.value.tzinfo is not None and self.value.tzinfo != utc: + self.value = self.value.astimezone(utc) + value_repr = repr(self.value).replace("", "utc") + if isinstance(self.value, datetime_safe.datetime): + value_repr = "datetime.%s" % value_repr + imports = ["import datetime"] + if self.value.tzinfo is not None: + imports.append("from django.utils.timezone import utc") + return value_repr, set(imports) + + +class DateSerializer(BaseSerializer): + def serialize(self): + value_repr = repr(self.value) + if isinstance(self.value, datetime_safe.date): + value_repr = "datetime.%s" % value_repr + return value_repr, {"import datetime"} + + +class DecimalSerializer(BaseSerializer): + def serialize(self): + return repr(self.value), {"from decimal import Decimal"} + + +class DeconstructableSerializer(BaseSerializer): + @staticmethod + def serialize_deconstructed(path, args, kwargs): + name, imports = DeconstructableSerializer._serialize_path(path) + strings = [] + for arg in args: + arg_string, arg_imports = serializer_factory(arg).serialize() + strings.append(arg_string) + imports.update(arg_imports) + for kw, arg in sorted(kwargs.items()): + arg_string, arg_imports = serializer_factory(arg).serialize() + imports.update(arg_imports) + strings.append("%s=%s" % (kw, arg_string)) + return "%s(%s)" % (name, ", ".join(strings)), imports + + @staticmethod + def _serialize_path(path): + module, name = path.rsplit(".", 1) + if module == "django.db.models": + imports = {"from django.db import models"} + name = "models.%s" % name + else: + imports = {"import %s" % module} + 
name = path + return name, imports + + def serialize(self): + return self.serialize_deconstructed(*self.value.deconstruct()) + + +class DictionarySerializer(BaseSerializer): + def serialize(self): + imports = set() + strings = [] + for k, v in sorted(self.value.items()): + k_string, k_imports = serializer_factory(k).serialize() + v_string, v_imports = serializer_factory(v).serialize() + imports.update(k_imports) + imports.update(v_imports) + strings.append((k_string, v_string)) + return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports + + +class EnumSerializer(BaseSerializer): + def serialize(self): + enum_class = self.value.__class__ + module = enum_class.__module__ + imports = {"import %s" % module} + v_string, v_imports = serializer_factory(self.value.value).serialize() + imports.update(v_imports) + return "%s.%s(%s)" % (module, enum_class.__name__, v_string), imports + + +class FloatSerializer(BaseSimpleSerializer): + def serialize(self): + if math.isnan(self.value) or math.isinf(self.value): + return 'float("{}")'.format(self.value), set() + return super().serialize() + + +class FrozensetSerializer(BaseSequenceSerializer): + def _format(self): + return "frozenset([%s])" + + +class FunctionTypeSerializer(BaseSerializer): + def serialize(self): + if getattr(self.value, "__self__", None) and isinstance(self.value.__self__, type): + klass = self.value.__self__ + module = klass.__module__ + return "%s.%s.%s" % (module, klass.__name__, self.value.__name__), {"import %s" % module} + # Further error checking + if self.value.__name__ == '': + raise ValueError("Cannot serialize function: lambda") + if self.value.__module__ is None: + raise ValueError("Cannot serialize function %r: No module" % self.value) + + module_name = self.value.__module__ + + if '<' not in self.value.__qualname__: # Qualname can include + return '%s.%s' % (module_name, self.value.__qualname__), {'import %s' % self.value.__module__} + + raise ValueError( + 'Could not find 
function %s in %s.\n' % (self.value.__name__, module_name) + ) + + +class FunctoolsPartialSerializer(BaseSerializer): + def serialize(self): + imports = {'import functools'} + # Serialize functools.partial() arguments + func_string, func_imports = serializer_factory(self.value.func).serialize() + args_string, args_imports = serializer_factory(self.value.args).serialize() + keywords_string, keywords_imports = serializer_factory(self.value.keywords).serialize() + # Add any imports needed by arguments + imports.update(func_imports) + imports.update(args_imports) + imports.update(keywords_imports) + return ( + "functools.partial(%s, *%s, **%s)" % ( + func_string, args_string, keywords_string, + ), + imports, + ) + + +class IterableSerializer(BaseSerializer): + def serialize(self): + imports = set() + strings = [] + for item in self.value: + item_string, item_imports = serializer_factory(item).serialize() + imports.update(item_imports) + strings.append(item_string) + # When len(strings)==0, the empty iterable should be serialized as + # "()", not "(,)" because (,) is invalid Python syntax. 
+ value = "(%s)" if len(strings) != 1 else "(%s,)" + return value % (", ".join(strings)), imports + + +class ModelFieldSerializer(DeconstructableSerializer): + def serialize(self): + attr_name, path, args, kwargs = self.value.deconstruct() + return self.serialize_deconstructed(path, args, kwargs) + + +class ModelManagerSerializer(DeconstructableSerializer): + def serialize(self): + as_manager, manager_path, qs_path, args, kwargs = self.value.deconstruct() + if as_manager: + name, imports = self._serialize_path(qs_path) + return "%s.as_manager()" % name, imports + else: + return self.serialize_deconstructed(manager_path, args, kwargs) + + +class OperationSerializer(BaseSerializer): + def serialize(self): + from django.db.migrations.writer import OperationWriter + string, imports = OperationWriter(self.value, indentation=0).serialize() + # Nested operation, trailing comma is handled in upper OperationWriter._write() + return string.rstrip(','), imports + + +class RegexSerializer(BaseSerializer): + def serialize(self): + imports = {"import re"} + regex_pattern, pattern_imports = serializer_factory(self.value.pattern).serialize() + # Turn off default implicit flags (e.g. re.U) because regexes with the + # same implicit and explicit flags aren't equal. + flags = self.value.flags ^ re.compile('').flags + regex_flags, flag_imports = serializer_factory(flags).serialize() + imports.update(pattern_imports) + imports.update(flag_imports) + args = [regex_pattern] + if flags: + args.append(regex_flags) + return "re.compile(%s)" % ', '.join(args), imports + + +class SequenceSerializer(BaseSequenceSerializer): + def _format(self): + return "[%s]" + + +class SetSerializer(BaseSequenceSerializer): + def _format(self): + # Serialize as a set literal except when value is empty because {} + # is an empty dict. 
+ return '{%s}' if self.value else 'set(%s)' + + +class SettingsReferenceSerializer(BaseSerializer): + def serialize(self): + return "settings.%s" % self.value.setting_name, {"from django.conf import settings"} + + +class TextTypeSerializer(BaseSerializer): + def serialize(self): + return repr(self.value), set() + + +class TimedeltaSerializer(BaseSerializer): + def serialize(self): + return repr(self.value), {"import datetime"} + + +class TimeSerializer(BaseSerializer): + def serialize(self): + value_repr = repr(self.value) + if isinstance(self.value, datetime_safe.time): + value_repr = "datetime.%s" % value_repr + return value_repr, {"import datetime"} + + +class TupleSerializer(BaseSequenceSerializer): + def _format(self): + # When len(value)==0, the empty tuple should be serialized as "()", + # not "(,)" because (,) is invalid Python syntax. + return "(%s)" if len(self.value) != 1 else "(%s,)" + + +class TypeSerializer(BaseSerializer): + def serialize(self): + special_cases = [ + (models.Model, "models.Model", []), + ] + for case, string, imports in special_cases: + if case is self.value: + return string, set(imports) + if hasattr(self.value, "__module__"): + module = self.value.__module__ + if module == builtins.__name__: + return self.value.__name__, set() + else: + return "%s.%s" % (module, self.value.__name__), {"import %s" % module} + + +class UUIDSerializer(BaseSerializer): + def serialize(self): + return "uuid.%s" % repr(self.value), {"import uuid"} + + +def serializer_factory(value): + from django.db.migrations.writer import SettingsReference + if isinstance(value, Promise): + value = str(value) + elif isinstance(value, LazyObject): + # The unwrapped value is returned as the first item of the arguments + # tuple. 
+ value = value.__reduce__()[1][0] + + if isinstance(value, models.Field): + return ModelFieldSerializer(value) + if isinstance(value, models.manager.BaseManager): + return ModelManagerSerializer(value) + if isinstance(value, Operation): + return OperationSerializer(value) + if isinstance(value, type): + return TypeSerializer(value) + # Anything that knows how to deconstruct itself. + if hasattr(value, 'deconstruct'): + return DeconstructableSerializer(value) + + # Unfortunately some of these are order-dependent. + if isinstance(value, frozenset): + return FrozensetSerializer(value) + if isinstance(value, list): + return SequenceSerializer(value) + if isinstance(value, set): + return SetSerializer(value) + if isinstance(value, tuple): + return TupleSerializer(value) + if isinstance(value, dict): + return DictionarySerializer(value) + if isinstance(value, enum.Enum): + return EnumSerializer(value) + if isinstance(value, datetime.datetime): + return DatetimeSerializer(value) + if isinstance(value, datetime.date): + return DateSerializer(value) + if isinstance(value, datetime.time): + return TimeSerializer(value) + if isinstance(value, datetime.timedelta): + return TimedeltaSerializer(value) + if isinstance(value, SettingsReference): + return SettingsReferenceSerializer(value) + if isinstance(value, float): + return FloatSerializer(value) + if isinstance(value, (bool, int, type(None))): + return BaseSimpleSerializer(value) + if isinstance(value, bytes): + return ByteTypeSerializer(value) + if isinstance(value, str): + return TextTypeSerializer(value) + if isinstance(value, decimal.Decimal): + return DecimalSerializer(value) + if isinstance(value, functools.partial): + return FunctoolsPartialSerializer(value) + if isinstance(value, (types.FunctionType, types.BuiltinFunctionType, types.MethodType)): + return FunctionTypeSerializer(value) + if isinstance(value, collections.Iterable): + return IterableSerializer(value) + if isinstance(value, (COMPILED_REGEX_TYPE, 
RegexObject)): + return RegexSerializer(value) + if isinstance(value, uuid.UUID): + return UUIDSerializer(value) + raise ValueError( + "Cannot serialize: %r\nThere are some values Django cannot serialize into " + "migration files.\nFor more, see https://docs.djangoproject.com/en/%s/" + "topics/migrations/#migration-serializing" % (value, get_docs_version()) + ) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/state.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/state.py new file mode 100644 index 0000000000000000000000000000000000000000..4dfa3dd1248b66339e94049773e10e6fe2981486 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/state.py @@ -0,0 +1,602 @@ +import copy +from collections import OrderedDict +from contextlib import contextmanager + +from django.apps import AppConfig +from django.apps.registry import Apps, apps as global_apps +from django.conf import settings +from django.db import models +from django.db.models.fields.proxy import OrderWrt +from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT +from django.db.models.options import DEFAULT_NAMES, normalize_together +from django.db.models.utils import make_model_tuple +from django.utils.functional import cached_property +from django.utils.module_loading import import_string +from django.utils.version import get_docs_version + +from .exceptions import InvalidBasesError + + +def _get_app_label_and_model_name(model, app_label=''): + if isinstance(model, str): + split = model.split('.', 1) + return tuple(split) if len(split) == 2 else (app_label, split[0]) + else: + return model._meta.app_label, model._meta.model_name + + +def _get_related_models(m): + """Return all models that have a direct relationship to the given model.""" + related_models = [ + subclass for subclass in m.__subclasses__() + if issubclass(subclass, 
models.Model) + ] + related_fields_models = set() + for f in m._meta.get_fields(include_parents=True, include_hidden=True): + if f.is_relation and f.related_model is not None and not isinstance(f.related_model, str): + related_fields_models.add(f.model) + related_models.append(f.related_model) + # Reverse accessors of foreign keys to proxy models are attached to their + # concrete proxied model. + opts = m._meta + if opts.proxy and m in related_fields_models: + related_models.append(opts.concrete_model) + return related_models + + +def get_related_models_tuples(model): + """ + Return a list of typical (app_label, model_name) tuples for all related + models for the given model. + """ + return { + (rel_mod._meta.app_label, rel_mod._meta.model_name) + for rel_mod in _get_related_models(model) + } + + +def get_related_models_recursive(model): + """ + Return all models that have a direct or indirect relationship + to the given model. + + Relationships are either defined by explicit relational fields, like + ForeignKey, ManyToManyField or OneToOneField, or by inheriting from another + model (a superclass is related to its subclasses, but not vice versa). Note, + however, that a model inheriting from a concrete model is also related to + its superclass through the implicit *_ptr OneToOneField on the subclass. + """ + seen = set() + queue = _get_related_models(model) + for rel_mod in queue: + rel_app_label, rel_model_name = rel_mod._meta.app_label, rel_mod._meta.model_name + if (rel_app_label, rel_model_name) in seen: + continue + seen.add((rel_app_label, rel_model_name)) + queue.extend(_get_related_models(rel_mod)) + return seen - {(model._meta.app_label, model._meta.model_name)} + + +class ProjectState: + """ + Represent the entire project's overall state. This is the item that is + passed around - do it here rather than at the app level so that cross-app + FKs/etc. resolve properly. 
+ """ + + def __init__(self, models=None, real_apps=None): + self.models = models or {} + # Apps to include from main registry, usually unmigrated ones + self.real_apps = real_apps or [] + self.is_delayed = False + + def add_model(self, model_state): + app_label, model_name = model_state.app_label, model_state.name_lower + self.models[(app_label, model_name)] = model_state + if 'apps' in self.__dict__: # hasattr would cache the property + self.reload_model(app_label, model_name) + + def remove_model(self, app_label, model_name): + del self.models[app_label, model_name] + if 'apps' in self.__dict__: # hasattr would cache the property + self.apps.unregister_model(app_label, model_name) + # Need to do this explicitly since unregister_model() doesn't clear + # the cache automatically (#24513) + self.apps.clear_cache() + + def _find_reload_model(self, app_label, model_name, delay=False): + if delay: + self.is_delayed = True + + related_models = set() + + try: + old_model = self.apps.get_model(app_label, model_name) + except LookupError: + pass + else: + # Get all relations to and from the old model before reloading, + # as _meta.apps may change + if delay: + related_models = get_related_models_tuples(old_model) + else: + related_models = get_related_models_recursive(old_model) + + # Get all outgoing references from the model to be rendered + model_state = self.models[(app_label, model_name)] + # Directly related models are the models pointed to by ForeignKeys, + # OneToOneFields, and ManyToManyFields. + direct_related_models = set() + for name, field in model_state.fields: + if field.is_relation: + if field.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT: + continue + rel_app_label, rel_model_name = _get_app_label_and_model_name(field.related_model, app_label) + direct_related_models.add((rel_app_label, rel_model_name.lower())) + + # For all direct related models recursively get all related models. 
+ related_models.update(direct_related_models) + for rel_app_label, rel_model_name in direct_related_models: + try: + rel_model = self.apps.get_model(rel_app_label, rel_model_name) + except LookupError: + pass + else: + if delay: + related_models.update(get_related_models_tuples(rel_model)) + else: + related_models.update(get_related_models_recursive(rel_model)) + + # Include the model itself + related_models.add((app_label, model_name)) + + return related_models + + def reload_model(self, app_label, model_name, delay=False): + if 'apps' in self.__dict__: # hasattr would cache the property + related_models = self._find_reload_model(app_label, model_name, delay) + self._reload(related_models) + + def reload_models(self, models, delay=True): + if 'apps' in self.__dict__: # hasattr would cache the property + related_models = set() + for app_label, model_name in models: + related_models.update(self._find_reload_model(app_label, model_name, delay)) + self._reload(related_models) + + def _reload(self, related_models): + # Unregister all related models + with self.apps.bulk_update(): + for rel_app_label, rel_model_name in related_models: + self.apps.unregister_model(rel_app_label, rel_model_name) + + states_to_be_rendered = [] + # Gather all models states of those models that will be rerendered. + # This includes: + # 1. All related models of unmigrated apps + for model_state in self.apps.real_models: + if (model_state.app_label, model_state.name_lower) in related_models: + states_to_be_rendered.append(model_state) + + # 2. 
All related models of migrated apps + for rel_app_label, rel_model_name in related_models: + try: + model_state = self.models[rel_app_label, rel_model_name] + except KeyError: + pass + else: + states_to_be_rendered.append(model_state) + + # Render all models + self.apps.render_multiple(states_to_be_rendered) + + def clone(self): + """Return an exact copy of this ProjectState.""" + new_state = ProjectState( + models={k: v.clone() for k, v in self.models.items()}, + real_apps=self.real_apps, + ) + if 'apps' in self.__dict__: + new_state.apps = self.apps.clone() + new_state.is_delayed = self.is_delayed + return new_state + + def clear_delayed_apps_cache(self): + if self.is_delayed and 'apps' in self.__dict__: + del self.__dict__['apps'] + + @cached_property + def apps(self): + return StateApps(self.real_apps, self.models) + + @property + def concrete_apps(self): + self.apps = StateApps(self.real_apps, self.models, ignore_swappable=True) + return self.apps + + @classmethod + def from_apps(cls, apps): + """Take an Apps and return a ProjectState matching it.""" + app_models = {} + for model in apps.get_models(include_swapped=True): + model_state = ModelState.from_model(model) + app_models[(model_state.app_label, model_state.name_lower)] = model_state + return cls(app_models) + + def __eq__(self, other): + return self.models == other.models and set(self.real_apps) == set(other.real_apps) + + +class AppConfigStub(AppConfig): + """Stub of an AppConfig. Only provides a label and a dict of models.""" + # Not used, but required by AppConfig.__init__ + path = '' + + def __init__(self, label): + self.label = label + # App-label and app-name are not the same thing, so technically passing + # in the label here is wrong. In practice, migrations don't care about + # the app name, but we need something unique, and the label works fine. 
+ super().__init__(label, None) + + def import_models(self): + self.models = self.apps.all_models[self.label] + + +class StateApps(Apps): + """ + Subclass of the global Apps registry class to better handle dynamic model + additions and removals. + """ + def __init__(self, real_apps, models, ignore_swappable=False): + # Any apps in self.real_apps should have all their models included + # in the render. We don't use the original model instances as there + # are some variables that refer to the Apps object. + # FKs/M2Ms from real apps are also not included as they just + # mess things up with partial states (due to lack of dependencies) + self.real_models = [] + for app_label in real_apps: + app = global_apps.get_app_config(app_label) + for model in app.get_models(): + self.real_models.append(ModelState.from_model(model, exclude_rels=True)) + # Populate the app registry with a stub for each application. + app_labels = {model_state.app_label for model_state in models.values()} + app_configs = [AppConfigStub(label) for label in sorted(real_apps + list(app_labels))] + super().__init__(app_configs) + + # The lock gets in the way of copying as implemented in clone(), which + # is called whenever Django duplicates a StateApps before updating it. + self._lock = None + + self.render_multiple(list(models.values()) + self.real_models) + + # There shouldn't be any operations pending at this point. + from django.core.checks.model_checks import _check_lazy_references + ignore = {make_model_tuple(settings.AUTH_USER_MODEL)} if ignore_swappable else set() + errors = _check_lazy_references(self, ignore=ignore) + if errors: + raise ValueError("\n".join(error.msg for error in errors)) + + @contextmanager + def bulk_update(self): + # Avoid clearing each model's cache for each change. Instead, clear + # all caches when we're finished updating the model instances. 
+ ready = self.ready + self.ready = False + try: + yield + finally: + self.ready = ready + self.clear_cache() + + def render_multiple(self, model_states): + # We keep trying to render the models in a loop, ignoring invalid + # base errors, until the size of the unrendered models doesn't + # decrease by at least one, meaning there's a base dependency loop/ + # missing base. + if not model_states: + return + # Prevent that all model caches are expired for each render. + with self.bulk_update(): + unrendered_models = model_states + while unrendered_models: + new_unrendered_models = [] + for model in unrendered_models: + try: + model.render(self) + except InvalidBasesError: + new_unrendered_models.append(model) + if len(new_unrendered_models) == len(unrendered_models): + raise InvalidBasesError( + "Cannot resolve bases for %r\nThis can happen if you are inheriting models from an " + "app with migrations (e.g. contrib.auth)\n in an app with no migrations; see " + "https://docs.djangoproject.com/en/%s/topics/migrations/#dependencies " + "for more" % (new_unrendered_models, get_docs_version()) + ) + unrendered_models = new_unrendered_models + + def clone(self): + """Return a clone of this registry.""" + clone = StateApps([], {}) + clone.all_models = copy.deepcopy(self.all_models) + clone.app_configs = copy.deepcopy(self.app_configs) + # Set the pointer to the correct app registry. 
+ for app_config in clone.app_configs.values(): + app_config.apps = clone + # No need to actually clone them, they'll never change + clone.real_models = self.real_models + return clone + + def register_model(self, app_label, model): + self.all_models[app_label][model._meta.model_name] = model + if app_label not in self.app_configs: + self.app_configs[app_label] = AppConfigStub(app_label) + self.app_configs[app_label].apps = self + self.app_configs[app_label].models = OrderedDict() + self.app_configs[app_label].models[model._meta.model_name] = model + self.do_pending_operations(model) + self.clear_cache() + + def unregister_model(self, app_label, model_name): + try: + del self.all_models[app_label][model_name] + del self.app_configs[app_label].models[model_name] + except KeyError: + pass + + +class ModelState: + """ + Represent a Django Model. Don't use the actual Model class as it's not + designed to have its options changed - instead, mutate this one and then + render it into a Model as required. + + Note that while you are allowed to mutate .fields, you are not allowed + to mutate the Field instances inside there themselves - you must instead + assign new ones, as these are not detached during a clone. + """ + + def __init__(self, app_label, name, fields, options=None, bases=None, managers=None): + self.app_label = app_label + self.name = name + self.fields = fields + self.options = options or {} + self.options.setdefault('indexes', []) + self.bases = bases or (models.Model, ) + self.managers = managers or [] + # Sanity-check that fields is NOT a dict. It must be ordered. + if isinstance(self.fields, dict): + raise ValueError("ModelState.fields cannot be a dict - it must be a list of 2-tuples.") + for name, field in fields: + # Sanity-check that fields are NOT already bound to a model. + if hasattr(field, 'model'): + raise ValueError( + 'ModelState.fields cannot be bound to a model - "%s" is.' 
% name + ) + # Sanity-check that relation fields are NOT referring to a model class. + if field.is_relation and hasattr(field.related_model, '_meta'): + raise ValueError( + 'ModelState.fields cannot refer to a model class - "%s.to" does. ' + 'Use a string reference instead.' % name + ) + if field.many_to_many and hasattr(field.remote_field.through, '_meta'): + raise ValueError( + 'ModelState.fields cannot refer to a model class - "%s.through" does. ' + 'Use a string reference instead.' % name + ) + # Sanity-check that indexes have their name set. + for index in self.options['indexes']: + if not index.name: + raise ValueError( + "Indexes passed to ModelState require a name attribute. " + "%r doesn't have one." % index + ) + + @cached_property + def name_lower(self): + return self.name.lower() + + @classmethod + def from_model(cls, model, exclude_rels=False): + """Given a model, return a ModelState representing it.""" + # Deconstruct the fields + fields = [] + for field in model._meta.local_fields: + if getattr(field, "remote_field", None) and exclude_rels: + continue + if isinstance(field, OrderWrt): + continue + name = field.name + try: + fields.append((name, field.clone())) + except TypeError as e: + raise TypeError("Couldn't reconstruct field %s on %s: %s" % ( + name, + model._meta.label, + e, + )) + if not exclude_rels: + for field in model._meta.local_many_to_many: + name = field.name + try: + fields.append((name, field.clone())) + except TypeError as e: + raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % ( + name, + model._meta.object_name, + e, + )) + # Extract the options + options = {} + for name in DEFAULT_NAMES: + # Ignore some special options + if name in ["apps", "app_label"]: + continue + elif name in model._meta.original_attrs: + if name == "unique_together": + ut = model._meta.original_attrs["unique_together"] + options[name] = set(normalize_together(ut)) + elif name == "index_together": + it = 
model._meta.original_attrs["index_together"] + options[name] = set(normalize_together(it)) + elif name == "indexes": + indexes = [idx.clone() for idx in model._meta.indexes] + for index in indexes: + if not index.name: + index.set_name_with_model(model) + options['indexes'] = indexes + else: + options[name] = model._meta.original_attrs[name] + # If we're ignoring relationships, remove all field-listing model + # options (that option basically just means "make a stub model") + if exclude_rels: + for key in ["unique_together", "index_together", "order_with_respect_to"]: + if key in options: + del options[key] + # Private fields are ignored, so remove options that refer to them. + elif options.get('order_with_respect_to') in {field.name for field in model._meta.private_fields}: + del options['order_with_respect_to'] + + def flatten_bases(model): + bases = [] + for base in model.__bases__: + if hasattr(base, "_meta") and base._meta.abstract: + bases.extend(flatten_bases(base)) + else: + bases.append(base) + return bases + + # We can't rely on __mro__ directly because we only want to flatten + # abstract models and not the whole tree. However by recursing on + # __bases__ we may end up with duplicates and ordering issues, we + # therefore discard any duplicates and reorder the bases according + # to their index in the MRO. + flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x)) + + # Make our record + bases = tuple( + ( + base._meta.label_lower + if hasattr(base, "_meta") else + base + ) + for base in flattened_bases + ) + # Ensure at least one base inherits from models.Model + if not any((isinstance(base, str) or issubclass(base, models.Model)) for base in bases): + bases = (models.Model,) + + managers = [] + manager_names = set() + default_manager_shim = None + for manager in model._meta.managers: + if manager.name in manager_names: + # Skip overridden managers. 
+ continue + elif manager.use_in_migrations: + # Copy managers usable in migrations. + new_manager = copy.copy(manager) + new_manager._set_creation_counter() + elif manager is model._base_manager or manager is model._default_manager: + # Shim custom managers used as default and base managers. + new_manager = models.Manager() + new_manager.model = manager.model + new_manager.name = manager.name + if manager is model._default_manager: + default_manager_shim = new_manager + else: + continue + manager_names.add(manager.name) + managers.append((manager.name, new_manager)) + + # Ignore a shimmed default manager called objects if it's the only one. + if managers == [('objects', default_manager_shim)]: + managers = [] + + # Construct the new ModelState + return cls( + model._meta.app_label, + model._meta.object_name, + fields, + options, + bases, + managers, + ) + + def construct_managers(self): + """Deep-clone the managers using deconstruction.""" + # Sort all managers by their creation counter + sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter) + for mgr_name, manager in sorted_managers: + as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct() + if as_manager: + qs_class = import_string(qs_path) + yield mgr_name, qs_class.as_manager() + else: + manager_class = import_string(manager_path) + yield mgr_name, manager_class(*args, **kwargs) + + def clone(self): + """Return an exact copy of this ModelState.""" + return self.__class__( + app_label=self.app_label, + name=self.name, + fields=list(self.fields), + # Since options are shallow-copied here, operations such as + # AddIndex must replace their option (e.g 'indexes') rather + # than mutating it. 
+ options=dict(self.options), + bases=self.bases, + managers=list(self.managers), + ) + + def render(self, apps): + """Create a Model object from our current state into the given apps.""" + # First, make a Meta object + meta_contents = {'app_label': self.app_label, "apps": apps} + meta_contents.update(self.options) + meta = type("Meta", (), meta_contents) + # Then, work out our bases + try: + bases = tuple( + (apps.get_model(base) if isinstance(base, str) else base) + for base in self.bases + ) + except LookupError: + raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,)) + # Turn fields into a dict for the body, add other bits + body = {name: field.clone() for name, field in self.fields} + body['Meta'] = meta + body['__module__'] = "__fake__" + + # Restore managers + body.update(self.construct_managers()) + # Then, make a Model object (apps.register_model is called in __new__) + return type(self.name, bases, body) + + def get_field_by_name(self, name): + for fname, field in self.fields: + if fname == name: + return field + raise ValueError("No field called %s on model %s" % (name, self.name)) + + def get_index_by_name(self, name): + for index in self.options['indexes']: + if index.name == name: + return index + raise ValueError("No index named %s on model %s" % (name, self.name)) + + def __repr__(self): + return "<%s: '%s.%s'>" % (self.__class__.__name__, self.app_label, self.name) + + def __eq__(self, other): + return ( + (self.app_label == other.app_label) and + (self.name == other.name) and + (len(self.fields) == len(other.fields)) and + all((k1 == k2 and (f1.deconstruct()[1:] == f2.deconstruct()[1:])) + for (k1, f1), (k2, f2) in zip(self.fields, other.fields)) and + (self.options == other.options) and + (self.bases == other.bases) and + (self.managers == other.managers) + ) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/migrations/topological_sort.py 
def topological_sort_as_sets(dependency_graph):
    """Yield sets of nodes in topological order (Kahn's algorithm, 1962).

    ``dependency_graph`` maps each node to the set of nodes it depends on.
    The first yielded set holds every dependency-free node; each following
    set holds the nodes whose dependencies all appeared in earlier sets.
    Raises ValueError when the graph contains a cycle.
    """
    remaining = dependency_graph.copy()
    while remaining:
        ready = {node for node, deps in remaining.items() if not deps}

        if not ready:
            raise ValueError('Cyclic dependency in graph: {}'.format(
                ', '.join(repr(x) for x in remaining.items())))

        yield ready

        # Drop the satisfied nodes and prune them out of the surviving
        # nodes' dependency sets.
        remaining = {
            node: deps - ready
            for node, deps in remaining.items()
            if node not in ready
        }


def stable_topological_sort(l, dependency_graph):
    """Topologically sort ``l``, keeping the input order within each layer."""
    ordered = []
    for layer in topological_sort_as_sets(dependency_graph):
        ordered.extend(node for node in l if node in layer)
    return ordered
def get_migration_name_timestamp():
    """Return the current local time as a migration-name suffix (YYYYMMDD_HHMM)."""
    current = datetime.datetime.now()
    return current.strftime("%Y%m%d_%H%M")
+ """ + + def __new__(self, value, setting_name): + return str.__new__(self, value) + + def __init__(self, value, setting_name): + self.setting_name = setting_name + + +class OperationWriter: + def __init__(self, operation, indentation=2): + self.operation = operation + self.buff = [] + self.indentation = indentation + + def serialize(self): + + def _write(_arg_name, _arg_value): + if (_arg_name in self.operation.serialization_expand_args and + isinstance(_arg_value, (list, tuple, dict))): + if isinstance(_arg_value, dict): + self.feed('%s={' % _arg_name) + self.indent() + for key, value in _arg_value.items(): + key_string, key_imports = MigrationWriter.serialize(key) + arg_string, arg_imports = MigrationWriter.serialize(value) + args = arg_string.splitlines() + if len(args) > 1: + self.feed('%s: %s' % (key_string, args[0])) + for arg in args[1:-1]: + self.feed(arg) + self.feed('%s,' % args[-1]) + else: + self.feed('%s: %s,' % (key_string, arg_string)) + imports.update(key_imports) + imports.update(arg_imports) + self.unindent() + self.feed('},') + else: + self.feed('%s=[' % _arg_name) + self.indent() + for item in _arg_value: + arg_string, arg_imports = MigrationWriter.serialize(item) + args = arg_string.splitlines() + if len(args) > 1: + for arg in args[:-1]: + self.feed(arg) + self.feed('%s,' % args[-1]) + else: + self.feed('%s,' % arg_string) + imports.update(arg_imports) + self.unindent() + self.feed('],') + else: + arg_string, arg_imports = MigrationWriter.serialize(_arg_value) + args = arg_string.splitlines() + if len(args) > 1: + self.feed('%s=%s' % (_arg_name, args[0])) + for arg in args[1:-1]: + self.feed(arg) + self.feed('%s,' % args[-1]) + else: + self.feed('%s=%s,' % (_arg_name, arg_string)) + imports.update(arg_imports) + + imports = set() + name, args, kwargs = self.operation.deconstruct() + operation_args = get_func_args(self.operation.__init__) + + # See if this operation is in django.db.migrations. 
If it is, + # We can just use the fact we already have that imported, + # otherwise, we need to add an import for the operation class. + if getattr(migrations, name, None) == self.operation.__class__: + self.feed('migrations.%s(' % name) + else: + imports.add('import %s' % (self.operation.__class__.__module__)) + self.feed('%s.%s(' % (self.operation.__class__.__module__, name)) + + self.indent() + + for i, arg in enumerate(args): + arg_value = arg + arg_name = operation_args[i] + _write(arg_name, arg_value) + + i = len(args) + # Only iterate over remaining arguments + for arg_name in operation_args[i:]: + if arg_name in kwargs: # Don't sort to maintain signature order + arg_value = kwargs[arg_name] + _write(arg_name, arg_value) + + self.unindent() + self.feed('),') + return self.render(), imports + + def indent(self): + self.indentation += 1 + + def unindent(self): + self.indentation -= 1 + + def feed(self, line): + self.buff.append(' ' * (self.indentation * 4) + line) + + def render(self): + return '\n'.join(self.buff) + + +class MigrationWriter: + """ + Take a Migration instance and is able to produce the contents + of the migration file from it. 
+ """ + + def __init__(self, migration): + self.migration = migration + self.needs_manual_porting = False + + def as_string(self): + """Return a string of the file contents.""" + items = { + "replaces_str": "", + "initial_str": "", + } + + imports = set() + + # Deconstruct operations + operations = [] + for operation in self.migration.operations: + operation_string, operation_imports = OperationWriter(operation).serialize() + imports.update(operation_imports) + operations.append(operation_string) + items["operations"] = "\n".join(operations) + "\n" if operations else "" + + # Format dependencies and write out swappable dependencies right + dependencies = [] + for dependency in self.migration.dependencies: + if dependency[0] == "__setting__": + dependencies.append(" migrations.swappable_dependency(settings.%s)," % dependency[1]) + imports.add("from django.conf import settings") + else: + dependencies.append(" %s," % self.serialize(dependency)[0]) + items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else "" + + # Format imports nicely, swapping imports of functions from migration files + # for comments + migration_imports = set() + for line in list(imports): + if re.match(r"^import (.*)\.\d+[^\s]*$", line): + migration_imports.add(line.split("import")[1].strip()) + imports.remove(line) + self.needs_manual_porting = True + + # django.db.migrations is always used, but models import may not be. + # If models import exists, merge it with migrations import. + if "from django.db import models" in imports: + imports.discard("from django.db import models") + imports.add("from django.db import migrations, models") + else: + imports.add("from django.db import migrations") + + # Sort imports by the package / module to be imported (the part after + # "from" in "from ... import ..." or after "import" in "import ..."). 
+ sorted_imports = sorted(imports, key=lambda i: i.split()[1]) + items["imports"] = "\n".join(sorted_imports) + "\n" if imports else "" + if migration_imports: + items["imports"] += ( + "\n\n# Functions from the following migrations need manual " + "copying.\n# Move them and any dependencies into this file, " + "then update the\n# RunPython operations to refer to the local " + "versions:\n# %s" + ) % "\n# ".join(sorted(migration_imports)) + # If there's a replaces, make a string for it + if self.migration.replaces: + items['replaces_str'] = "\n replaces = %s\n" % self.serialize(self.migration.replaces)[0] + # Hinting that goes into comment + items.update( + version=get_version(), + timestamp=now().strftime("%Y-%m-%d %H:%M"), + ) + + if self.migration.initial: + items['initial_str'] = "\n initial = True\n" + + return MIGRATION_TEMPLATE % items + + @property + def basedir(self): + migrations_package_name, _ = MigrationLoader.migrations_module(self.migration.app_label) + + if migrations_package_name is None: + raise ValueError( + "Django can't create migrations for app '%s' because " + "migrations have been disabled via the MIGRATION_MODULES " + "setting." 
% self.migration.app_label + ) + + # See if we can import the migrations module directly + try: + migrations_module = import_module(migrations_package_name) + except ImportError: + pass + else: + try: + return module_dir(migrations_module) + except ValueError: + pass + + # Alright, see if it's a direct submodule of the app + app_config = apps.get_app_config(self.migration.app_label) + maybe_app_name, _, migrations_package_basename = migrations_package_name.rpartition(".") + if app_config.name == maybe_app_name: + return os.path.join(app_config.path, migrations_package_basename) + + # In case of using MIGRATION_MODULES setting and the custom package + # doesn't exist, create one, starting from an existing package + existing_dirs, missing_dirs = migrations_package_name.split("."), [] + while existing_dirs: + missing_dirs.insert(0, existing_dirs.pop(-1)) + try: + base_module = import_module(".".join(existing_dirs)) + except ImportError: + continue + else: + try: + base_dir = module_dir(base_module) + except ValueError: + continue + else: + break + else: + raise ValueError( + "Could not locate an appropriate location to create " + "migrations package %s. Make sure the toplevel " + "package exists and can be imported." 
% + migrations_package_name) + + final_dir = os.path.join(base_dir, *missing_dirs) + if not os.path.isdir(final_dir): + os.makedirs(final_dir) + for missing_dir in missing_dirs: + base_dir = os.path.join(base_dir, missing_dir) + with open(os.path.join(base_dir, "__init__.py"), "w"): + pass + + return final_dir + + @property + def filename(self): + return "%s.py" % self.migration.name + + @property + def path(self): + return os.path.join(self.basedir, self.filename) + + @classmethod + def serialize(cls, value): + return serializer_factory(value).serialize() + + +MIGRATION_TEMPLATE = """\ +# Generated by Django %(version)s on %(timestamp)s + +%(imports)s + +class Migration(migrations.Migration): +%(replaces_str)s%(initial_str)s + dependencies = [ +%(dependencies)s\ + ] + + operations = [ +%(operations)s\ + ] +""" diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/__init__.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..628f92db3c9ba183a18b5c72f4d6d5669c91d902 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/__init__.py @@ -0,0 +1,76 @@ +from django.core.exceptions import ObjectDoesNotExist +from django.db.models import signals +from django.db.models.aggregates import * # NOQA +from django.db.models.aggregates import __all__ as aggregates_all +from django.db.models.deletion import ( + CASCADE, DO_NOTHING, PROTECT, SET, SET_DEFAULT, SET_NULL, ProtectedError, +) +from django.db.models.expressions import ( + Case, Exists, Expression, ExpressionList, ExpressionWrapper, F, Func, + OuterRef, RowRange, Subquery, Value, ValueRange, When, Window, WindowFrame, +) +from django.db.models.fields import * # NOQA +from django.db.models.fields import __all__ as fields_all +from django.db.models.fields.files import FileField, ImageField +from 
def permalink(func):
    """
    Decorator that calls urls.reverse() to return a URL using parameters
    returned by the decorated function "func".

    "func" should be a function that returns a tuple in one of the
    following formats:
        (viewname, viewargs)
        (viewname, viewargs, viewkwargs)
    """
    import warnings
    from functools import wraps

    from django.urls import reverse
    from django.utils.deprecation import RemovedInDjango21Warning

    # Deprecation is announced once, at decoration time, not per call.
    warnings.warn(
        'permalink() is deprecated in favor of calling django.urls.reverse() '
        'in the decorated method.',
        RemovedInDjango21Warning,
        stacklevel=2,
    )

    @wraps(func)
    def wrapper(*args, **kwargs):
        view_bits = func(*args, **kwargs)
        # The [1:3] slice covers both the two- and three-tuple shapes:
        # reverse(viewname, urlconf=None, args, kwargs).
        return reverse(view_bits[0], None, *view_bits[1:3])
    return wrapper
'FilteredRelation', + 'ForeignKey', 'ForeignObject', 'OneToOneField', 'ManyToManyField', + 'ManyToOneRel', 'ManyToManyRel', 'OneToOneRel', 'permalink', +] diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/aggregates.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/aggregates.py new file mode 100644 index 0000000000000000000000000000000000000000..3c0434f907bef101c2fd9e9bfb5da2c511d57886 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/aggregates.py @@ -0,0 +1,180 @@ +""" +Classes to represent the definitions of aggregate functions. +""" +from django.core.exceptions import FieldError +from django.db.models.expressions import Case, Func, Star, When +from django.db.models.fields import DecimalField, FloatField, IntegerField +from django.db.models.query_utils import Q + +__all__ = [ + 'Aggregate', 'Avg', 'Count', 'Max', 'Min', 'StdDev', 'Sum', 'Variance', +] + + +class Aggregate(Func): + contains_aggregate = True + name = None + filter_template = '%s FILTER (WHERE %%(filter)s)' + window_compatible = True + + def __init__(self, *args, filter=None, **kwargs): + self.filter = filter + super().__init__(*args, **kwargs) + + def get_source_fields(self): + # Don't return the filter expression since it's not a source field. 
    @property
    def default_alias(self):
        """Alias used when the aggregate is annotated without an explicit name.

        Only derivable when there is exactly one source expression and it
        carries a ``name`` (e.g. a field reference): the alias becomes
        '<field>__<aggregate>' using the lowercased aggregate name. Any
        other shape raises TypeError so the caller must alias explicitly.
        """
        expressions = self.get_source_expressions()
        if len(expressions) == 1 and hasattr(expressions[0], 'name'):
            return '%s__%s' % (expressions[0].name, self.name.lower())
        raise TypeError("Complex expressions require an alias")
copy = self.copy() + copy.filter = None + condition = When(Q()) + source_expressions = copy.get_source_expressions() + condition.set_source_expressions([self.filter, source_expressions[0]]) + copy.set_source_expressions([Case(condition)] + source_expressions[1:]) + return super(Aggregate, copy).as_sql(compiler, connection, **extra_context) + return super().as_sql(compiler, connection, **extra_context) + + def _get_repr_options(self): + options = super()._get_repr_options() + if self.filter: + options.update({'filter': self.filter}) + return options + + +class Avg(Aggregate): + function = 'AVG' + name = 'Avg' + + def _resolve_output_field(self): + source_field = self.get_source_fields()[0] + if isinstance(source_field, (IntegerField, DecimalField)): + return FloatField() + return super()._resolve_output_field() + + def as_oracle(self, compiler, connection): + if self.output_field.get_internal_type() == 'DurationField': + expression = self.get_source_expressions()[0] + from django.db.backends.oracle.functions import IntervalToSeconds, SecondsToInterval + return compiler.compile( + SecondsToInterval(Avg(IntervalToSeconds(expression), filter=self.filter)) + ) + return super().as_sql(compiler, connection) + + +class Count(Aggregate): + function = 'COUNT' + name = 'Count' + template = '%(function)s(%(distinct)s%(expressions)s)' + output_field = IntegerField() + + def __init__(self, expression, distinct=False, filter=None, **extra): + if expression == '*': + expression = Star() + if isinstance(expression, Star) and filter is not None: + raise ValueError('Star cannot be used with filter. 
class Variance(Aggregate):
    """SQL variance aggregate: population variance by default, sample on request."""
    name = 'Variance'
    output_field = FloatField()

    def __init__(self, expression, sample=False, **extra):
        # VAR_SAMP is the SQL sample-variance function, VAR_POP the
        # population one; the choice is fixed at construction time.
        if sample:
            self.function = 'VAR_SAMP'
        else:
            self.function = 'VAR_POP'
        super().__init__(expression, **extra)

    def _get_repr_options(self):
        base_options = super()._get_repr_options()
        return dict(base_options, sample=(self.function == 'VAR_SAMP'))
class Deferred:
    """Sentinel type whose single instance (DEFERRED, below) marks field
    values that were not loaded from the database."""

    def __repr__(self):
        # Restored marker text: upstream Django returns '<Deferred field>'
        # here; the empty string in this copy was an artifact of the
        # angle-bracketed text being stripped during extraction.
        return '<Deferred field>'

    def __str__(self):
        return '<Deferred field>'
We assume it is all in self.args. + return (unpickle_inner_exception, (attached_to, name), self.args) + + def __setstate__(self, args): + self.args = args + + class_dict['__reduce__'] = __reduce__ + class_dict['__setstate__'] = __setstate__ + + return type(name, parents, class_dict) + + +class ModelBase(type): + """Metaclass for all models.""" + def __new__(cls, name, bases, attrs): + super_new = super().__new__ + + # Also ensure initialization is only performed for subclasses of Model + # (excluding Model class itself). + parents = [b for b in bases if isinstance(b, ModelBase)] + if not parents: + return super_new(cls, name, bases, attrs) + + # Create the class. + module = attrs.pop('__module__') + new_attrs = {'__module__': module} + classcell = attrs.pop('__classcell__', None) + if classcell is not None: + new_attrs['__classcell__'] = classcell + new_class = super_new(cls, name, bases, new_attrs) + attr_meta = attrs.pop('Meta', None) + abstract = getattr(attr_meta, 'abstract', False) + if not attr_meta: + meta = getattr(new_class, 'Meta', None) + else: + meta = attr_meta + base_meta = getattr(new_class, '_meta', None) + + app_label = None + + # Look for an application configuration to attach the model to. + app_config = apps.get_containing_app_config(module) + + if getattr(meta, 'app_label', None) is None: + if app_config is None: + if not abstract: + raise RuntimeError( + "Model class %s.%s doesn't declare an explicit " + "app_label and isn't in an application in " + "INSTALLED_APPS." 
% (module, name) + ) + + else: + app_label = app_config.label + + new_class.add_to_class('_meta', Options(meta, app_label)) + if not abstract: + new_class.add_to_class( + 'DoesNotExist', + subclass_exception( + 'DoesNotExist', + tuple( + x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract + ) or (ObjectDoesNotExist,), + module, + attached_to=new_class)) + new_class.add_to_class( + 'MultipleObjectsReturned', + subclass_exception( + 'MultipleObjectsReturned', + tuple( + x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract + ) or (MultipleObjectsReturned,), + module, + attached_to=new_class)) + if base_meta and not base_meta.abstract: + # Non-abstract child classes inherit some attributes from their + # non-abstract parent (unless an ABC comes before it in the + # method resolution order). + if not hasattr(meta, 'ordering'): + new_class._meta.ordering = base_meta.ordering + if not hasattr(meta, 'get_latest_by'): + new_class._meta.get_latest_by = base_meta.get_latest_by + + is_proxy = new_class._meta.proxy + + # If the model is a proxy, ensure that the base class + # hasn't been swapped out. + if is_proxy and base_meta and base_meta.swapped: + raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped)) + + # Add all attributes to the class. + for obj_name, obj in attrs.items(): + new_class.add_to_class(obj_name, obj) + + # All the fields of any type declared on this model + new_fields = chain( + new_class._meta.local_fields, + new_class._meta.local_many_to_many, + new_class._meta.private_fields + ) + field_names = {f.name for f in new_fields} + + # Basic setup for proxy models. + if is_proxy: + base = None + for parent in [kls for kls in parents if hasattr(kls, '_meta')]: + if parent._meta.abstract: + if parent._meta.fields: + raise TypeError( + "Abstract base class containing model fields not " + "permitted for proxy model '%s'." 
% name + ) + else: + continue + if base is None: + base = parent + elif parent._meta.concrete_model is not base._meta.concrete_model: + raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name) + if base is None: + raise TypeError("Proxy model '%s' has no non-abstract model base class." % name) + new_class._meta.setup_proxy(base) + new_class._meta.concrete_model = base._meta.concrete_model + else: + new_class._meta.concrete_model = new_class + + # Collect the parent links for multi-table inheritance. + parent_links = {} + for base in reversed([new_class] + parents): + # Conceptually equivalent to `if base is Model`. + if not hasattr(base, '_meta'): + continue + # Skip concrete parent classes. + if base != new_class and not base._meta.abstract: + continue + # Locate OneToOneField instances. + for field in base._meta.local_fields: + if isinstance(field, OneToOneField): + related = resolve_relation(new_class, field.remote_field.model) + parent_links[make_model_tuple(related)] = field + + # Track fields inherited from base models. + inherited_attributes = set() + # Do the appropriate setup for any model parents. + for base in new_class.mro(): + if base not in parents or not hasattr(base, '_meta'): + # Things without _meta aren't functional models, so they're + # uninteresting parents. + inherited_attributes.update(base.__dict__) + continue + + parent_fields = base._meta.local_fields + base._meta.local_many_to_many + if not base._meta.abstract: + # Check for clashes between locally declared fields and those + # on the base classes. + for field in parent_fields: + if field.name in field_names: + raise FieldError( + 'Local field %r in class %r clashes with field of ' + 'the same name from base class %r.' % ( + field.name, + name, + base.__name__, + ) + ) + else: + inherited_attributes.add(field.name) + + # Concrete classes... 
+ base = base._meta.concrete_model + base_key = make_model_tuple(base) + if base_key in parent_links: + field = parent_links[base_key] + elif not is_proxy: + attr_name = '%s_ptr' % base._meta.model_name + field = OneToOneField( + base, + on_delete=CASCADE, + name=attr_name, + auto_created=True, + parent_link=True, + ) + + if attr_name in field_names: + raise FieldError( + "Auto-generated field '%s' in class %r for " + "parent_link to base class %r clashes with " + "declared field of the same name." % ( + attr_name, + name, + base.__name__, + ) + ) + + # Only add the ptr field if it's not already present; + # e.g. migrations will already have it specified + if not hasattr(new_class, attr_name): + new_class.add_to_class(attr_name, field) + else: + field = None + new_class._meta.parents[base] = field + else: + base_parents = base._meta.parents.copy() + + # Add fields from abstract base class if it wasn't overridden. + for field in parent_fields: + if (field.name not in field_names and + field.name not in new_class.__dict__ and + field.name not in inherited_attributes): + new_field = copy.deepcopy(field) + new_class.add_to_class(field.name, new_field) + # Replace parent links defined on this base by the new + # field. It will be appropriately resolved if required. + if field.one_to_one: + for parent, parent_link in base_parents.items(): + if field == parent_link: + base_parents[parent] = new_field + + # Pass any non-abstract parent classes onto child. + new_class._meta.parents.update(base_parents) + + # Inherit private fields (like GenericForeignKey) from the parent + # class + for field in base._meta.private_fields: + if field.name in field_names: + if not base._meta.abstract: + raise FieldError( + 'Local field %r in class %r clashes with field of ' + 'the same name from base class %r.' 
% ( + field.name, + name, + base.__name__, + ) + ) + else: + new_class.add_to_class(field.name, copy.deepcopy(field)) + + # Copy indexes so that index names are unique when models extend an + # abstract model. + new_class._meta.indexes = [copy.deepcopy(idx) for idx in new_class._meta.indexes] + + if abstract: + # Abstract base models can't be instantiated and don't appear in + # the list of models for an app. We do the final setup for them a + # little differently from normal models. + attr_meta.abstract = False + new_class.Meta = attr_meta + return new_class + + new_class._prepare() + new_class._meta.apps.register_model(new_class._meta.app_label, new_class) + return new_class + + def add_to_class(cls, name, value): + # We should call the contribute_to_class method only if it's bound + if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'): + value.contribute_to_class(cls, name) + else: + setattr(cls, name, value) + + def _prepare(cls): + """Create some methods once self._meta has been populated.""" + opts = cls._meta + opts._prepare(cls) + + if opts.order_with_respect_to: + cls.get_next_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=True) + cls.get_previous_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=False) + + # Defer creating accessors on the foreign class until it has been + # created and registered. If remote_field is None, we're ordering + # with respect to a GenericForeignKey and don't know what the + # foreign class is - we'll add those accessors later in + # contribute_to_class(). + if opts.order_with_respect_to.remote_field: + wrt = opts.order_with_respect_to + remote = wrt.remote_field.model + lazy_related_operation(make_foreign_order_accessors, cls, remote) + + # Give the class a docstring -- its definition. 
+ if cls.__doc__ is None: + cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields)) + + get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower) + if get_absolute_url_override: + setattr(cls, 'get_absolute_url', get_absolute_url_override) + + if not opts.managers: + if any(f.name == 'objects' for f in opts.fields): + raise ValueError( + "Model %s must specify a custom Manager, because it has a " + "field named 'objects'." % cls.__name__ + ) + manager = Manager() + manager.auto_created = True + cls.add_to_class('objects', manager) + + # Set the name of _meta.indexes. This can't be done in + # Options.contribute_to_class() because fields haven't been added to + # the model at that point. + for index in cls._meta.indexes: + if not index.name: + index.set_name_with_model(cls) + + class_prepared.send(sender=cls) + + @property + def _base_manager(cls): + return cls._meta.base_manager + + @property + def _default_manager(cls): + return cls._meta.default_manager + + +class ModelStateFieldsCacheDescriptor: + def __get__(self, instance, cls=None): + if instance is None: + return self + res = instance.fields_cache = {} + return res + + +class ModelState: + """Store model instance state.""" + db = None + # If true, uniqueness validation checks will consider this a new, unsaved + # object. Necessary for correct validation of new instances of objects with + # explicit (non-auto) PKs. This impacts validation only; it has no effect + # on the actual save. 
+ adding = True + fields_cache = ModelStateFieldsCacheDescriptor() + + +class Model(metaclass=ModelBase): + + def __init__(self, *args, **kwargs): + # Alias some things as locals to avoid repeat global lookups + cls = self.__class__ + opts = self._meta + _setattr = setattr + _DEFERRED = DEFERRED + + pre_init.send(sender=cls, args=args, kwargs=kwargs) + + # Set up the storage for instance state + self._state = ModelState() + + # There is a rather weird disparity here; if kwargs, it's set, then args + # overrides it. It should be one or the other; don't duplicate the work + # The reason for the kwargs check is that standard iterator passes in by + # args, and instantiation for iteration is 33% faster. + if len(args) > len(opts.concrete_fields): + # Daft, but matches old exception sans the err msg. + raise IndexError("Number of args exceeds number of fields") + + if not kwargs: + fields_iter = iter(opts.concrete_fields) + # The ordering of the zip calls matter - zip throws StopIteration + # when an iter throws it. So if the first iter throws it, the second + # is *not* consumed. We rely on this, so don't change the order + # without changing the logic. + for val, field in zip(args, fields_iter): + if val is _DEFERRED: + continue + _setattr(self, field.attname, val) + else: + # Slower, kwargs-ready version. + fields_iter = iter(opts.fields) + for val, field in zip(args, fields_iter): + if val is _DEFERRED: + continue + _setattr(self, field.attname, val) + kwargs.pop(field.name, None) + + # Now we're left with the unprocessed fields that *must* come from + # keywords, or default. + + for field in fields_iter: + is_related_object = False + # Virtual field + if field.attname not in kwargs and field.column is None: + continue + if kwargs: + if isinstance(field.remote_field, ForeignObjectRel): + try: + # Assume object instance was passed in. 
+ rel_obj = kwargs.pop(field.name) + is_related_object = True + except KeyError: + try: + # Object instance wasn't passed in -- must be an ID. + val = kwargs.pop(field.attname) + except KeyError: + val = field.get_default() + else: + # Object instance was passed in. Special case: You can + # pass in "None" for related objects if it's allowed. + if rel_obj is None and field.null: + val = None + else: + try: + val = kwargs.pop(field.attname) + except KeyError: + # This is done with an exception rather than the + # default argument on pop because we don't want + # get_default() to be evaluated, and then not used. + # Refs #12057. + val = field.get_default() + else: + val = field.get_default() + + if is_related_object: + # If we are passed a related instance, set it using the + # field.name instead of field.attname (e.g. "user" instead of + # "user_id") so that the object gets properly cached (and type + # checked) by the RelatedObjectDescriptor. + if rel_obj is not _DEFERRED: + _setattr(self, field.name, rel_obj) + else: + if val is not _DEFERRED: + _setattr(self, field.attname, val) + + if kwargs: + property_names = opts._property_names + for prop in tuple(kwargs): + try: + # Any remaining kwargs must correspond to properties or + # virtual fields. 
+ if prop in property_names or opts.get_field(prop): + if kwargs[prop] is not _DEFERRED: + _setattr(self, prop, kwargs[prop]) + del kwargs[prop] + except (AttributeError, FieldDoesNotExist): + pass + for kwarg in kwargs: + raise TypeError("'%s' is an invalid keyword argument for this function" % kwarg) + super().__init__() + post_init.send(sender=cls, instance=self) + + @classmethod + def from_db(cls, db, field_names, values): + if len(values) != len(cls._meta.concrete_fields): + values_iter = iter(values) + values = [ + next(values_iter) if f.attname in field_names else DEFERRED + for f in cls._meta.concrete_fields + ] + new = cls(*values) + new._state.adding = False + new._state.db = db + return new + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self) + + def __str__(self): + return '%s object (%s)' % (self.__class__.__name__, self.pk) + + def __eq__(self, other): + if not isinstance(other, Model): + return False + if self._meta.concrete_model != other._meta.concrete_model: + return False + my_pk = self.pk + if my_pk is None: + return self is other + return my_pk == other.pk + + def __hash__(self): + if self.pk is None: + raise TypeError("Model instances without primary key value are unhashable") + return hash(self.pk) + + def __reduce__(self): + data = self.__getstate__() + data[DJANGO_VERSION_PICKLE_KEY] = get_version() + class_id = self._meta.app_label, self._meta.object_name + return model_unpickle, (class_id,), data + + def __getstate__(self): + """Hook to allow choosing the attributes to pickle.""" + return self.__dict__ + + def __setstate__(self, state): + msg = None + pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) + if pickled_version: + current_version = get_version() + if current_version != pickled_version: + msg = ( + "Pickled model instance's Django version %s does not match " + "the current version %s." % (pickled_version, current_version) + ) + else: + msg = "Pickled model instance's Django version is not specified." 
+ + if msg: + warnings.warn(msg, RuntimeWarning, stacklevel=2) + + self.__dict__.update(state) + + def _get_pk_val(self, meta=None): + if not meta: + meta = self._meta + return getattr(self, meta.pk.attname) + + def _set_pk_val(self, value): + return setattr(self, self._meta.pk.attname, value) + + pk = property(_get_pk_val, _set_pk_val) + + def get_deferred_fields(self): + """ + Return a set containing names of deferred fields for this instance. + """ + return { + f.attname for f in self._meta.concrete_fields + if f.attname not in self.__dict__ + } + + def refresh_from_db(self, using=None, fields=None): + """ + Reload field values from the database. + + By default, the reloading happens from the database this instance was + loaded from, or by the read router if this instance wasn't loaded from + any database. The using parameter will override the default. + + Fields can be used to specify which fields to reload. The fields + should be an iterable of field attnames. If fields is None, then + all non-deferred fields are reloaded. + + When accessing deferred fields of an instance, the deferred loading + of the field will call this method. + """ + if fields is not None: + if len(fields) == 0: + return + if any(LOOKUP_SEP in f for f in fields): + raise ValueError( + 'Found "%s" in fields argument. Relations and transforms ' + 'are not allowed in fields.' % LOOKUP_SEP) + + db = using if using is not None else self._state.db + db_instance_qs = self.__class__._default_manager.using(db).filter(pk=self.pk) + + # Use provided fields, if not set then reload all non-deferred fields. 
+ deferred_fields = self.get_deferred_fields() + if fields is not None: + fields = list(fields) + db_instance_qs = db_instance_qs.only(*fields) + elif deferred_fields: + fields = [f.attname for f in self._meta.concrete_fields + if f.attname not in deferred_fields] + db_instance_qs = db_instance_qs.only(*fields) + + db_instance = db_instance_qs.get() + non_loaded_fields = db_instance.get_deferred_fields() + for field in self._meta.concrete_fields: + if field.attname in non_loaded_fields: + # This field wasn't refreshed - skip ahead. + continue + setattr(self, field.attname, getattr(db_instance, field.attname)) + # Throw away stale foreign key references. + if field.is_relation and field.is_cached(self): + rel_instance = field.get_cached_value(self) + local_val = getattr(db_instance, field.attname) + related_val = None if rel_instance is None else getattr(rel_instance, field.target_field.attname) + if local_val != related_val or (local_val is None and related_val is None): + field.delete_cached_value(self) + + # Clear cached relations. + for field in self._meta.related_objects: + if field.is_cached(self): + field.delete_cached_value(self) + + self._state.db = db_instance._state.db + + def serializable_value(self, field_name): + """ + Return the value of the field name for this instance. If the field is + a foreign key, return the id value instead of the object. If there's + no Field object with this name on the model, return the model + attribute's value. + + Used to serialize a field's value (in the serializer, or form output, + for example). Normally, you would just access the attribute directly + and not use this method. + """ + try: + field = self._meta.get_field(field_name) + except FieldDoesNotExist: + return getattr(self, field_name) + return getattr(self, field.attname) + + def save(self, force_insert=False, force_update=False, using=None, + update_fields=None): + """ + Save the current instance. 
Override this in a subclass if you want to + control the saving process. + + The 'force_insert' and 'force_update' parameters can be used to insist + that the "save" must be an SQL insert or update (or equivalent for + non-SQL backends), respectively. Normally, they should not be set. + """ + # Ensure that a model instance without a PK hasn't been assigned to + # a ForeignKey or OneToOneField on this model. If the field is + # nullable, allowing the save() would result in silent data loss. + for field in self._meta.concrete_fields: + # If the related field isn't cached, then an instance hasn't + # been assigned and there's no need to worry about this check. + if field.is_relation and field.is_cached(self): + obj = getattr(self, field.name, None) + # A pk may have been assigned manually to a model instance not + # saved to the database (or auto-generated in a case like + # UUIDField), but we allow the save to proceed and rely on the + # database to raise an IntegrityError if applicable. If + # constraints aren't supported by the database, there's the + # unavoidable risk of data corruption. + if obj and obj.pk is None: + # Remove the object from a related instance cache. + if not field.remote_field.multiple: + field.remote_field.delete_cached_value(obj) + raise ValueError( + "save() prohibited to prevent data loss due to " + "unsaved related object '%s'." % field.name + ) + + using = using or router.db_for_write(self.__class__, instance=self) + if force_insert and (force_update or update_fields): + raise ValueError("Cannot force both insert and updating in model saving.") + + deferred_fields = self.get_deferred_fields() + if update_fields is not None: + # If update_fields is empty, skip the save. We do also check for + # no-op saves later on for inheritance cases. This bailout is + # still needed for skipping signal sending. 
+ if len(update_fields) == 0: + return + + update_fields = frozenset(update_fields) + field_names = set() + + for field in self._meta.fields: + if not field.primary_key: + field_names.add(field.name) + + if field.name != field.attname: + field_names.add(field.attname) + + non_model_fields = update_fields.difference(field_names) + + if non_model_fields: + raise ValueError("The following fields do not exist in this " + "model or are m2m fields: %s" + % ', '.join(non_model_fields)) + + # If saving to the same database, and this model is deferred, then + # automatically do a "update_fields" save on the loaded fields. + elif not force_insert and deferred_fields and using == self._state.db: + field_names = set() + for field in self._meta.concrete_fields: + if not field.primary_key and not hasattr(field, 'through'): + field_names.add(field.attname) + loaded_fields = field_names.difference(deferred_fields) + if loaded_fields: + update_fields = frozenset(loaded_fields) + + self.save_base(using=using, force_insert=force_insert, + force_update=force_update, update_fields=update_fields) + save.alters_data = True + + def save_base(self, raw=False, force_insert=False, + force_update=False, using=None, update_fields=None): + """ + Handle the parts of saving which should be done only once per save, + yet need to be done in raw saves, too. This includes some sanity + checks and signal sending. + + The 'raw' argument is telling save_base not to save any parent + models and not to do any changes to the values before save. This + is used by fixture loading. + """ + using = using or router.db_for_write(self.__class__, instance=self) + assert not (force_insert and (force_update or update_fields)) + assert update_fields is None or len(update_fields) > 0 + cls = origin = self.__class__ + # Skip proxies, but keep the origin as the proxy model. 
+ if cls._meta.proxy: + cls = cls._meta.concrete_model + meta = cls._meta + if not meta.auto_created: + pre_save.send( + sender=origin, instance=self, raw=raw, using=using, + update_fields=update_fields, + ) + with transaction.atomic(using=using, savepoint=False): + if not raw: + self._save_parents(cls, using, update_fields) + updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields) + # Store the database on which the object was saved + self._state.db = using + # Once saved, this is no longer a to-be-added instance. + self._state.adding = False + + # Signal that the save is complete + if not meta.auto_created: + post_save.send( + sender=origin, instance=self, created=(not updated), + update_fields=update_fields, raw=raw, using=using, + ) + + save_base.alters_data = True + + def _save_parents(self, cls, using, update_fields): + """Save all the parents of cls using values from self.""" + meta = cls._meta + for parent, field in meta.parents.items(): + # Make sure the link fields are synced between parent and self. + if (field and getattr(self, parent._meta.pk.attname) is None and + getattr(self, field.attname) is not None): + setattr(self, parent._meta.pk.attname, getattr(self, field.attname)) + self._save_parents(cls=parent, using=using, update_fields=update_fields) + self._save_table(cls=parent, using=using, update_fields=update_fields) + # Set the parent's PK value to self. + if field: + setattr(self, field.attname, self._get_pk_val(parent._meta)) + # Since we didn't have an instance of the parent handy set + # attname directly, bypassing the descriptor. Invalidate + # the related object cache, in case it's been accidentally + # populated. A fresh instance will be re-built from the + # database if necessary. 
+ if field.is_cached(self): + field.delete_cached_value(self) + + def _save_table(self, raw=False, cls=None, force_insert=False, + force_update=False, using=None, update_fields=None): + """ + Do the heavy-lifting involved in saving. Update or insert the data + for a single table. + """ + meta = cls._meta + non_pks = [f for f in meta.local_concrete_fields if not f.primary_key] + + if update_fields: + non_pks = [f for f in non_pks + if f.name in update_fields or f.attname in update_fields] + + pk_val = self._get_pk_val(meta) + if pk_val is None: + pk_val = meta.pk.get_pk_value_on_save(self) + setattr(self, meta.pk.attname, pk_val) + pk_set = pk_val is not None + if not pk_set and (force_update or update_fields): + raise ValueError("Cannot force an update in save() with no primary key.") + updated = False + # If possible, try an UPDATE. If that doesn't update anything, do an INSERT. + if pk_set and not force_insert: + base_qs = cls._base_manager.using(using) + values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False))) + for f in non_pks] + forced_update = update_fields or force_update + updated = self._do_update(base_qs, using, pk_val, values, update_fields, + forced_update) + if force_update and not updated: + raise DatabaseError("Forced update did not affect any rows.") + if update_fields and not updated: + raise DatabaseError("Save with update_fields did not affect any rows.") + if not updated: + if meta.order_with_respect_to: + # If this is a model with an order_with_respect_to + # autopopulate the _order field + field = meta.order_with_respect_to + filter_args = field.get_filter_kwargs_for_object(self) + order_value = cls._base_manager.using(using).filter(**filter_args).count() + self._order = order_value + + fields = meta.local_concrete_fields + if not pk_set: + fields = [f for f in fields if f is not meta.auto_field] + + update_pk = meta.auto_field and not pk_set + result = self._do_insert(cls._base_manager, using, fields, update_pk, 
raw) + if update_pk: + setattr(self, meta.pk.attname, result) + return updated + + def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update): + """ + Try to update the model. Return True if the model was updated (if an + update query was done and a matching row was found in the DB). + """ + filtered = base_qs.filter(pk=pk_val) + if not values: + # We can end up here when saving a model in inheritance chain where + # update_fields doesn't target any field in current model. In that + # case we just say the update succeeded. Another case ending up here + # is a model with just PK - in that case check that the PK still + # exists. + return update_fields is not None or filtered.exists() + if self._meta.select_on_save and not forced_update: + if filtered.exists(): + # It may happen that the object is deleted from the DB right after + # this check, causing the subsequent UPDATE to return zero matching + # rows. The same result can occur in some rare cases when the + # database returns zero despite the UPDATE being executed + # successfully (a row is matched and updated). In order to + # distinguish these two cases, the object's existence in the + # database is again checked for if the UPDATE query returns 0. + return filtered._update(values) > 0 or filtered.exists() + else: + return False + return filtered._update(values) > 0 + + def _do_insert(self, manager, using, fields, update_pk, raw): + """ + Do an INSERT. If update_pk is defined then this method should return + the new pk for the model. + """ + return manager._insert([self], fields=fields, return_id=update_pk, + using=using, raw=raw) + + def delete(self, using=None, keep_parents=False): + using = using or router.db_for_write(self.__class__, instance=self) + assert self.pk is not None, ( + "%s object can't be deleted because its %s attribute is set to None." 
% + (self._meta.object_name, self._meta.pk.attname) + ) + + collector = Collector(using=using) + collector.collect([self], keep_parents=keep_parents) + return collector.delete() + + delete.alters_data = True + + def _get_FIELD_display(self, field): + value = getattr(self, field.attname) + return force_text(dict(field.flatchoices).get(value, value), strings_only=True) + + def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): + if not self.pk: + raise ValueError("get_next/get_previous cannot be used on unsaved objects.") + op = 'gt' if is_next else 'lt' + order = '' if is_next else '-' + param = getattr(self, field.attname) + q = Q(**{'%s__%s' % (field.name, op): param}) + q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk}) + qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by( + '%s%s' % (order, field.name), '%spk' % order + ) + try: + return qs[0] + except IndexError: + raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name) + + def _get_next_or_previous_in_order(self, is_next): + cachename = "__%s_order_cache" % is_next + if not hasattr(self, cachename): + op = 'gt' if is_next else 'lt' + order = '_order' if is_next else '-_order' + order_field = self._meta.order_with_respect_to + filter_args = order_field.get_filter_kwargs_for_object(self) + obj = self.__class__._default_manager.filter(**filter_args).filter(**{ + '_order__%s' % op: self.__class__._default_manager.values('_order').filter(**{ + self._meta.pk.name: self.pk + }) + }).order_by(order)[:1].get() + setattr(self, cachename, obj) + return getattr(self, cachename) + + def prepare_database_save(self, field): + if self.pk is None: + raise ValueError("Unsaved model instance %r cannot be used in an ORM query." 
% self) + return getattr(self, field.remote_field.get_related_field().attname) + + def clean(self): + """ + Hook for doing any extra model-wide validation after clean() has been + called on every field by self.clean_fields. Any ValidationError raised + by this method will not be associated with a particular field; it will + have a special-case association with the field defined by NON_FIELD_ERRORS. + """ + pass + + def validate_unique(self, exclude=None): + """ + Check unique constraints on the model and raise ValidationError if any + failed. + """ + unique_checks, date_checks = self._get_unique_checks(exclude=exclude) + + errors = self._perform_unique_checks(unique_checks) + date_errors = self._perform_date_checks(date_checks) + + for k, v in date_errors.items(): + errors.setdefault(k, []).extend(v) + + if errors: + raise ValidationError(errors) + + def _get_unique_checks(self, exclude=None): + """ + Return a list of checks to perform. Since validate_unique() could be + called from a ModelForm, some fields may have been excluded; we can't + perform a unique check on a model that is missing fields involved + in that check. Fields that did not validate should also be excluded, + but they need to be passed in via the exclude argument. + """ + if exclude is None: + exclude = [] + unique_checks = [] + + unique_togethers = [(self.__class__, self._meta.unique_together)] + for parent_class in self._meta.get_parent_list(): + if parent_class._meta.unique_together: + unique_togethers.append((parent_class, parent_class._meta.unique_together)) + + for model_class, unique_together in unique_togethers: + for check in unique_together: + for name in check: + # If this is an excluded field, don't add this check. + if name in exclude: + break + else: + unique_checks.append((model_class, tuple(check))) + + # These are checks for the unique_for_. + date_checks = [] + + # Gather a list of checks for fields declared as unique and add them to + # the list of checks. 
+ + fields_with_class = [(self.__class__, self._meta.local_fields)] + for parent_class in self._meta.get_parent_list(): + fields_with_class.append((parent_class, parent_class._meta.local_fields)) + + for model_class, fields in fields_with_class: + for f in fields: + name = f.name + if name in exclude: + continue + if f.unique: + unique_checks.append((model_class, (name,))) + if f.unique_for_date and f.unique_for_date not in exclude: + date_checks.append((model_class, 'date', name, f.unique_for_date)) + if f.unique_for_year and f.unique_for_year not in exclude: + date_checks.append((model_class, 'year', name, f.unique_for_year)) + if f.unique_for_month and f.unique_for_month not in exclude: + date_checks.append((model_class, 'month', name, f.unique_for_month)) + return unique_checks, date_checks + + def _perform_unique_checks(self, unique_checks): + errors = {} + + for model_class, unique_check in unique_checks: + # Try to look up an existing object with the same values as this + # object's values for all the unique field. + + lookup_kwargs = {} + for field_name in unique_check: + f = self._meta.get_field(field_name) + lookup_value = getattr(self, f.attname) + # TODO: Handle multiple backends with different feature flags. + if (lookup_value is None or + (lookup_value == '' and connection.features.interprets_empty_strings_as_nulls)): + # no value, skip the lookup + continue + if f.primary_key and not self._state.adding: + # no need to check for unique primary key when editing + continue + lookup_kwargs[str(field_name)] = lookup_value + + # some fields were skipped, no reason to do the check + if len(unique_check) != len(lookup_kwargs): + continue + + qs = model_class._default_manager.filter(**lookup_kwargs) + + # Exclude the current object from the query if we are editing an + # instance (as opposed to creating a new one) + # Note that we need to use the pk as defined by model_class, not + # self.pk. 
These can be different fields because model inheritance + # allows single model to have effectively multiple primary keys. + # Refs #17615. + model_class_pk = self._get_pk_val(model_class._meta) + if not self._state.adding and model_class_pk is not None: + qs = qs.exclude(pk=model_class_pk) + if qs.exists(): + if len(unique_check) == 1: + key = unique_check[0] + else: + key = NON_FIELD_ERRORS + errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check)) + + return errors + + def _perform_date_checks(self, date_checks): + errors = {} + for model_class, lookup_type, field, unique_for in date_checks: + lookup_kwargs = {} + # there's a ticket to add a date lookup, we can remove this special + # case if that makes it's way in + date = getattr(self, unique_for) + if date is None: + continue + if lookup_type == 'date': + lookup_kwargs['%s__day' % unique_for] = date.day + lookup_kwargs['%s__month' % unique_for] = date.month + lookup_kwargs['%s__year' % unique_for] = date.year + else: + lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type) + lookup_kwargs[field] = getattr(self, field) + + qs = model_class._default_manager.filter(**lookup_kwargs) + # Exclude the current object from the query if we are editing an + # instance (as opposed to creating a new one) + if not self._state.adding and self.pk is not None: + qs = qs.exclude(pk=self.pk) + + if qs.exists(): + errors.setdefault(field, []).append( + self.date_error_message(lookup_type, field, unique_for) + ) + return errors + + def date_error_message(self, lookup_type, field_name, unique_for): + opts = self._meta + field = opts.get_field(field_name) + return ValidationError( + message=field.error_messages['unique_for_date'], + code='unique_for_date', + params={ + 'model': self, + 'model_name': capfirst(opts.verbose_name), + 'lookup_type': lookup_type, + 'field': field_name, + 'field_label': capfirst(field.verbose_name), + 'date_field': unique_for, + 'date_field_label': 
capfirst(opts.get_field(unique_for).verbose_name), + } + ) + + def unique_error_message(self, model_class, unique_check): + opts = model_class._meta + + params = { + 'model': self, + 'model_class': model_class, + 'model_name': capfirst(opts.verbose_name), + 'unique_check': unique_check, + } + + # A unique field + if len(unique_check) == 1: + field = opts.get_field(unique_check[0]) + params['field_label'] = capfirst(field.verbose_name) + return ValidationError( + message=field.error_messages['unique'], + code='unique', + params=params, + ) + + # unique_together + else: + field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check] + params['field_labels'] = get_text_list(field_labels, _('and')) + return ValidationError( + message=_("%(model_name)s with this %(field_labels)s already exists."), + code='unique_together', + params=params, + ) + + def full_clean(self, exclude=None, validate_unique=True): + """ + Call clean_fields(), clean(), and validate_unique() on the model. + Raise a ValidationError for any errors that occur. + """ + errors = {} + if exclude is None: + exclude = [] + else: + exclude = list(exclude) + + try: + self.clean_fields(exclude=exclude) + except ValidationError as e: + errors = e.update_error_dict(errors) + + # Form.clean() is run even if other validation fails, so do the + # same with Model.clean() for consistency. + try: + self.clean() + except ValidationError as e: + errors = e.update_error_dict(errors) + + # Run unique checks, but only for fields that passed validation. + if validate_unique: + for name in errors: + if name != NON_FIELD_ERRORS and name not in exclude: + exclude.append(name) + try: + self.validate_unique(exclude=exclude) + except ValidationError as e: + errors = e.update_error_dict(errors) + + if errors: + raise ValidationError(errors) + + def clean_fields(self, exclude=None): + """ + Clean all fields and raise a ValidationError containing a dict + of all validation errors if any occur. 
+ """ + if exclude is None: + exclude = [] + + errors = {} + for f in self._meta.fields: + if f.name in exclude: + continue + # Skip validation for empty fields with blank=True. The developer + # is responsible for making sure they have a valid value. + raw_value = getattr(self, f.attname) + if f.blank and raw_value in f.empty_values: + continue + try: + setattr(self, f.attname, f.clean(raw_value, self)) + except ValidationError as e: + errors[f.name] = e.error_list + + if errors: + raise ValidationError(errors) + + @classmethod + def check(cls, **kwargs): + errors = [] + errors.extend(cls._check_swappable()) + errors.extend(cls._check_model()) + errors.extend(cls._check_managers(**kwargs)) + if not cls._meta.swapped: + errors.extend(cls._check_fields(**kwargs)) + errors.extend(cls._check_m2m_through_same_relationship()) + errors.extend(cls._check_long_column_names()) + clash_errors = ( + cls._check_id_field() + + cls._check_field_name_clashes() + + cls._check_model_name_db_lookup_clashes() + ) + errors.extend(clash_errors) + # If there are field name clashes, hide consequent column name + # clashes. + if not clash_errors: + errors.extend(cls._check_column_name_clashes()) + errors.extend(cls._check_index_together()) + errors.extend(cls._check_unique_together()) + errors.extend(cls._check_ordering()) + + return errors + + @classmethod + def _check_swappable(cls): + """Check if the swapped model exists.""" + errors = [] + if cls._meta.swapped: + try: + apps.get_model(cls._meta.swapped) + except ValueError: + errors.append( + checks.Error( + "'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable, + id='models.E001', + ) + ) + except LookupError: + app_label, model_name = cls._meta.swapped.split('.') + errors.append( + checks.Error( + "'%s' references '%s.%s', which has not been " + "installed, or is abstract." 
% ( + cls._meta.swappable, app_label, model_name + ), + id='models.E002', + ) + ) + return errors + + @classmethod + def _check_model(cls): + errors = [] + if cls._meta.proxy: + if cls._meta.local_fields or cls._meta.local_many_to_many: + errors.append( + checks.Error( + "Proxy model '%s' contains model fields." % cls.__name__, + id='models.E017', + ) + ) + return errors + + @classmethod + def _check_managers(cls, **kwargs): + """Perform all manager checks.""" + errors = [] + for manager in cls._meta.managers: + errors.extend(manager.check(**kwargs)) + return errors + + @classmethod + def _check_fields(cls, **kwargs): + """Perform all field checks.""" + errors = [] + for field in cls._meta.local_fields: + errors.extend(field.check(**kwargs)) + for field in cls._meta.local_many_to_many: + errors.extend(field.check(from_model=cls, **kwargs)) + return errors + + @classmethod + def _check_m2m_through_same_relationship(cls): + """ Check if no relationship model is used by more than one m2m field. + """ + + errors = [] + seen_intermediary_signatures = [] + + fields = cls._meta.local_many_to_many + + # Skip when the target model wasn't found. + fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) + + # Skip when the relationship model wasn't found. + fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) + + for f in fields: + signature = (f.remote_field.model, cls, f.remote_field.through) + if signature in seen_intermediary_signatures: + errors.append( + checks.Error( + "The model has two many-to-many relations through " + "the intermediate model '%s'." 
% f.remote_field.through._meta.label, + obj=cls, + id='models.E003', + ) + ) + else: + seen_intermediary_signatures.append(signature) + return errors + + @classmethod + def _check_id_field(cls): + """Check if `id` field is a primary key.""" + fields = [f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk] + # fields is empty or consists of the invalid "id" field + if fields and not fields[0].primary_key and cls._meta.pk.name == 'id': + return [ + checks.Error( + "'id' can only be used as a field name if the field also " + "sets 'primary_key=True'.", + obj=cls, + id='models.E004', + ) + ] + else: + return [] + + @classmethod + def _check_field_name_clashes(cls): + """Forbid field shadowing in multi-table inheritance.""" + errors = [] + used_fields = {} # name or attname -> field + + # Check that multi-inheritance doesn't cause field name shadowing. + for parent in cls._meta.get_parent_list(): + for f in parent._meta.local_fields: + clash = used_fields.get(f.name) or used_fields.get(f.attname) or None + if clash: + errors.append( + checks.Error( + "The field '%s' from parent model " + "'%s' clashes with the field '%s' " + "from parent model '%s'." % ( + clash.name, clash.model._meta, + f.name, f.model._meta + ), + obj=cls, + id='models.E005', + ) + ) + used_fields[f.name] = f + used_fields[f.attname] = f + + # Check that fields defined in the model don't clash with fields from + # parents, including auto-generated fields like multi-table inheritance + # child accessors. + for parent in cls._meta.get_parent_list(): + for f in parent._meta.get_fields(): + if f not in used_fields: + used_fields[f.name] = f + + for f in cls._meta.local_fields: + clash = used_fields.get(f.name) or used_fields.get(f.attname) or None + # Note that we may detect clash between user-defined non-unique + # field "id" and automatically added unique field "id", both + # defined at the same model. This special case is considered in + # _check_id_field and here we ignore it. 
+ id_conflict = f.name == "id" and clash and clash.name == "id" and clash.model == cls + if clash and not id_conflict: + errors.append( + checks.Error( + "The field '%s' clashes with the field '%s' " + "from model '%s'." % ( + f.name, clash.name, clash.model._meta + ), + obj=f, + id='models.E006', + ) + ) + used_fields[f.name] = f + used_fields[f.attname] = f + + return errors + + @classmethod + def _check_column_name_clashes(cls): + # Store a list of column names which have already been used by other fields. + used_column_names = [] + errors = [] + + for f in cls._meta.local_fields: + _, column_name = f.get_attname_column() + + # Ensure the column name is not already in use. + if column_name and column_name in used_column_names: + errors.append( + checks.Error( + "Field '%s' has column name '%s' that is used by " + "another field." % (f.name, column_name), + hint="Specify a 'db_column' for the field.", + obj=cls, + id='models.E007' + ) + ) + else: + used_column_names.append(column_name) + + return errors + + @classmethod + def _check_model_name_db_lookup_clashes(cls): + errors = [] + model_name = cls.__name__ + if model_name.startswith('_') or model_name.endswith('_'): + errors.append( + checks.Error( + "The model name '%s' cannot start or end with an underscore " + "as it collides with the query lookup syntax." % model_name, + obj=cls, + id='models.E023' + ) + ) + elif LOOKUP_SEP in model_name: + errors.append( + checks.Error( + "The model name '%s' cannot contain double underscores as " + "it collides with the query lookup syntax." 
% model_name, + obj=cls, + id='models.E024' + ) + ) + return errors + + @classmethod + def _check_index_together(cls): + """Check the value of "index_together" option.""" + if not isinstance(cls._meta.index_together, (tuple, list)): + return [ + checks.Error( + "'index_together' must be a list or tuple.", + obj=cls, + id='models.E008', + ) + ] + + elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together): + return [ + checks.Error( + "All 'index_together' elements must be lists or tuples.", + obj=cls, + id='models.E009', + ) + ] + + else: + errors = [] + for fields in cls._meta.index_together: + errors.extend(cls._check_local_fields(fields, "index_together")) + return errors + + @classmethod + def _check_unique_together(cls): + """Check the value of "unique_together" option.""" + if not isinstance(cls._meta.unique_together, (tuple, list)): + return [ + checks.Error( + "'unique_together' must be a list or tuple.", + obj=cls, + id='models.E010', + ) + ] + + elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together): + return [ + checks.Error( + "All 'unique_together' elements must be lists or tuples.", + obj=cls, + id='models.E011', + ) + ] + + else: + errors = [] + for fields in cls._meta.unique_together: + errors.extend(cls._check_local_fields(fields, "unique_together")) + return errors + + @classmethod + def _check_local_fields(cls, fields, option): + from django.db import models + + # In order to avoid hitting the relation tree prematurely, we use our + # own fields_map instead of using get_field() + forward_fields_map = { + field.name: field for field in cls._meta._get_fields(reverse=False) + } + + errors = [] + for field_name in fields: + try: + field = forward_fields_map[field_name] + except KeyError: + errors.append( + checks.Error( + "'%s' refers to the nonexistent field '%s'." 
% ( + option, field_name, + ), + obj=cls, + id='models.E012', + ) + ) + else: + if isinstance(field.remote_field, models.ManyToManyRel): + errors.append( + checks.Error( + "'%s' refers to a ManyToManyField '%s', but " + "ManyToManyFields are not permitted in '%s'." % ( + option, field_name, option, + ), + obj=cls, + id='models.E013', + ) + ) + elif field not in cls._meta.local_fields: + errors.append( + checks.Error( + "'%s' refers to field '%s' which is not local to model '%s'." + % (option, field_name, cls._meta.object_name), + hint="This issue may be caused by multi-table inheritance.", + obj=cls, + id='models.E016', + ) + ) + return errors + + @classmethod + def _check_ordering(cls): + """ + Check "ordering" option -- is it a list of strings and do all fields + exist? + """ + if cls._meta._ordering_clash: + return [ + checks.Error( + "'ordering' and 'order_with_respect_to' cannot be used together.", + obj=cls, + id='models.E021', + ), + ] + + if cls._meta.order_with_respect_to or not cls._meta.ordering: + return [] + + if not isinstance(cls._meta.ordering, (list, tuple)): + return [ + checks.Error( + "'ordering' must be a tuple or list (even if you want to order by only one field).", + obj=cls, + id='models.E014', + ) + ] + + errors = [] + fields = cls._meta.ordering + + # Skip expressions and '?' fields. + fields = (f for f in fields if isinstance(f, str) and f != '?') + + # Convert "-field" to "field". + fields = ((f[1:] if f.startswith('-') else f) for f in fields) + + # Skip ordering in the format field1__field2 (FIXME: checking + # this format would be nice, but it's a little fiddly). + fields = (f for f in fields if LOOKUP_SEP not in f) + + # Skip ordering on pk. This is always a valid order_by field + # but is an alias and therefore won't be found by opts.get_field. + fields = {f for f in fields if f != 'pk'} + + # Check for invalid or nonexistent fields in ordering. 
+ invalid_fields = [] + + # Any field name that is not present in field_names does not exist. + # Also, ordering by m2m fields is not allowed. + opts = cls._meta + valid_fields = set(chain.from_iterable( + (f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),) + for f in chain(opts.fields, opts.related_objects) + )) + + invalid_fields.extend(fields - valid_fields) + + for invalid_field in invalid_fields: + errors.append( + checks.Error( + "'ordering' refers to the nonexistent field '%s'." % invalid_field, + obj=cls, + id='models.E015', + ) + ) + return errors + + @classmethod + def _check_long_column_names(cls): + """ + Check that any auto-generated column names are shorter than the limits + for each database in which the model will be created. + """ + errors = [] + allowed_len = None + db_alias = None + + # Find the minimum max allowed length among all specified db_aliases. + for db in settings.DATABASES: + # skip databases where the model won't be created + if not router.allow_migrate_model(db, cls): + continue + connection = connections[db] + max_name_length = connection.ops.max_name_length() + if max_name_length is None or connection.features.truncates_names: + continue + else: + if allowed_len is None: + allowed_len = max_name_length + db_alias = db + elif max_name_length < allowed_len: + allowed_len = max_name_length + db_alias = db + + if allowed_len is None: + return errors + + for f in cls._meta.local_fields: + _, column_name = f.get_attname_column() + + # Check if auto-generated name for the field is too long + # for the database. + if f.db_column is None and column_name is not None and len(column_name) > allowed_len: + errors.append( + checks.Error( + 'Autogenerated column name too long for field "%s". ' + 'Maximum length is "%s" for database "%s".' 
+ % (column_name, allowed_len, db_alias), + hint="Set the column name manually using 'db_column'.", + obj=cls, + id='models.E018', + ) + ) + + for f in cls._meta.local_many_to_many: + # Skip nonexistent models. + if isinstance(f.remote_field.through, str): + continue + + # Check if auto-generated name for the M2M field is too long + # for the database. + for m2m in f.remote_field.through._meta.local_fields: + _, rel_name = m2m.get_attname_column() + if m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len: + errors.append( + checks.Error( + 'Autogenerated column name too long for M2M field ' + '"%s". Maximum length is "%s" for database "%s".' + % (rel_name, allowed_len, db_alias), + hint=( + "Use 'through' to create a separate model for " + "M2M and then set column_name using 'db_column'." + ), + obj=cls, + id='models.E019', + ) + ) + + return errors + + +############################################ +# HELPER FUNCTIONS (CURRIED MODEL METHODS) # +############################################ + +# ORDERING METHODS ######################### + +def method_set_order(self, ordered_obj, id_list, using=None): + if using is None: + using = DEFAULT_DB_ALIAS + order_wrt = ordered_obj._meta.order_with_respect_to + filter_args = order_wrt.get_forward_related_filter(self) + # FIXME: It would be nice if there was an "update many" version of update + # for situations like this. 
+ with transaction.atomic(using=using, savepoint=False): + for i, j in enumerate(id_list): + ordered_obj.objects.filter(pk=j, **filter_args).update(_order=i) + + +def method_get_order(self, ordered_obj): + order_wrt = ordered_obj._meta.order_with_respect_to + filter_args = order_wrt.get_forward_related_filter(self) + pk_name = ordered_obj._meta.pk.name + return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True) + + +def make_foreign_order_accessors(model, related_model): + setattr( + related_model, + 'get_%s_order' % model.__name__.lower(), + partialmethod(method_get_order, model) + ) + setattr( + related_model, + 'set_%s_order' % model.__name__.lower(), + partialmethod(method_set_order, model) + ) + +######## +# MISC # +######## + + +def model_unpickle(model_id): + """Used to unpickle Model subclasses with deferred fields.""" + if isinstance(model_id, tuple): + model = apps.get_model(*model_id) + else: + # Backwards compat - the model was cached directly in earlier versions. + model = model_id + return model.__new__(model) + + +model_unpickle.__safe_for_unpickle__ = True + + +def unpickle_inner_exception(klass, exception_name): + # Get the exception class from the class it is attached to: + exception = getattr(klass, exception_name) + return exception.__new__(exception) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/constants.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..a7e6c252d93b70fe2c2c846b12bd6de0c569bf20 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/constants.py @@ -0,0 +1,6 @@ +""" +Constants used across the ORM in general. +""" + +# Separator used to split filter strings apart. 
+LOOKUP_SEP = '__' diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/deletion.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/deletion.py new file mode 100644 index 0000000000000000000000000000000000000000..753f4f5076ff1da60703189c35553916b45651f6 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/deletion.py @@ -0,0 +1,318 @@ +from collections import Counter, OrderedDict +from operator import attrgetter + +from django.db import IntegrityError, connections, transaction +from django.db.models import signals, sql + + +class ProtectedError(IntegrityError): + def __init__(self, msg, protected_objects): + self.protected_objects = protected_objects + super().__init__(msg, protected_objects) + + +def CASCADE(collector, field, sub_objs, using): + collector.collect(sub_objs, source=field.remote_field.model, + source_attr=field.name, nullable=field.null) + if field.null and not connections[using].features.can_defer_constraint_checks: + collector.add_field_update(field, None, sub_objs) + + +def PROTECT(collector, field, sub_objs, using): + raise ProtectedError( + "Cannot delete some instances of model '%s' because they are " + "referenced through a protected foreign key: '%s.%s'" % ( + field.remote_field.model.__name__, sub_objs[0].__class__.__name__, field.name + ), + sub_objs + ) + + +def SET(value): + if callable(value): + def set_on_delete(collector, field, sub_objs, using): + collector.add_field_update(field, value(), sub_objs) + else: + def set_on_delete(collector, field, sub_objs, using): + collector.add_field_update(field, value, sub_objs) + set_on_delete.deconstruct = lambda: ('django.db.models.SET', (value,), {}) + return set_on_delete + + +def SET_NULL(collector, field, sub_objs, using): + collector.add_field_update(field, None, sub_objs) + + +def SET_DEFAULT(collector, field, sub_objs, using): + 
collector.add_field_update(field, field.get_default(), sub_objs) + + +def DO_NOTHING(collector, field, sub_objs, using): + pass + + +def get_candidate_relations_to_delete(opts): + # The candidate relations are the ones that come from N-1 and 1-1 relations. + # N-N (i.e., many-to-many) relations aren't candidates for deletion. + return ( + f for f in opts.get_fields(include_hidden=True) + if f.auto_created and not f.concrete and (f.one_to_one or f.one_to_many) + ) + + +class Collector: + def __init__(self, using): + self.using = using + # Initially, {model: {instances}}, later values become lists. + self.data = OrderedDict() + self.field_updates = {} # {model: {(field, value): {instances}}} + # fast_deletes is a list of queryset-likes that can be deleted without + # fetching the objects into memory. + self.fast_deletes = [] + + # Tracks deletion-order dependency for databases without transactions + # or ability to defer constraint checks. Only concrete model classes + # should be included, as the dependencies exist only between actual + # database tables; proxy models are represented here by their concrete + # parent. + self.dependencies = {} # {model: {models}} + + def add(self, objs, source=None, nullable=False, reverse_dependency=False): + """ + Add 'objs' to the collection of objects to be deleted. If the call is + the result of a cascade, 'source' should be the model that caused it, + and 'nullable' should be set to True if the relation can be null. + + Return a list of all objects that were not already collected. + """ + if not objs: + return [] + new_objs = [] + model = objs[0].__class__ + instances = self.data.setdefault(model, set()) + for obj in objs: + if obj not in instances: + new_objs.append(obj) + instances.update(new_objs) + # Nullable relationships can be ignored -- they are nulled out before + # deleting, and therefore do not affect the order in which objects have + # to be deleted. 
+ if source is not None and not nullable: + if reverse_dependency: + source, model = model, source + self.dependencies.setdefault( + source._meta.concrete_model, set()).add(model._meta.concrete_model) + return new_objs + + def add_field_update(self, field, value, objs): + """ + Schedule a field update. 'objs' must be a homogeneous iterable + collection of model instances (e.g. a QuerySet). + """ + if not objs: + return + model = objs[0].__class__ + self.field_updates.setdefault( + model, {}).setdefault( + (field, value), set()).update(objs) + + def can_fast_delete(self, objs, from_field=None): + """ + Determine if the objects in the given queryset-like can be + fast-deleted. This can be done if there are no cascades, no + parents and no signal listeners for the object class. + + The 'from_field' tells where we are coming from - we need this to + determine if the objects are in fact to be deleted. Allow also + skipping parent -> child -> parent chain preventing fast delete of + the child. + """ + if from_field and from_field.remote_field.on_delete is not CASCADE: + return False + if not (hasattr(objs, 'model') and hasattr(objs, '_raw_delete')): + return False + model = objs.model + if (signals.pre_delete.has_listeners(model) or + signals.post_delete.has_listeners(model) or + signals.m2m_changed.has_listeners(model)): + return False + # The use of from_field comes from the need to avoid cascade back to + # parent when parent delete is cascading to child. + opts = model._meta + if any(link != from_field for link in opts.concrete_model._meta.parents.values()): + return False + # Foreign keys pointing to this model, both from m2m and other + # models. + for related in get_candidate_relations_to_delete(opts): + if related.field.remote_field.on_delete is not DO_NOTHING: + return False + for field in model._meta.private_fields: + if hasattr(field, 'bulk_related_objects'): + # It's something like generic foreign key. 
+ return False + return True + + def get_del_batches(self, objs, field): + """ + Return the objs in suitably sized batches for the used connection. + """ + conn_batch_size = max( + connections[self.using].ops.bulk_batch_size([field.name], objs), 1) + if len(objs) > conn_batch_size: + return [objs[i:i + conn_batch_size] + for i in range(0, len(objs), conn_batch_size)] + else: + return [objs] + + def collect(self, objs, source=None, nullable=False, collect_related=True, + source_attr=None, reverse_dependency=False, keep_parents=False): + """ + Add 'objs' to the collection of objects to be deleted as well as all + parent instances. 'objs' must be a homogeneous iterable collection of + model instances (e.g. a QuerySet). If 'collect_related' is True, + related objects will be handled by their respective on_delete handler. + + If the call is the result of a cascade, 'source' should be the model + that caused it and 'nullable' should be set to True, if the relation + can be null. + + If 'reverse_dependency' is True, 'source' will be deleted before the + current model, rather than after. (Needed for cascading to parent + models, the one case in which the cascade follows the forwards + direction of an FK rather than the reverse direction.) + + If 'keep_parents' is True, data of parent model's will be not deleted. + """ + if self.can_fast_delete(objs): + self.fast_deletes.append(objs) + return + new_objs = self.add(objs, source, nullable, + reverse_dependency=reverse_dependency) + if not new_objs: + return + + model = new_objs[0].__class__ + + if not keep_parents: + # Recursively collect concrete model's parent models, but not their + # related objects. 
These will be found by meta.get_fields() + concrete_model = model._meta.concrete_model + for ptr in concrete_model._meta.parents.values(): + if ptr: + parent_objs = [getattr(obj, ptr.name) for obj in new_objs] + self.collect(parent_objs, source=model, + source_attr=ptr.remote_field.related_name, + collect_related=False, + reverse_dependency=True) + if collect_related: + parents = model._meta.parents + for related in get_candidate_relations_to_delete(model._meta): + # Preserve parent reverse relationships if keep_parents=True. + if keep_parents and related.model in parents: + continue + field = related.field + if field.remote_field.on_delete == DO_NOTHING: + continue + batches = self.get_del_batches(new_objs, field) + for batch in batches: + sub_objs = self.related_objects(related, batch) + if self.can_fast_delete(sub_objs, from_field=field): + self.fast_deletes.append(sub_objs) + elif sub_objs: + field.remote_field.on_delete(self, field, sub_objs, self.using) + for field in model._meta.private_fields: + if hasattr(field, 'bulk_related_objects'): + # It's something like generic foreign key. + sub_objs = field.bulk_related_objects(new_objs, self.using) + self.collect(sub_objs, source=model, nullable=True) + + def related_objects(self, related, objs): + """ + Get a QuerySet of objects related to `objs` via the relation `related`. 
+ """ + return related.related_model._base_manager.using(self.using).filter( + **{"%s__in" % related.field.name: objs} + ) + + def instances_with_model(self): + for model, instances in self.data.items(): + for obj in instances: + yield model, obj + + def sort(self): + sorted_models = [] + concrete_models = set() + models = list(self.data) + while len(sorted_models) < len(models): + found = False + for model in models: + if model in sorted_models: + continue + dependencies = self.dependencies.get(model._meta.concrete_model) + if not (dependencies and dependencies.difference(concrete_models)): + sorted_models.append(model) + concrete_models.add(model._meta.concrete_model) + found = True + if not found: + return + self.data = OrderedDict((model, self.data[model]) + for model in sorted_models) + + def delete(self): + # sort instance collections + for model, instances in self.data.items(): + self.data[model] = sorted(instances, key=attrgetter("pk")) + + # if possible, bring the models in an order suitable for databases that + # don't support transactions or cannot defer constraint checks until the + # end of a transaction. 
+ self.sort() + # number of objects deleted for each model label + deleted_counter = Counter() + + with transaction.atomic(using=self.using, savepoint=False): + # send pre_delete signals + for model, obj in self.instances_with_model(): + if not model._meta.auto_created: + signals.pre_delete.send( + sender=model, instance=obj, using=self.using + ) + + # fast deletes + for qs in self.fast_deletes: + count = qs._raw_delete(using=self.using) + deleted_counter[qs.model._meta.label] += count + + # update fields + for model, instances_for_fieldvalues in self.field_updates.items(): + for (field, value), instances in instances_for_fieldvalues.items(): + query = sql.UpdateQuery(model) + query.update_batch([obj.pk for obj in instances], + {field.name: value}, self.using) + + # reverse instance collections + for instances in self.data.values(): + instances.reverse() + + # delete instances + for model, instances in self.data.items(): + query = sql.DeleteQuery(model) + pk_list = [obj.pk for obj in instances] + count = query.delete_batch(pk_list, self.using) + deleted_counter[model._meta.label] += count + + if not model._meta.auto_created: + for obj in instances: + signals.post_delete.send( + sender=model, instance=obj, using=self.using + ) + + # update collected instances + for model, instances_for_fieldvalues in self.field_updates.items(): + for (field, value), instances in instances_for_fieldvalues.items(): + for obj in instances: + setattr(obj, field.attname, value) + for model, instances in self.data.items(): + for instance in instances: + setattr(instance, model._meta.pk.attname, None) + return sum(deleted_counter.values()), dict(deleted_counter) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/expressions.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/expressions.py new file mode 100644 index 0000000000000000000000000000000000000000..308d62c4b3d20505b948254c70fc8bc8b96a446c --- 
/dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/expressions.py @@ -0,0 +1,1329 @@ +import copy +import datetime +from decimal import Decimal + +from django.core.exceptions import EmptyResultSet, FieldError +from django.db import connection +from django.db.models import fields +from django.db.models.query_utils import Q +from django.utils.deconstruct import deconstructible +from django.utils.functional import cached_property + + +class SQLiteNumericMixin: + """ + Some expressions with output_field=DecimalField() must be cast to + numeric to be properly filtered. + """ + def as_sqlite(self, compiler, connection, **extra_context): + sql, params = self.as_sql(compiler, connection, **extra_context) + try: + if self.output_field.get_internal_type() == 'DecimalField': + sql = 'CAST(%s AS NUMERIC)' % sql + except FieldError: + pass + return sql, params + + +class Combinable: + """ + Provide the ability to combine one or two objects with + some connector. For example F('foo') + F('bar'). + """ + + # Arithmetic connectors + ADD = '+' + SUB = '-' + MUL = '*' + DIV = '/' + POW = '^' + # The following is a quoted % operator - it is quoted because it can be + # used in strings that also have parameter substitution. + MOD = '%%' + + # Bitwise operators - note that these are generated by .bitand() + # and .bitor(), the '&' and '|' are reserved for boolean operator + # usage. 
+ BITAND = '&' + BITOR = '|' + BITLEFTSHIFT = '<<' + BITRIGHTSHIFT = '>>' + + def _combine(self, other, connector, reversed): + if not hasattr(other, 'resolve_expression'): + # everything must be resolvable to an expression + if isinstance(other, datetime.timedelta): + other = DurationValue(other, output_field=fields.DurationField()) + else: + other = Value(other) + + if reversed: + return CombinedExpression(other, connector, self) + return CombinedExpression(self, connector, other) + + ############# + # OPERATORS # + ############# + + def __add__(self, other): + return self._combine(other, self.ADD, False) + + def __sub__(self, other): + return self._combine(other, self.SUB, False) + + def __mul__(self, other): + return self._combine(other, self.MUL, False) + + def __truediv__(self, other): + return self._combine(other, self.DIV, False) + + def __mod__(self, other): + return self._combine(other, self.MOD, False) + + def __pow__(self, other): + return self._combine(other, self.POW, False) + + def __and__(self, other): + raise NotImplementedError( + "Use .bitand() and .bitor() for bitwise logical operations." + ) + + def bitand(self, other): + return self._combine(other, self.BITAND, False) + + def bitleftshift(self, other): + return self._combine(other, self.BITLEFTSHIFT, False) + + def bitrightshift(self, other): + return self._combine(other, self.BITRIGHTSHIFT, False) + + def __or__(self, other): + raise NotImplementedError( + "Use .bitand() and .bitor() for bitwise logical operations." 
+ ) + + def bitor(self, other): + return self._combine(other, self.BITOR, False) + + def __radd__(self, other): + return self._combine(other, self.ADD, True) + + def __rsub__(self, other): + return self._combine(other, self.SUB, True) + + def __rmul__(self, other): + return self._combine(other, self.MUL, True) + + def __rtruediv__(self, other): + return self._combine(other, self.DIV, True) + + def __rmod__(self, other): + return self._combine(other, self.MOD, True) + + def __rpow__(self, other): + return self._combine(other, self.POW, True) + + def __rand__(self, other): + raise NotImplementedError( + "Use .bitand() and .bitor() for bitwise logical operations." + ) + + def __ror__(self, other): + raise NotImplementedError( + "Use .bitand() and .bitor() for bitwise logical operations." + ) + + +@deconstructible +class BaseExpression: + """Base class for all query expressions.""" + + # aggregate specific fields + is_summary = False + _output_field_resolved_to_none = False + # Can the expression be used in a WHERE clause? + filterable = True + # Can the expression can be used as a source expression in Window? + window_compatible = False + + def __init__(self, output_field=None): + if output_field is not None: + self.output_field = output_field + + def __getstate__(self): + # This method required only for Python 3.4. 
+ state = self.__dict__.copy() + state.pop('convert_value', None) + return state + + def get_db_converters(self, connection): + return ( + [] + if self.convert_value is self._convert_value_noop else + [self.convert_value] + ) + self.output_field.get_db_converters(connection) + + def get_source_expressions(self): + return [] + + def set_source_expressions(self, exprs): + assert len(exprs) == 0 + + def _parse_expressions(self, *expressions): + return [ + arg if hasattr(arg, 'resolve_expression') else ( + F(arg) if isinstance(arg, str) else Value(arg) + ) for arg in expressions + ] + + def as_sql(self, compiler, connection): + """ + Responsible for returning a (sql, [params]) tuple to be included + in the current query. + + Different backends can provide their own implementation, by + providing an `as_{vendor}` method and patching the Expression: + + ``` + def override_as_sql(self, compiler, connection): + # custom logic + return super().as_sql(compiler, connection) + setattr(Expression, 'as_' + connection.vendor, override_as_sql) + ``` + + Arguments: + * compiler: the query compiler responsible for generating the query. + Must have a compile method, returning a (sql, [params]) tuple. + Calling compiler(value) will return a quoted `value`. + + * connection: the database connection used for the current query. + + Return: (sql, params) + Where `sql` is a string containing ordered sql parameters to be + replaced with the elements of the list `params`. 
+ """ + raise NotImplementedError("Subclasses must implement as_sql()") + + @cached_property + def contains_aggregate(self): + for expr in self.get_source_expressions(): + if expr and expr.contains_aggregate: + return True + return False + + @cached_property + def contains_over_clause(self): + for expr in self.get_source_expressions(): + if expr and expr.contains_over_clause: + return True + return False + + @cached_property + def contains_column_references(self): + for expr in self.get_source_expressions(): + if expr and expr.contains_column_references: + return True + return False + + def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): + """ + Provide the chance to do any preprocessing or validation before being + added to the query. + + Arguments: + * query: the backend query implementation + * allow_joins: boolean allowing or denying use of joins + in this query + * reuse: a set of reusable joins for multijoins + * summarize: a terminal aggregate clause + * for_save: whether this expression about to be used in a save or update + + Return: an Expression to be added to the query. 
+ """ + c = self.copy() + c.is_summary = summarize + c.set_source_expressions([ + expr.resolve_expression(query, allow_joins, reuse, summarize) + if expr else None + for expr in c.get_source_expressions() + ]) + return c + + def _prepare(self, field): + """Hook used by Lookup.get_prep_lookup() to do custom preparation.""" + return self + + @property + def field(self): + return self.output_field + + @cached_property + def output_field(self): + """Return the output type of this expressions.""" + output_field = self._resolve_output_field() + if output_field is None: + self._output_field_resolved_to_none = True + raise FieldError('Cannot resolve expression type, unknown output_field') + return output_field + + @cached_property + def _output_field_or_none(self): + """ + Return the output field of this expression, or None if + _resolve_output_field() didn't return an output type. + """ + try: + return self.output_field + except FieldError: + if not self._output_field_resolved_to_none: + raise + + def _resolve_output_field(self): + """ + Attempt to infer the output type of the expression. If the output + fields of all source fields match then, simply infer the same type + here. This isn't always correct, but it makes sense most of the time. + + Consider the difference between `2 + 2` and `2 / 3`. Inferring + the type here is a convenience for the common case. The user should + supply their own output_field with more complex computations. + + If a source's output field resolves to None, exclude it from this check. + If all sources are None, then an error is raised higher up the stack in + the output_field property. + """ + sources_iter = (source for source in self.get_source_fields() if source is not None) + for output_field in sources_iter: + if any(not isinstance(output_field, source.__class__) for source in sources_iter): + raise FieldError('Expression contains mixed types. 
You must set output_field.') + return output_field + + @staticmethod + def _convert_value_noop(value, expression, connection): + return value + + @cached_property + def convert_value(self): + """ + Expressions provide their own converters because users have the option + of manually specifying the output_field which may be a different type + from the one the database returns. + """ + field = self.output_field + internal_type = field.get_internal_type() + if internal_type == 'FloatField': + return lambda value, expression, connection: None if value is None else float(value) + elif internal_type.endswith('IntegerField'): + return lambda value, expression, connection: None if value is None else int(value) + elif internal_type == 'DecimalField': + return lambda value, expression, connection: None if value is None else Decimal(value) + return self._convert_value_noop + + def get_lookup(self, lookup): + return self.output_field.get_lookup(lookup) + + def get_transform(self, name): + return self.output_field.get_transform(name) + + def relabeled_clone(self, change_map): + clone = self.copy() + clone.set_source_expressions([ + e.relabeled_clone(change_map) if e is not None else None + for e in self.get_source_expressions() + ]) + return clone + + def copy(self): + return copy.copy(self) + + def get_group_by_cols(self): + if not self.contains_aggregate: + return [self] + cols = [] + for source in self.get_source_expressions(): + cols.extend(source.get_group_by_cols()) + return cols + + def get_source_fields(self): + """Return the underlying field types used by this aggregate.""" + return [e._output_field_or_none for e in self.get_source_expressions()] + + def asc(self, **kwargs): + return OrderBy(self, **kwargs) + + def desc(self, **kwargs): + return OrderBy(self, descending=True, **kwargs) + + def reverse_ordering(self): + return self + + def flatten(self): + """ + Recursively yield this expression and all subexpressions, in + depth-first order. 
+ """ + yield self + for expr in self.get_source_expressions(): + if expr: + yield from expr.flatten() + + def __eq__(self, other): + if self.__class__ != other.__class__: + return False + path, args, kwargs = self.deconstruct() + other_path, other_args, other_kwargs = other.deconstruct() + if (path, args) == (other_path, other_args): + kwargs = kwargs.copy() + other_kwargs = other_kwargs.copy() + output_field = type(kwargs.pop('output_field', None)) + other_output_field = type(other_kwargs.pop('output_field', None)) + if output_field == other_output_field: + return kwargs == other_kwargs + return False + + def __hash__(self): + path, args, kwargs = self.deconstruct() + kwargs = kwargs.copy() + output_field = type(kwargs.pop('output_field', None)) + return hash((path, output_field) + args + tuple([ + (key, tuple(value)) if isinstance(value, list) else (key, value) + for key, value in kwargs.items() + ])) + + +class Expression(BaseExpression, Combinable): + """An expression that can be combined with other expressions.""" + pass + + +class CombinedExpression(SQLiteNumericMixin, Expression): + + def __init__(self, lhs, connector, rhs, output_field=None): + super().__init__(output_field=output_field) + self.connector = connector + self.lhs = lhs + self.rhs = rhs + + def __repr__(self): + return "<{}: {}>".format(self.__class__.__name__, self) + + def __str__(self): + return "{} {} {}".format(self.lhs, self.connector, self.rhs) + + def get_source_expressions(self): + return [self.lhs, self.rhs] + + def set_source_expressions(self, exprs): + self.lhs, self.rhs = exprs + + def as_sql(self, compiler, connection): + try: + lhs_output = self.lhs.output_field + except FieldError: + lhs_output = None + try: + rhs_output = self.rhs.output_field + except FieldError: + rhs_output = None + if (not connection.features.has_native_duration_field and + ((lhs_output and lhs_output.get_internal_type() == 'DurationField') or + (rhs_output and rhs_output.get_internal_type() == 
                 'DurationField'))):
            return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
        # Subtraction of two same-typed temporal columns yields a duration.
        if (lhs_output and rhs_output and self.connector == self.SUB and
                lhs_output.get_internal_type() in {'DateField', 'DateTimeField', 'TimeField'} and
                lhs_output.get_internal_type() == rhs_output.get_internal_type()):
            return TemporalSubtraction(self.lhs, self.rhs).as_sql(compiler, connection)
        expressions = []
        expression_params = []
        sql, params = compiler.compile(self.lhs)
        expressions.append(sql)
        expression_params.extend(params)
        sql, params = compiler.compile(self.rhs)
        expressions.append(sql)
        expression_params.extend(params)
        # order of precedence
        expression_wrapper = '(%s)'
        sql = connection.ops.combine_expression(self.connector, expressions)
        return expression_wrapper % sql, expression_params

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c


class DurationExpression(CombinedExpression):
    # Duration arithmetic for backends lacking a native duration field.

    def compile(self, side, compiler, connection):
        # Wrap DurationField operands with the backend's duration-arithmetic
        # formatting; plain DurationValue literals are compiled as-is.
        if not isinstance(side, DurationValue):
            try:
                output = side.output_field
            except FieldError:
                pass
            else:
                if output.get_internal_type() == 'DurationField':
                    sql, params = compiler.compile(side)
                    return connection.ops.format_for_duration_arithmetic(sql), params
        return compiler.compile(side)

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        expressions = []
        expression_params = []
        sql, params = self.compile(self.lhs, compiler, connection)
        expressions.append(sql)
        expression_params.extend(params)
        sql, params = self.compile(self.rhs, compiler, connection)
        expressions.append(sql)
        expression_params.extend(params)
        # order of precedence
        expression_wrapper = '(%s)'
        sql = connection.ops.combine_duration_expression(self.connector, expressions)
        return expression_wrapper % sql, expression_params


class TemporalSubtraction(CombinedExpression):
    # date/datetime/time minus a value of the same type -> duration.
    output_field = fields.DurationField()

    def __init__(self, lhs, rhs):
        super().__init__(lhs, self.SUB, rhs)

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        lhs = compiler.compile(self.lhs, connection)
        rhs = compiler.compile(self.rhs, connection)
        return connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs)


@deconstructible
class F(Combinable):
    """An object capable of resolving references to existing query objects."""
    # Can the expression be used in a WHERE clause?
    filterable = True

    def __init__(self, name):
        """
        Arguments:
         * name: the name of the field this expression references
        """
        self.name = name

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.name)

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # Delegate to the query: the name is resolved against its columns.
        return query.resolve_ref(self.name, allow_joins, reuse, summarize)

    def asc(self, **kwargs):
        return OrderBy(self, **kwargs)

    def desc(self, **kwargs):
        return OrderBy(self, descending=True, **kwargs)

    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.name == other.name

    def __hash__(self):
        return hash(self.name)


class ResolvedOuterRef(F):
    """
    An object that contains a reference to an outer query.

    In this case, the reference to the outer query has been resolved because
    the inner query has been used as a subquery.
    """
    def as_sql(self, *args, **kwargs):
        raise ValueError(
            'This queryset contains a reference to an outer query and may '
            'only be used in a subquery.'
        )

    def _prepare(self, output_field=None):
        return self


class OuterRef(F):
    # Placeholder reference to the outer query; resolved into a
    # ResolvedOuterRef when the queryset is used as a subquery.
    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        if isinstance(self.name, self.__class__):
            return self.name
        return ResolvedOuterRef(self.name)

    def _prepare(self, output_field=None):
        return self


class Func(SQLiteNumericMixin, Expression):
    """An SQL function call."""
    function = None
    template = '%(function)s(%(expressions)s)'
    arg_joiner = ', '
    arity = None  # The number of arguments the function accepts.

    def __init__(self, *expressions, output_field=None, **extra):
        if self.arity is not None and len(expressions) != self.arity:
            raise TypeError(
                "'%s' takes exactly %s %s (%s given)" % (
                    self.__class__.__name__,
                    self.arity,
                    "argument" if self.arity == 1 else "arguments",
                    len(expressions),
                )
            )
        super().__init__(output_field=output_field)
        self.source_expressions = self._parse_expressions(*expressions)
        self.extra = extra

    def __repr__(self):
        args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
        extra = dict(self.extra, **self._get_repr_options())
        if extra:
            extra = ', '.join(str(key) + '=' + str(val) for key, val in sorted(extra.items()))
            return "{}({}, {})".format(self.__class__.__name__, args, extra)
        return "{}({})".format(self.__class__.__name__, args)

    def _get_repr_options(self):
        """Return a dict of extra __init__() options to include in the repr."""
        return {}

    def get_source_expressions(self):
        return self.source_expressions

    def set_source_expressions(self, exprs):
        self.source_expressions = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        for pos, arg in enumerate(c.source_expressions):
            c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c

    def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
        connection.ops.check_expression_support(self)
        sql_parts = []
        params = []
        for arg in self.source_expressions:
            arg_sql, arg_params = compiler.compile(arg)
            sql_parts.append(arg_sql)
            params.extend(arg_params)
        data = self.extra.copy()
        data.update(**extra_context)
        # Use the first supplied value in this order: the parameter to this
        # method, a value supplied in __init__()'s **extra (the value in
        # `data`), or the value defined on the class.
        if function is not None:
            data['function'] = function
        else:
            data.setdefault('function', self.function)
        template = template or data.get('template', self.template)
        arg_joiner = arg_joiner or data.get('arg_joiner', self.arg_joiner)
        data['expressions'] = data['field'] = arg_joiner.join(sql_parts)
        return template % data, params

    def copy(self):
        # NOTE: the local name shadows the `copy` module inside this method.
        copy = super().copy()
        copy.source_expressions = self.source_expressions[:]
        copy.extra = self.extra.copy()
        return copy


class Value(Expression):
    """Represent a wrapped value as a node within an expression."""
    def __init__(self, value, output_field=None):
        """
        Arguments:
         * value: the value this expression represents. The value will be
           added into the sql parameter list and properly quoted.

         * output_field: an instance of the model field type that this
           expression will return, such as IntegerField() or CharField().
+ """ + super().__init__(output_field=output_field) + self.value = value + + def __repr__(self): + return "{}({})".format(self.__class__.__name__, self.value) + + def as_sql(self, compiler, connection): + connection.ops.check_expression_support(self) + val = self.value + output_field = self._output_field_or_none + if output_field is not None: + if self.for_save: + val = output_field.get_db_prep_save(val, connection=connection) + else: + val = output_field.get_db_prep_value(val, connection=connection) + if hasattr(output_field, 'get_placeholder'): + return output_field.get_placeholder(val, compiler, connection), [val] + if val is None: + # cx_Oracle does not always convert None to the appropriate + # NULL type (like in case expressions using numbers), so we + # use a literal SQL NULL + return 'NULL', [] + return '%s', [val] + + def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): + c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) + c.for_save = for_save + return c + + def get_group_by_cols(self): + return [] + + +class DurationValue(Value): + def as_sql(self, compiler, connection): + connection.ops.check_expression_support(self) + if connection.features.has_native_duration_field: + return super().as_sql(compiler, connection) + return connection.ops.date_interval_sql(self.value), [] + + +class RawSQL(Expression): + def __init__(self, sql, params, output_field=None): + if output_field is None: + output_field = fields.Field() + self.sql, self.params = sql, params + super().__init__(output_field=output_field) + + def __repr__(self): + return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params) + + def as_sql(self, compiler, connection): + return '(%s)' % self.sql, self.params + + def get_group_by_cols(self): + return [self] + + def __hash__(self): + h = hash(self.sql) ^ hash(self.output_field) + for param in self.params: + h ^= hash(param) + return h + + +class 
Star(Expression): + def __repr__(self): + return "'*'" + + def as_sql(self, compiler, connection): + return '*', [] + + +class Random(Expression): + output_field = fields.FloatField() + + def __repr__(self): + return "Random()" + + def as_sql(self, compiler, connection): + return connection.ops.random_function_sql(), [] + + +class Col(Expression): + + contains_column_references = True + + def __init__(self, alias, target, output_field=None): + if output_field is None: + output_field = target + super().__init__(output_field=output_field) + self.alias, self.target = alias, target + + def __repr__(self): + return "{}({}, {})".format( + self.__class__.__name__, self.alias, self.target) + + def as_sql(self, compiler, connection): + qn = compiler.quote_name_unless_alias + return "%s.%s" % (qn(self.alias), qn(self.target.column)), [] + + def relabeled_clone(self, relabels): + return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field) + + def get_group_by_cols(self): + return [self] + + def get_db_converters(self, connection): + if self.target == self.output_field: + return self.output_field.get_db_converters(connection) + return (self.output_field.get_db_converters(connection) + + self.target.get_db_converters(connection)) + + +class Ref(Expression): + """ + Reference to column alias of the query. For example, Ref('sum_cost') in + qs.annotate(sum_cost=Sum('cost')) query. + """ + def __init__(self, refs, source): + super().__init__() + self.refs, self.source = refs, source + + def __repr__(self): + return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source) + + def get_source_expressions(self): + return [self.source] + + def set_source_expressions(self, exprs): + self.source, = exprs + + def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): + # The sub-expression `source` has already been resolved, as this is + # just a reference to the name of `source`. 
        return self

    def relabeled_clone(self, relabels):
        return self

    def as_sql(self, compiler, connection):
        return "%s" % connection.ops.quote_name(self.refs), []

    def get_group_by_cols(self):
        return [self]


class ExpressionList(Func):
    """
    An expression containing multiple expressions. Can be used to provide a
    list of expressions as an argument to another expression, like an
    ordering clause.
    """
    template = '%(expressions)s'

    def __init__(self, *expressions, **extra):
        if len(expressions) == 0:
            raise ValueError('%s requires at least one expression.' % self.__class__.__name__)
        super().__init__(*expressions, **extra)

    def __str__(self):
        return self.arg_joiner.join(str(arg) for arg in self.source_expressions)


class ExpressionWrapper(Expression):
    """
    An expression that can wrap another expression so that it can provide
    extra context to the inner expression, such as the output_field.
    """

    def __init__(self, expression, output_field):
        super().__init__(output_field=output_field)
        self.expression = expression

    def set_source_expressions(self, exprs):
        self.expression = exprs[0]

    def get_source_expressions(self):
        return [self.expression]

    def as_sql(self, compiler, connection):
        return self.expression.as_sql(compiler, connection)

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.expression)


class When(Expression):
    # One WHEN ... THEN ... branch of a CASE expression.
    template = 'WHEN %(condition)s THEN %(result)s'

    def __init__(self, condition=None, then=None, **lookups):
        # Keyword lookups are converted into a Q object; exactly one of the
        # two forms (Q object or lookups) must be supplied.
        if lookups and condition is None:
            condition, lookups = Q(**lookups), None
        if condition is None or not isinstance(condition, Q) or lookups:
            raise TypeError("__init__() takes either a Q object or lookups as keyword arguments")
        super().__init__(output_field=None)
        self.condition = condition
        self.result = self._parse_expressions(then)[0]

    def __str__(self):
        return "WHEN %r THEN %r" % (self.condition, self.result)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    def get_source_expressions(self):
        return [self.condition, self.result]

    def set_source_expressions(self, exprs):
        self.condition, self.result = exprs

    def get_source_fields(self):
        # We're only interested in the fields of the result expressions.
        return [self.result._output_field_or_none]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        if hasattr(c.condition, 'resolve_expression'):
            c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False)
        c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c

    def as_sql(self, compiler, connection, template=None, **extra_context):
        connection.ops.check_expression_support(self)
        template_params = extra_context
        sql_params = []
        condition_sql, condition_params = compiler.compile(self.condition)
        template_params['condition'] = condition_sql
        sql_params.extend(condition_params)
        result_sql, result_params = compiler.compile(self.result)
        template_params['result'] = result_sql
        sql_params.extend(result_params)
        template = template or self.template
        return template % template_params, sql_params

    def get_group_by_cols(self):
        # This is not a complete expression and cannot be used in GROUP BY.
        cols = []
        for source in self.get_source_expressions():
            cols.extend(source.get_group_by_cols())
        return cols


class Case(Expression):
    """
    An SQL searched CASE expression:

        CASE
            WHEN n > 0
                THEN 'positive'
            WHEN n < 0
                THEN 'negative'
            ELSE 'zero'
        END
    """
    template = 'CASE %(cases)s ELSE %(default)s END'
    case_joiner = ' '

    def __init__(self, *cases, default=None, output_field=None, **extra):
        if not all(isinstance(case, When) for case in cases):
            raise TypeError("Positional arguments must all be When objects.")
        super().__init__(output_field)
        self.cases = list(cases)
        self.default = self._parse_expressions(default)[0]
        self.extra = extra

    def __str__(self):
        return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    def get_source_expressions(self):
        return self.cases + [self.default]

    def set_source_expressions(self, exprs):
        self.cases = exprs[:-1]
        self.default = exprs[-1]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        for pos, case in enumerate(c.cases):
            c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c

    def copy(self):
        c = super().copy()
        c.cases = c.cases[:]
        return c

    def as_sql(self, compiler, connection, template=None, case_joiner=None, **extra_context):
        connection.ops.check_expression_support(self)
        if not self.cases:
            return compiler.compile(self.default)
        template_params = self.extra.copy()
        template_params.update(extra_context)
        case_parts = []
        sql_params = []
        for case in self.cases:
            try:
                case_sql, case_params = compiler.compile(case)
            except EmptyResultSet:
                # A branch whose condition can never match is dropped.
                continue
            case_parts.append(case_sql)
            sql_params.extend(case_params)
        default_sql, default_params = compiler.compile(self.default)
        if not case_parts:
            return default_sql, default_params
        case_joiner = case_joiner or self.case_joiner
        template_params['cases'] = case_joiner.join(case_parts)
        template_params['default'] = default_sql
        sql_params.extend(default_params)
        template = template or template_params.get('template', self.template)
        sql = template % template_params
        if self._output_field_or_none is not None:
            sql = connection.ops.unification_cast_sql(self.output_field) % sql
        return sql, sql_params


class Subquery(Expression):
    """
    An explicit subquery. It may contain OuterRef() references to the outer
    query which will be resolved when it is applied to that query.
    """
    template = '(%(subquery)s)'

    def __init__(self, queryset, output_field=None, **extra):
        self.queryset = queryset
        self.extra = extra
        super().__init__(output_field)

    def _resolve_output_field(self):
        # A single selected column determines the output type directly.
        if len(self.queryset.query.select) == 1:
            return self.queryset.query.select[0].field
        return super()._resolve_output_field()

    def copy(self):
        clone = super().copy()
        clone.queryset = clone.queryset.all()
        return clone

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        clone = self.copy()
        clone.is_summary = summarize
        clone.queryset.query.bump_prefix(query)

        # Need to recursively resolve these.
        def resolve_all(child):
            if hasattr(child, 'children'):
                [resolve_all(_child) for _child in child.children]
            if hasattr(child, 'rhs'):
                child.rhs = resolve(child.rhs)

        def resolve(child):
            if hasattr(child, 'resolve_expression'):
                resolved = child.resolve_expression(
                    query=query, allow_joins=allow_joins, reuse=reuse,
                    summarize=summarize, for_save=for_save,
                )
                # Add table alias to the parent query's aliases to prevent
                # quoting.
                if hasattr(resolved, 'alias') and resolved.alias != resolved.target.model._meta.db_table:
                    clone.queryset.query.external_aliases.add(resolved.alias)
                return resolved
            return child

        resolve_all(clone.queryset.query.where)

        for key, value in clone.queryset.query.annotations.items():
            if isinstance(value, Subquery):
                clone.queryset.query.annotations[key] = resolve(value)

        return clone

    def get_source_expressions(self):
        # Only lhs expressions of the inner WHERE clauses are exposed.
        return [
            x for x in [
                getattr(expr, 'lhs', None)
                for expr in self.queryset.query.where.children
            ] if x
        ]

    def relabeled_clone(self, change_map):
        clone = self.copy()
        clone.queryset.query = clone.queryset.query.relabeled_clone(change_map)
        clone.queryset.query.external_aliases.update(
            alias for alias in change_map.values()
            if alias not in clone.queryset.query.alias_map
        )
        return clone

    def as_sql(self, compiler, connection, template=None, **extra_context):
        connection.ops.check_expression_support(self)
        template_params = self.extra.copy()
        template_params.update(extra_context)
        template_params['subquery'], sql_params = self.queryset.query.get_compiler(connection=connection).as_sql()

        template = template or template_params.get('template', self.template)
        sql = template % template_params
        return sql, sql_params

    def _prepare(self, output_field):
        # This method will only be called if this instance is the "rhs" in an
        # expression: the wrapping () must be removed (as the expression that
        # contains this will provide them). SQLite evaluates ((subquery))
        # differently than the other databases.
        if self.template == '(%(subquery)s)':
            clone = self.copy()
            clone.template = '%(subquery)s'
            return clone
        return self


class Exists(Subquery):
    # EXISTS(...) wrapper around a subquery, optionally negated via ~.
    template = 'EXISTS(%(subquery)s)'
    output_field = fields.BooleanField()

    def __init__(self, *args, negated=False, **kwargs):
        self.negated = negated
        super().__init__(*args, **kwargs)

    def __invert__(self):
        return type(self)(self.queryset, negated=(not self.negated), **self.extra)

    def resolve_expression(self, query=None, *args, **kwargs):
        # As a performance optimization, remove ordering since EXISTS doesn't
        # care about it, just whether or not a row matches.
        self.queryset = self.queryset.order_by()
        return super().resolve_expression(query, *args, **kwargs)

    def as_sql(self, compiler, connection, template=None, **extra_context):
        sql, params = super().as_sql(compiler, connection, template, **extra_context)
        if self.negated:
            sql = 'NOT {}'.format(sql)
        return sql, params

    def as_oracle(self, compiler, connection, template=None, **extra_context):
        # Oracle doesn't allow EXISTS() in the SELECT list, so wrap it with a
        # CASE WHEN expression. Change the template since the When expression
        # requires a left hand side (column) to compare against.
        sql, params = self.as_sql(compiler, connection, template, **extra_context)
        sql = 'CASE WHEN {} THEN 1 ELSE 0 END'.format(sql)
        return sql, params


class OrderBy(BaseExpression):
    # Wraps an expression with an ASC/DESC direction and NULLS placement.
    template = '%(expression)s %(ordering)s'

    def __init__(self, expression, descending=False, nulls_first=False, nulls_last=False):
        if nulls_first and nulls_last:
            raise ValueError('nulls_first and nulls_last are mutually exclusive')
        self.nulls_first = nulls_first
        self.nulls_last = nulls_last
        self.descending = descending
        if not hasattr(expression, 'resolve_expression'):
            raise ValueError('expression must be an expression type')
        self.expression = expression

    def __repr__(self):
        return "{}({}, descending={})".format(
            self.__class__.__name__, self.expression, self.descending)

    def set_source_expressions(self, exprs):
        self.expression = exprs[0]

    def get_source_expressions(self):
        return [self.expression]

    def as_sql(self, compiler, connection, template=None, **extra_context):
        if not template:
            if self.nulls_last:
                template = '%s NULLS LAST' % self.template
            elif self.nulls_first:
                template = '%s NULLS FIRST' % self.template
        connection.ops.check_expression_support(self)
        expression_sql, params = compiler.compile(self.expression)
        placeholders = {
            'expression': expression_sql,
            'ordering': 'DESC' if self.descending else 'ASC',
        }
        placeholders.update(extra_context)
        template = template or self.template
        # Duplicate the params once per occurrence of the expression in the
        # template (backend templates may repeat %(expression)s).
        params *= template.count('%(expression)s')
        return (template % placeholders).rstrip(), params

    def as_sqlite(self, compiler, connection):
        # SQLite lacks NULLS FIRST/LAST; emulate with an IS NULL sort key.
        template = None
        if self.nulls_last:
            template = '%(expression)s IS NULL, %(expression)s %(ordering)s'
        elif self.nulls_first:
            template = '%(expression)s IS NOT NULL, %(expression)s %(ordering)s'
        return self.as_sql(compiler, connection, template=template)

    def as_mysql(self, compiler, connection):
        # MySQL lacks NULLS FIRST/LAST; emulate with an IF(ISNULL(...)) key.
        template = None
        if self.nulls_last:
            template = 'IF(ISNULL(%(expression)s),1,0), %(expression)s %(ordering)s '
        elif self.nulls_first:
            template = 'IF(ISNULL(%(expression)s),0,1), %(expression)s %(ordering)s '
        return self.as_sql(compiler, connection, template=template)

    def get_group_by_cols(self):
        cols = []
        for source in self.get_source_expressions():
            cols.extend(source.get_group_by_cols())
        return cols

    def reverse_ordering(self):
        self.descending = not self.descending
        if self.nulls_first or self.nulls_last:
            self.nulls_first = not self.nulls_first
            self.nulls_last = not self.nulls_last
        return self

    def asc(self):
        self.descending = False

    def desc(self):
        self.descending = True


class Window(Expression):
    template = '%(expression)s OVER (%(window)s)'
    # Although the main expression may either be an aggregate or an
    # expression with an aggregate function, the GROUP BY that will
    # be introduced in the query as a result is not desired.
    contains_aggregate = False
    contains_over_clause = True
    filterable = False

    def __init__(self, expression, partition_by=None, order_by=None, frame=None, output_field=None):
        self.partition_by = partition_by
        self.order_by = order_by
        self.frame = frame

        if not getattr(expression, 'window_compatible', False):
            raise ValueError(
                "Expression '%s' isn't compatible with OVER clauses." %
                expression.__class__.__name__
            )

        if self.partition_by is not None:
            if not isinstance(self.partition_by, (tuple, list)):
                self.partition_by = (self.partition_by,)
            self.partition_by = ExpressionList(*self.partition_by)

        if self.order_by is not None:
            if isinstance(self.order_by, (list, tuple)):
                self.order_by = ExpressionList(*self.order_by)
            elif not isinstance(self.order_by, BaseExpression):
                raise ValueError(
                    'order_by must be either an Expression or a sequence of '
                    'expressions.'
                )
        super().__init__(output_field=output_field)
        self.source_expression = self._parse_expressions(expression)[0]

    def _resolve_output_field(self):
        return self.source_expression.output_field

    def get_source_expressions(self):
        return [self.source_expression, self.partition_by, self.order_by, self.frame]

    def set_source_expressions(self, exprs):
        self.source_expression, self.partition_by, self.order_by, self.frame = exprs

    def as_sql(self, compiler, connection, function=None, template=None):
        connection.ops.check_expression_support(self)
        expr_sql, params = compiler.compile(self.source_expression)
        window_sql, window_params = [], []

        if self.partition_by is not None:
            sql_expr, sql_params = self.partition_by.as_sql(
                compiler=compiler, connection=connection,
                template='PARTITION BY %(expressions)s',
            )
            # NOTE(review): extend() with a string appends its characters;
            # the pieces are re-joined below, so the result is unchanged.
            window_sql.extend(sql_expr)
            window_params.extend(sql_params)

        if self.order_by is not None:
            window_sql.append(' ORDER BY ')
            order_sql, order_params = compiler.compile(self.order_by)
            window_sql.extend(''.join(order_sql))
            window_params.extend(order_params)

        if self.frame:
            frame_sql, frame_params = compiler.compile(self.frame)
            window_sql.extend(' ' + frame_sql)
            window_params.extend(frame_params)

        params.extend(window_params)
        template = template or self.template

        return template % {
            'expression': expr_sql,
            'window': ''.join(window_sql).strip()
        }, params

    def __str__(self):
        return '{} OVER ({}{}{})'.format(
            str(self.source_expression),
            'PARTITION BY ' + str(self.partition_by) if self.partition_by else '',
            'ORDER BY ' + str(self.order_by) if self.order_by else '',
            str(self.frame or ''),
        )

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self)

    def get_group_by_cols(self):
        return []


class WindowFrame(Expression):
    """
    Model the frame clause in window expressions. There are two types of frame
    clauses which are subclasses, however, all processing and validation (by no
    means intended to be complete) is done here. Thus, providing an end for a
    frame is optional (the default is UNBOUNDED FOLLOWING, which is the last
    row in the frame).
    """
    template = '%(frame_type)s BETWEEN %(start)s AND %(end)s'

    def __init__(self, start=None, end=None):
        self.start = start
        self.end = end

    def set_source_expressions(self, exprs):
        self.start, self.end = exprs

    def get_source_expressions(self):
        return [Value(self.start), Value(self.end)]

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        start, end = self.window_frame_start_end(connection, self.start.value, self.end.value)
        return self.template % {
            'frame_type': self.frame_type,
            'start': start,
            'end': end,
        }, []

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self)

    def get_group_by_cols(self):
        return []

    def __str__(self):
        # Uses the module-level default `connection` for display only.
        if self.start is not None and self.start < 0:
            start = '%d %s' % (abs(self.start), connection.ops.PRECEDING)
        elif self.start is not None and self.start == 0:
            start = connection.ops.CURRENT_ROW
        else:
            start = connection.ops.UNBOUNDED_PRECEDING

        if self.end is not None and self.end > 0:
            end = '%d %s' % (self.end, connection.ops.FOLLOWING)
        elif self.end is not None and self.end == 0:
            end = connection.ops.CURRENT_ROW
        else:
            end = connection.ops.UNBOUNDED_FOLLOWING
        return self.template % {
            'frame_type': self.frame_type,
            'start': start,
            'end': end,
        }

    def window_frame_start_end(self, connection, start, end):
        raise NotImplementedError('Subclasses must implement window_frame_start_end().')


class RowRange(WindowFrame):
    # ROWS BETWEEN ... frame.
    frame_type = 'ROWS'

    def window_frame_start_end(self, connection, start, end):
        return connection.ops.window_frame_rows_start_end(start, end)


class ValueRange(WindowFrame):
    # RANGE BETWEEN ... frame.
    frame_type = 'RANGE'

    def
window_frame_start_end(self, connection, start, end): + return connection.ops.window_frame_range_start_end(start, end) diff --git a/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/fields/__init__.py b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/fields/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b0ab271723df0b08f94bf4f6f9d960ac68425c24 --- /dev/null +++ b/A-news-Agrregation-system-master/myvenv/lib64/python3.6/site-packages/django/db/models/fields/__init__.py @@ -0,0 +1,2377 @@ +import collections +import copy +import datetime +import decimal +import itertools +import uuid +import warnings +from base64 import b64decode, b64encode +from functools import partialmethod, total_ordering + +from django import forms +from django.apps import apps +from django.conf import settings +from django.core import checks, exceptions, validators +# When the _meta object was formalized, this exception was moved to +# django.core.exceptions. It is retained here for backwards compatibility +# purposes. 
+from django.core.exceptions import FieldDoesNotExist # NOQA +from django.db import connection, connections, router +from django.db.models.constants import LOOKUP_SEP +from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin +from django.utils import timezone +from django.utils.datastructures import DictWrapper +from django.utils.dateparse import ( + parse_date, parse_datetime, parse_duration, parse_time, +) +from django.utils.duration import duration_string +from django.utils.encoding import force_bytes, smart_text +from django.utils.functional import Promise, cached_property +from django.utils.ipv6 import clean_ipv6_address +from django.utils.itercompat import is_iterable +from django.utils.text import capfirst +from django.utils.translation import gettext_lazy as _ + +__all__ = [ + 'AutoField', 'BLANK_CHOICE_DASH', 'BigAutoField', 'BigIntegerField', + 'BinaryField', 'BooleanField', 'CharField', 'CommaSeparatedIntegerField', + 'DateField', 'DateTimeField', 'DecimalField', 'DurationField', + 'EmailField', 'Empty', 'Field', 'FieldDoesNotExist', 'FilePathField', + 'FloatField', 'GenericIPAddressField', 'IPAddressField', 'IntegerField', + 'NOT_PROVIDED', 'NullBooleanField', 'PositiveIntegerField', + 'PositiveSmallIntegerField', 'SlugField', 'SmallIntegerField', 'TextField', + 'TimeField', 'URLField', 'UUIDField', +] + + +class Empty: + pass + + +class NOT_PROVIDED: + pass + + +# The values to use for "blank" in SelectFields. Will be appended to the start +# of most "choices" lists. +BLANK_CHOICE_DASH = [("", "---------")] + + +def _load_field(app_label, model_name, field_name): + return apps.get_model(app_label, model_name)._meta.get_field(field_name) + + +# A guide to Field parameters: +# +# * name: The name of the field specified in the model. +# * attname: The attribute to use on the model object. This is the same as +# "name", except in the case of ForeignKeys, where "_id" is +# appended. 
+# * db_column: The db_column specified in the model (or None). +# * column: The database column for this field. This is the same as +# "attname", except if db_column is specified. +# +# Code that introspects values, or does other dynamic things, should use +# attname. For example, this gets the primary key value of object "obj": +# +# getattr(obj, opts.pk.attname) + +def _empty(of_cls): + new = Empty() + new.__class__ = of_cls + return new + + +def return_None(): + return None + + +@total_ordering +class Field(RegisterLookupMixin): + """Base class for all field types""" + + # Designates whether empty strings fundamentally are allowed at the + # database level. + empty_strings_allowed = True + empty_values = list(validators.EMPTY_VALUES) + + # These track each time a Field instance is created. Used to retain order. + # The auto_creation_counter is used for fields that Django implicitly + # creates, creation_counter is used for all user-specified fields. + creation_counter = 0 + auto_creation_counter = -1 + default_validators = [] # Default set of validators + default_error_messages = { + 'invalid_choice': _('Value %(value)r is not a valid choice.'), + 'null': _('This field cannot be null.'), + 'blank': _('This field cannot be blank.'), + 'unique': _('%(model_name)s with this %(field_label)s ' + 'already exists.'), + # Translators: The 'lookup_type' is one of 'date', 'year' or 'month'. 
+ # Eg: "Title must be unique for pub_date year" + 'unique_for_date': _("%(field_label)s must be unique for " + "%(date_field_label)s %(lookup_type)s."), + } + system_check_deprecated_details = None + system_check_removed_details = None + + # Field flags + hidden = False + + many_to_many = None + many_to_one = None + one_to_many = None + one_to_one = None + related_model = None + + # Generic field type description, usually overridden by subclasses + def _description(self): + return _('Field of type: %(field_type)s') % { + 'field_type': self.__class__.__name__ + } + description = property(_description) + + def __init__(self, verbose_name=None, name=None, primary_key=False, + max_length=None, unique=False, blank=False, null=False, + db_index=False, rel=None, default=NOT_PROVIDED, editable=True, + serialize=True, unique_for_date=None, unique_for_month=None, + unique_for_year=None, choices=None, help_text='', db_column=None, + db_tablespace=None, auto_created=False, validators=(), + error_messages=None): + self.name = name + self.verbose_name = verbose_name # May be set by set_attributes_from_name + self._verbose_name = verbose_name # Store original for deconstruction + self.primary_key = primary_key + self.max_length, self._unique = max_length, unique + self.blank, self.null = blank, null + self.remote_field = rel + self.is_relation = self.remote_field is not None + self.default = default + self.editable = editable + self.serialize = serialize + self.unique_for_date = unique_for_date + self.unique_for_month = unique_for_month + self.unique_for_year = unique_for_year + if isinstance(choices, collections.Iterator): + choices = list(choices) + self.choices = choices or [] + self.help_text = help_text + self.db_index = db_index + self.db_column = db_column + self._db_tablespace = db_tablespace + self.auto_created = auto_created + + # Adjust the appropriate creation counter, and save our local copy. 
+ if auto_created: + self.creation_counter = Field.auto_creation_counter + Field.auto_creation_counter -= 1 + else: + self.creation_counter = Field.creation_counter + Field.creation_counter += 1 + + self._validators = list(validators) # Store for deconstruction later + + messages = {} + for c in reversed(self.__class__.__mro__): + messages.update(getattr(c, 'default_error_messages', {})) + messages.update(error_messages or {}) + self._error_messages = error_messages # Store for deconstruction later + self.error_messages = messages + + def __str__(self): + """ + Return "app_label.model_label.field_name" for fields attached to + models. + """ + if not hasattr(self, 'model'): + return super().__str__() + model = self.model + app = model._meta.app_label + return '%s.%s.%s' % (app, model._meta.object_name, self.name) + + def __repr__(self): + """Display the module, class, and name of the field.""" + path = '%s.%s' % (self.__class__.__module__, self.__class__.__qualname__) + name = getattr(self, 'name', None) + if name is not None: + return '<%s: %s>' % (path, name) + return '<%s>' % path + + def check(self, **kwargs): + errors = [] + errors.extend(self._check_field_name()) + errors.extend(self._check_choices()) + errors.extend(self._check_db_index()) + errors.extend(self._check_null_allowed_for_primary_keys()) + errors.extend(self._check_backend_specific_checks(**kwargs)) + errors.extend(self._check_validators()) + errors.extend(self._check_deprecation_details()) + return errors + + def _check_field_name(self): + """ + Check if field name is valid, i.e. 1) does not end with an + underscore, 2) does not contain "__" and 3) is not "pk". + """ + if self.name.endswith('_'): + return [ + checks.Error( + 'Field names must not end with an underscore.', + obj=self, + id='fields.E001', + ) + ] + elif LOOKUP_SEP in self.name: + return [ + checks.Error( + 'Field names must not contain "%s".' 
% (LOOKUP_SEP,), + obj=self, + id='fields.E002', + ) + ] + elif self.name == 'pk': + return [ + checks.Error( + "'pk' is a reserved word that cannot be used as a field name.", + obj=self, + id='fields.E003', + ) + ] + else: + return [] + + def _check_choices(self): + if self.choices: + if isinstance(self.choices, str) or not is_iterable(self.choices): + return [ + checks.Error( + "'choices' must be an iterable (e.g., a list or tuple).", + obj=self, + id='fields.E004', + ) + ] + elif any(isinstance(choice, str) or + not is_iterable(choice) or len(choice) != 2 + for choice in self.choices): + return [ + checks.Error( + "'choices' must be an iterable containing " + "(actual value, human readable name) tuples.", + obj=self, + id='fields.E005', + ) + ] + else: + return [] + else: + return [] + + def _check_db_index(self): + if self.db_index not in (None, True, False): + return [ + checks.Error( + "'db_index' must be None, True or False.", + obj=self, + id='fields.E006', + ) + ] + else: + return [] + + def _check_null_allowed_for_primary_keys(self): + if (self.primary_key and self.null and + not connection.features.interprets_empty_strings_as_nulls): + # We cannot reliably check this for backends like Oracle which + # consider NULL and '' to be equal (and thus set up + # character-based fields a little differently). 
+ return [ + checks.Error( + 'Primary keys must not have null=True.', + hint=('Set null=False on the field, or ' + 'remove primary_key=True argument.'), + obj=self, + id='fields.E007', + ) + ] + else: + return [] + + def _check_backend_specific_checks(self, **kwargs): + app_label = self.model._meta.app_label + for db in connections: + if router.allow_migrate(db, app_label, model_name=self.model._meta.model_name): + return connections[db].validation.check_field(self, **kwargs) + return [] + + def _check_validators(self): + errors = [] + for i, validator in enumerate(self.validators): + if not callable(validator): + errors.append( + checks.Error( + "All 'validators' must be callable.", + hint=( + "validators[{i}] ({repr}) isn't a function or " + "instance of a validator class.".format( + i=i, repr=repr(validator), + ) + ), + obj=self, + id='fields.E008', + ) + ) + return errors + + def _check_deprecation_details(self): + if self.system_check_removed_details is not None: + return [ + checks.Error( + self.system_check_removed_details.get( + 'msg', + '%s has been removed except for support in historical ' + 'migrations.' % self.__class__.__name__ + ), + hint=self.system_check_removed_details.get('hint'), + obj=self, + id=self.system_check_removed_details.get('id', 'fields.EXXX'), + ) + ] + elif self.system_check_deprecated_details is not None: + return [ + checks.Warning( + self.system_check_deprecated_details.get( + 'msg', + '%s has been deprecated.' 
% self.__class__.__name__ + ), + hint=self.system_check_deprecated_details.get('hint'), + obj=self, + id=self.system_check_deprecated_details.get('id', 'fields.WXXX'), + ) + ] + return [] + + def get_col(self, alias, output_field=None): + if output_field is None: + output_field = self + if alias != self.model._meta.db_table or output_field != self: + from django.db.models.expressions import Col + return Col(alias, self, output_field) + else: + return self.cached_col + + @cached_property + def cached_col(self): + from django.db.models.expressions import Col + return Col(self.model._meta.db_table, self) + + def select_format(self, compiler, sql, params): + """ + Custom format for select clauses. For example, GIS columns need to be + selected as AsText(table.col) on MySQL as the table.col data can't be + used by Django. + """ + return sql, params + + def deconstruct(self): + """ + Return enough information to recreate the field as a 4-tuple: + + * The name of the field on the model, if contribute_to_class() has + been run. + * The import path of the field, including the class:e.g. + django.db.models.IntegerField This should be the most portable + version, so less specific may be better. + * A list of positional arguments. + * A dict of keyword arguments. + + Note that the positional or keyword arguments must contain values of + the following types (including inner values of collection types): + + * None, bool, str, int, float, complex, set, frozenset, list, tuple, + dict + * UUID + * datetime.datetime (naive), datetime.date + * top-level classes, top-level functions - will be referenced by their + full import path + * Storage instances - these have their own deconstruct() method + + This is because the values here must be serialized into a text format + (possibly new Python code, possibly JSON) and these are the only types + with encoding handlers defined. 
+ + There's no need to return the exact way the field was instantiated this + time, just ensure that the resulting field is the same - prefer keyword + arguments over positional ones, and omit parameters with their default + values. + """ + # Short-form way of fetching all the default parameters + keywords = {} + possibles = { + "verbose_name": None, + "primary_key": False, + "max_length": None, + "unique": False, + "blank": False, + "null": False, + "db_index": False, + "default": NOT_PROVIDED, + "editable": True, + "serialize": True, + "unique_for_date": None, + "unique_for_month": None, + "unique_for_year": None, + "choices": [], + "help_text": '', + "db_column": None, + "db_tablespace": None, + "auto_created": False, + "validators": [], + "error_messages": None, + } + attr_overrides = { + "unique": "_unique", + "error_messages": "_error_messages", + "validators": "_validators", + "verbose_name": "_verbose_name", + "db_tablespace": "_db_tablespace", + } + equals_comparison = {"choices", "validators"} + for name, default in possibles.items(): + value = getattr(self, attr_overrides.get(name, name)) + # Unroll anything iterable for choices into a concrete list + if name == "choices" and isinstance(value, collections.Iterable): + value = list(value) + # Do correct kind of comparison + if name in equals_comparison: + if value != default: + keywords[name] = value + else: + if value is not default: + keywords[name] = value + # Work out path - we shorten it for known Django core fields + path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__) + if path.startswith("django.db.models.fields.related"): + path = path.replace("django.db.models.fields.related", "django.db.models") + if path.startswith("django.db.models.fields.files"): + path = path.replace("django.db.models.fields.files", "django.db.models") + if path.startswith("django.db.models.fields.proxy"): + path = path.replace("django.db.models.fields.proxy", "django.db.models") + if 
path.startswith("django.db.models.fields"): + path = path.replace("django.db.models.fields", "django.db.models") + # Return basic info - other fields should override this. + return (self.name, path, [], keywords) + + def clone(self): + """ + Uses deconstruct() to clone a new copy of this Field. + Will not preserve any class attachments/attribute names. + """ + name, path, args, kwargs = self.deconstruct() + return self.__class__(*args, **kwargs) + + def __eq__(self, other): + # Needed for @total_ordering + if isinstance(other, Field): + return self.creation_counter == other.creation_counter + return NotImplemented + + def __lt__(self, other): + # This is needed because bisect does not take a comparison function. + if isinstance(other, Field): + return self.creation_counter < other.creation_counter + return NotImplemented + + def __hash__(self): + return hash(self.creation_counter) + + def __deepcopy__(self, memodict): + # We don't have to deepcopy very much here, since most things are not + # intended to be altered after initial creation. + obj = copy.copy(self) + if self.remote_field: + obj.remote_field = copy.copy(self.remote_field) + if hasattr(self.remote_field, 'field') and self.remote_field.field is self: + obj.remote_field.field = obj + memodict[id(self)] = obj + return obj + + def __copy__(self): + # We need to avoid hitting __reduce__, so define this + # slightly weird copy construct. + obj = Empty() + obj.__class__ = self.__class__ + obj.__dict__ = self.__dict__.copy() + return obj + + def __reduce__(self): + """ + Pickling should return the model._meta.fields instance of the field, + not a new copy of that field. So, use the app registry to load the + model and then the field back. + """ + if not hasattr(self, 'model'): + # Fields are sometimes used without attaching them to models (for + # example in aggregation). In this case give back a plain field + # instance. 
The code below will create a new empty instance of + # class self.__class__, then update its dict with self.__dict__ + # values - so, this is very close to normal pickle. + state = self.__dict__.copy() + # The _get_default cached_property can't be pickled due to lambda + # usage. + state.pop('_get_default', None) + return _empty, (self.__class__,), state + return _load_field, (self.model._meta.app_label, self.model._meta.object_name, + self.name) + + def get_pk_value_on_save(self, instance): + """ + Hook to generate new PK values on save. This method is called when + saving instances with no primary key value set. If this method returns + something else than None, then the returned value is used when saving + the new instance. + """ + if self.default: + return self.get_default() + return None + + def to_python(self, value): + """ + Convert the input value into the expected Python data type, raising + django.core.exceptions.ValidationError if the data can't be converted. + Return the converted value. Subclasses should override this. + """ + return value + + @cached_property + def validators(self): + """ + Some validators can't be created at field initialization time. + This method provides a way to delay their creation until required. + """ + return list(itertools.chain(self.default_validators, self._validators)) + + def run_validators(self, value): + if value in self.empty_values: + return + + errors = [] + for v in self.validators: + try: + v(value) + except exceptions.ValidationError as e: + if hasattr(e, 'code') and e.code in self.error_messages: + e.message = self.error_messages[e.code] + errors.extend(e.error_list) + + if errors: + raise exceptions.ValidationError(errors) + + def validate(self, value, model_instance): + """ + Validate value and raise ValidationError if necessary. Subclasses + should override this to provide validation logic. + """ + if not self.editable: + # Skip validation for non-editable fields. 
+ return + + if self.choices and value not in self.empty_values: + for option_key, option_value in self.choices: + if isinstance(option_value, (list, tuple)): + # This is an optgroup, so look inside the group for + # options. + for optgroup_key, optgroup_value in option_value: + if value == optgroup_key: + return + elif value == option_key: + return + raise exceptions.ValidationError( + self.error_messages['invalid_choice'], + code='invalid_choice', + params={'value': value}, + ) + + if value is None and not self.null: + raise exceptions.ValidationError(self.error_messages['null'], code='null') + + if not self.blank and value in self.empty_values: + raise exceptions.ValidationError(self.error_messages['blank'], code='blank') + + def clean(self, value, model_instance): + """ + Convert the value's type and run validation. Validation errors + from to_python() and validate() are propagated. Return the correct + value if no error is raised. + """ + value = self.to_python(value) + self.validate(value, model_instance) + self.run_validators(value) + return value + + def db_type_parameters(self, connection): + return DictWrapper(self.__dict__, connection.ops.quote_name, 'qn_') + + def db_check(self, connection): + """ + Return the database column check constraint for this field, for the + provided connection. Works the same way as db_type() for the case that + get_internal_type() does not map to a preexisting model field. + """ + data = self.db_type_parameters(connection) + try: + return connection.data_type_check_constraints[self.get_internal_type()] % data + except KeyError: + return None + + def db_type(self, connection): + """ + Return the database column data type for this field, for the provided + connection. + """ + # The default implementation of this method looks at the + # backend-specific data_types dictionary, looking up the field by its + # "internal type". 
+ # + # A Field class can implement the get_internal_type() method to specify + # which *preexisting* Django Field class it's most similar to -- i.e., + # a custom field might be represented by a TEXT column type, which is + # the same as the TextField Django field type, which means the custom + # field's get_internal_type() returns 'TextField'. + # + # But the limitation of the get_internal_type() / data_types approach + # is that it cannot handle database column types that aren't already + # mapped to one of the built-in Django field types. In this case, you + # can implement db_type() instead of get_internal_type() to specify + # exactly which wacky database column type you want to use. + data = self.db_type_parameters(connection) + try: + return connection.data_types[self.get_internal_type()] % data + except KeyError: + return None + + def rel_db_type(self, connection): + """ + Return the data type that a related field pointing to this field should + use. For example, this method is called by ForeignKey and OneToOneField + to determine its data type. + """ + return self.db_type(connection) + + def cast_db_type(self, connection): + """Return the data type to use in the Cast() function.""" + db_type = connection.ops.cast_data_types.get(self.get_internal_type()) + if db_type: + return db_type % self.db_type_parameters(connection) + return self.db_type(connection) + + def db_parameters(self, connection): + """ + Extension of db_type(), providing a range of different return values + (type, checks). This will look at db_type(), allowing custom model + fields to override it. 
+ """ + type_string = self.db_type(connection) + check_string = self.db_check(connection) + return { + "type": type_string, + "check": check_string, + } + + def db_type_suffix(self, connection): + return connection.data_types_suffix.get(self.get_internal_type()) + + def get_db_converters(self, connection): + if hasattr(self, 'from_db_value'): + return [self.from_db_value] + return [] + + @property + def unique(self): + return self._unique or self.primary_key + + @property + def db_tablespace(self): + return self._db_tablespace or settings.DEFAULT_INDEX_TABLESPACE + + def set_attributes_from_name(self, name): + if not self.name: + self.name = name + self.attname, self.column = self.get_attname_column() + self.concrete = self.column is not None + if self.verbose_name is None and self.name: + self.verbose_name = self.name.replace('_', ' ') + + def contribute_to_class(self, cls, name, private_only=False): + """ + Register the field with the model class it belongs to. + + If private_only is True, create a separate instance of this field + for every subclass of cls, even if cls is not an abstract model. + """ + self.set_attributes_from_name(name) + self.model = cls + if private_only: + cls._meta.add_field(self, private=True) + else: + cls._meta.add_field(self) + if self.column: + # Don't override classmethods with the descriptor. This means that + # if you have a classmethod and a field with the same name, then + # such fields can't be deferred (we don't have a check for this). + if not getattr(cls, self.attname, None): + setattr(cls, self.attname, DeferredAttribute(self.attname, cls)) + if self.choices: + setattr(cls, 'get_%s_display' % self.name, + partialmethod(cls._get_FIELD_display, field=self)) + + def get_filter_kwargs_for_object(self, obj): + """ + Return a dict that when passed as kwargs to self.model.filter(), would + yield all instances having the same value for this field as obj has. 
+ """ + return {self.name: getattr(obj, self.attname)} + + def get_attname(self): + return self.name + + def get_attname_column(self): + attname = self.get_attname() + column = self.db_column or attname + return attname, column + + def get_internal_type(self): + return self.__class__.__name__ + + def pre_save(self, model_instance, add): + """Return field's value just before saving.""" + return getattr(model_instance, self.attname) + + def get_prep_value(self, value): + """Perform preliminary non-db specific value checks and conversions.""" + if isinstance(value, Promise): + value = value._proxy____cast() + return value + + def get_db_prep_value(self, value, connection, prepared=False): + """ + Return field's value prepared for interacting with the database backend. + + Used by the default implementations of get_db_prep_save(). + """ + if not prepared: + value = self.get_prep_value(value) + return value + + def get_db_prep_save(self, value, connection): + """Return field's value prepared for saving into a database.""" + return self.get_db_prep_value(value, connection=connection, prepared=False) + + def has_default(self): + """Return a boolean of whether this field has a default value.""" + return self.default is not NOT_PROVIDED + + def get_default(self): + """Return the default value for this field.""" + return self._get_default() + + @cached_property + def _get_default(self): + if self.has_default(): + if callable(self.default): + return self.default + return lambda: self.default + + if not self.empty_strings_allowed or self.null and not connection.features.interprets_empty_strings_as_nulls: + return return_None + return str # return empty string + + def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None): + """ + Return choices with a default blank choices included, for use + as