Dataset schema (all columns are strings): id (length 12 to 102), prompt (length 242 to 11.5M), relative_path (length 12 to 89).
benedict.utils.type_util.is_json_serializable
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function: #CURRENT_FILE: python-benedict/benedict/utils/type_util.py import pathlib import re from datetime import datetime from decimal import Decimal def is_json_serializable(val):
python-benedict/benedict/utils/type_util.py
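A possible completion for the record above (a minimal sketch, untested; it assumes an extra `import json` and treats "serializable" as "accepted by the stdlib encoder" — the library may instead check against a whitelist of types, which would explain the pathlib/datetime/Decimal imports in the stub):

    import json

    def is_json_serializable(val):
        # A value counts as JSON-serializable iff json.dumps() accepts it.
        try:
            json.dumps(val)
            return True
        except (TypeError, ValueError):
            return False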
feedparser.urls.convert_to_idn
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function: #CURRENT_FILE: feedparser/feedparser/urls.py import re import urllib.parse from .html import _BaseHTMLProcessor def convert_to_idn(url): """Convert a URL to IDN notation""" # this function should only be called with a unicode string # strategy: if the host cannot be encoded in ascii, then # it'll be necessary to encode it in idn form
feedparser/feedparser/urls.py
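A sketch following the strategy spelled out in the docstring (split the URL, try to ASCII-encode the host, fall back to the 'idna' codec label by label; the port handling is an assumption):

    def convert_to_idn(url):
        """Convert a URL to IDN notation"""
        parts = list(urllib.parse.urlsplit(url))
        try:
            parts[1].encode('ascii')
        except UnicodeEncodeError:
            # the host cannot be represented in ascii: encode it in idn form
            host = parts[1].rsplit(':', 1)
            newhost = []
            port = ''
            if len(host) == 2:
                port = host.pop()
            for h in host[0].split('.'):
                newhost.append(h.encode('idna').decode('utf-8'))
            parts[1] = '.'.join(newhost)
            if port:
                parts[1] += ':' + port
            return urllib.parse.urlunsplit(parts)
        else:
            return url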
mistune.toc.add_toc_hook
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE mistune/src/mistune/toc.py def render_toc_ul(toc): """Render a <ul> table of content HTML. The param "toc" should be formatted into this structure:: [ (level, id, text), ] For example:: [ (1, 'toc-intro', 'Introduction'), (2, 'toc-install', 'Install'), (2, 'toc-upgrade', 'Upgrade'), (1, 'toc-license', 'License'), ] """ if not toc: return '' s = '<ul>\n' levels = [] for level, k, text in toc: item = '<a href="#{}">{}</a>'.format(k, text) if not levels: s += '<li>' + item levels.append(level) elif level == levels[-1]: s += '</li>\n<li>' + item elif level > levels[-1]: s += '\n<ul>\n<li>' + item levels.append(level) else: levels.pop() while levels: last_level = levels.pop() if level == last_level: s += '</li>\n</ul>\n</li>\n<li>' + item levels.append(level) break elif level > last_level: s += '</li>\n<li>' + item levels.append(last_level) levels.append(level) break else: s += '</li>\n</ul>\n' else: levels.append(level) s += '</li>\n<li>' + item while len(levels) > 1: s += '</li>\n</ul>\n' levels.pop() return s + '</li>\n</ul>\n' # FILE mistune/src/mistune/toc.py def normalize_toc_item(md, token): text = token['text'] tokens = md.inline(text, {}) html = md.renderer(tokens, {}) text = striptags(html) attrs = token['attrs'] return attrs['level'], attrs['id'], text # FILE mistune/src/mistune/util.py def striptags(s: str): return _striptags_re.sub('', s) Based on the information above, please complete the function: #CURRENT_FILE: mistune/src/mistune/toc.py from .util import striptags def add_toc_hook(md, min_level=1, max_level=3, heading_id=None): """Add a hook to save toc items into ``state.env``. This is usually helpful for doc generator:: import mistune from mistune.toc import add_toc_hook, render_toc_ul md = mistune.create_markdown(...) add_toc_hook(md) html, state = md.parse(text) toc_items = state.env['toc_items'] toc_html = render_toc_ul(toc_items) :param md: Markdown instance :param min_level: min heading level :param max_level: max heading level :param heading_id: a function to generate heading_id """
mistune/src/mistune/toc.py
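A sketch of the completion (assumes mistune v3's hook API: `state.tokens` holds block tokens carrying 'attrs', and hooks appended to `md.before_render_hooks` receive `(md, state)`; `normalize_toc_item` is defined in the same file per the quoted context):

    def add_toc_hook(md, min_level=1, max_level=3, heading_id=None):
        if heading_id is None:
            def heading_id(token, index):
                return 'toc_' + str(index + 1)

        def toc_hook(md, state):
            headings = []
            for tok in state.tokens:
                if tok['type'] == 'heading':
                    level = tok['attrs']['level']
                    if min_level <= level <= max_level:
                        headings.append(tok)

            toc_items = []
            for i, tok in enumerate(headings):
                tok['attrs']['id'] = heading_id(tok, i)
                toc_items.append(normalize_toc_item(md, tok))

            # save the collected items into state.env, as documented
            state.env['toc_items'] = toc_items

        md.before_render_hooks.append(toc_hook)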
mistune.plugins.table.table_in_quote
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE mistune/src/mistune/plugins/table.py def table_in_list(md): """Enable table plugin in list.""" md.block.insert_rule(md.block.list_rules, 'table', before='paragraph') md.block.insert_rule(md.block.list_rules, 'nptable', before='paragraph') # FILE mistune/src/mistune/plugins/table.py def table(md): """A mistune plugin to support table, spec defined at https://michelf.ca/projects/php-markdown/extra/#table Here is an example: .. code-block:: text First Header | Second Header ------------- | ------------- Content Cell | Content Cell Content Cell | Content Cell :param md: Markdown instance """ md.block.register('table', TABLE_PATTERN, parse_table, before='paragraph') md.block.register('nptable', NP_TABLE_PATTERN, parse_nptable, before='paragraph') if md.renderer and md.renderer.NAME == 'html': md.renderer.register('table', render_table) md.renderer.register('table_head', render_table_head) md.renderer.register('table_body', render_table_body) md.renderer.register('table_row', render_table_row) md.renderer.register('table_cell', render_table_cell) # FILE mistune/src/mistune/plugins/table.py def render_table_head(renderer, text): return '<thead>\n<tr>\n' + text + '</tr>\n</thead>\n' # FILE mistune/src/mistune/plugins/table.py def render_table_body(renderer, text): return '<tbody>\n' + text + '</tbody>\n' # FILE mistune/src/mistune/plugins/table.py def render_table(renderer, text): return '<table>\n' + text + '</table>\n' # FILE mistune/src/mistune/plugins/table.py def render_table_row(renderer, text): return '<tr>\n' + text + '</tr>\n' # FILE mistune/src/mistune/plugins/table.py def render_table_cell(renderer, text, align=None, head=False): if head: tag = 'th' else: tag = 'td' html = ' <' + tag if align: html += ' style="text-align:' + align + '"' return html + '>' + text + '</' + tag + '>\n' Based on the information above, please complete the function: #CURRENT_FILE: mistune/src/mistune/plugins/table.py import re from ..helpers import PREVENT_BACKSLASH def table_in_quote(md): """Enable table plugin in block quotes."""
mistune/src/mistune/plugins/table.py
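The completion follows directly from the sibling `table_in_list` shown in this record's context (and the exact body appears in the next record's context), swapping `list_rules` for `block_quote_rules`:

    def table_in_quote(md):
        """Enable table plugin in block quotes."""
        md.block.insert_rule(md.block.block_quote_rules, 'table', before='paragraph')
        md.block.insert_rule(md.block.block_quote_rules, 'nptable', before='paragraph')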
mistune.plugins.table.table_in_list
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE mistune/src/mistune/plugins/table.py def table_in_quote(md): """Enable table plugin in block quotes.""" md.block.insert_rule(md.block.block_quote_rules, 'table', before='paragraph') md.block.insert_rule(md.block.block_quote_rules, 'nptable', before='paragraph') # FILE mistune/src/mistune/plugins/table.py def table(md): """A mistune plugin to support table, spec defined at https://michelf.ca/projects/php-markdown/extra/#table Here is an example: .. code-block:: text First Header | Second Header ------------- | ------------- Content Cell | Content Cell Content Cell | Content Cell :param md: Markdown instance """ md.block.register('table', TABLE_PATTERN, parse_table, before='paragraph') md.block.register('nptable', NP_TABLE_PATTERN, parse_nptable, before='paragraph') if md.renderer and md.renderer.NAME == 'html': md.renderer.register('table', render_table) md.renderer.register('table_head', render_table_head) md.renderer.register('table_body', render_table_body) md.renderer.register('table_row', render_table_row) md.renderer.register('table_cell', render_table_cell) Based on the information above, please complete the function: #CURRENT_FILE: mistune/src/mistune/plugins/table.py import re from ..helpers import PREVENT_BACKSLASH def table_in_list(md): """Enable table plugin in list."""
mistune/src/mistune/plugins/table.py
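Symmetrically, this record's target appears verbatim in the previous record's context:

    def table_in_list(md):
        """Enable table plugin in list."""
        md.block.insert_rule(md.block.list_rules, 'table', before='paragraph')
        md.block.insert_rule(md.block.list_rules, 'nptable', before='paragraph')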
xmnlp.utils.parallel_handler
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function: #CURRENT_FILE: xmnlp/xmnlp/utils/__init__.py import os import re import concurrent.futures as futures from functools import partial from typing import Any, Callable, List, Generator import numpy as np def parallel_handler(callback: Callable, texts: List[str], n_jobs: int = 2, **kwargs) -> Generator[ List[Any], None, None ]: """parallel handler Args: callback: callback function texts: List[str] n_jobs: int, pool size of threads Return: Generator[List[str]] """
xmnlp/xmnlp/utils/__init__.py
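A sketch of one plausible completion (untested; it assumes `callback` accepts a batch — a list of texts — and that results are yielded per batch, matching the Generator[List[Any]] return annotation; the batching strategy is an assumption):

    def parallel_handler(callback, texts, n_jobs=2, **kwargs):
        if kwargs:
            callback = partial(callback, **kwargs)
        # split texts into roughly n_jobs batches
        batch_size = max(1, (len(texts) + n_jobs - 1) // n_jobs)
        batches = [texts[i:i + batch_size] for i in range(0, len(texts), batch_size)]
        with futures.ThreadPoolExecutor(max_workers=n_jobs) as executor:
            for result in executor.map(callback, batches):
                yield result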
parsel.utils.shorten
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function: #CURRENT_FILE: parsel/parsel/utils.py import re from typing import Any, Iterable, Iterator, List, Match, Pattern, Union, cast from w3lib.html import replace_entities as w3lib_replace_entities def shorten(text: str, width: int, suffix: str = "...") -> str: """Truncate the given text to fit in the given width."""
parsel/parsel/utils.py
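A sketch handling the three regimes: the text already fits, there is room for a truncated body plus suffix, or only part of the suffix fits (the ValueError message is an assumption):

    def shorten(text: str, width: int, suffix: str = "...") -> str:
        """Truncate the given text to fit in the given width."""
        if len(text) <= width:
            return text
        if width > len(suffix):
            return text[: width - len(suffix)] + suffix
        if width >= 0:
            return suffix[len(suffix) - width:]
        raise ValueError("width must be equal or greater than 0")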
parsel.xpathfuncs.set_xpathfunc
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE parsel/parsel/xpathfuncs.py def setup() -> None: set_xpathfunc("has-class", has_class) Based on the information above, please complete the function: #CURRENT_FILE: parsel/parsel/xpathfuncs.py import re from typing import Any, Callable, Optional from lxml import etree from w3lib.html import HTML5_WHITESPACE def set_xpathfunc(fname: str, func: Optional[Callable]) -> None: # type: ignore[type-arg] """Register a custom extension function to use in XPath expressions. The function ``func`` registered under ``fname`` identifier will be called for every matching node, being passed a ``context`` parameter as well as any parameters passed from the corresponding XPath expression. If ``func`` is ``None``, the extension function will be removed. See more `in lxml documentation`_. .. _`in lxml documentation`: https://lxml.de/extensions.html#xpath-extension-functions """
parsel/parsel/xpathfuncs.py
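lxml exposes exactly the registry this function needs: `etree.FunctionNamespace(None)` returns the default (no-namespace) extension-function namespace, which supports item assignment and deletion. A likely completion:

    def set_xpathfunc(fname: str, func: Optional[Callable]) -> None:
        ns_fns = etree.FunctionNamespace(None)
        if func is not None:
            ns_fns[fname] = func
        else:
            del ns_fns[fname]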
dominate.dom_tag._get_thread_context
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE dominate/dominate/dom_tag.py def attr(*args, **kwargs): ''' Set attributes on the current active tag context ''' c = get_current() dicts = args + (kwargs,) for d in dicts: for attr, value in d.items(): c.set_attribute(*dom_tag.clean_pair(attr, value)) Based on the information above, please complete the function: #CURRENT_FILE: dominate/dominate/dom_tag.py import copy import numbers from collections import defaultdict, namedtuple from functools import wraps import threading from collections.abc import Callable from collections import Callable import greenlet from . import util def _get_thread_context():
dominate/dominate/dom_tag.py
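A sketch of the completion (given the unconditional `import greenlet` in the stub, dominate appears to track tag context per thread and per greenlet; hashing the pair gives one context key):

    def _get_thread_context():
        # identify the current execution context by thread + greenlet
        context = [threading.current_thread(), greenlet.getcurrent()]
        return hash(tuple(context))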
dominate.util.system
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function: #CURRENT_FILE: dominate/dominate/util.py import re from .dom_tag import dom_tag import subprocess def system(cmd, data=None): ''' pipes the output of a program '''
dominate/dominate/util.py
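A sketch using the `subprocess` import already present in the stub (the utf8 encoding choices are assumptions):

    def system(cmd, data=None):
        '''
        pipes the output of a program
        '''
        if data is not None and isinstance(data, str):
            data = data.encode('utf8')
        p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        out, err = p.communicate(data)
        return out.decode('utf8')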
dominate.util.url_unescape
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE dominate/dominate/util.py def unescape(data): ''' unescapes html entities. the opposite of escape. ''' cc = re.compile(r'&(?:(?:#(\d+))|([^;]+));') result = [] m = cc.search(data) while m: result.append(data[0:m.start()]) d = m.group(1) if d: d = int(d) result.append(unichr(d)) else: d = _unescape.get(m.group(2), ord('?')) result.append(unichr(d)) data = data[m.end():] m = cc.search(data) result.append(data) return ''.join(result) # FILE dominate/dominate/util.py def escape(data, quote=True): # stolen from std lib cgi ''' Escapes special characters into their html entities Replace special characters "&", "<" and ">" to HTML-safe sequences. If the optional flag quote is true, the quotation mark character (") is also translated. This is used to escape content that appears in the body of an HTML document ''' data = data.replace("&", "&amp;") # Must be done first! data = data.replace("<", "&lt;") data = data.replace(">", "&gt;") if quote: data = data.replace('"', "&quot;") return data Based on the information above, please complete the function: #CURRENT_FILE: dominate/dominate/util.py import re from .dom_tag import dom_tag import subprocess def url_unescape(data):
dominate/dominate/util.py
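A sketch: percent-decoding with the `re` import already in the stub (the stdlib `urllib.parse.unquote` would also work, but is not imported there):

    def url_unescape(data):
        # replace each %XX escape with the character it encodes
        return re.sub('%([0-9a-fA-F]{2})',
                      lambda m: chr(int(m.group(1), 16)), data)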
rows.fields.DatetimeField.serialize
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE rows/rows/fields.py def fields(self): possible, skip = self._possible_types, self._skip if possible: # Create a header with placeholder values for each detected column # and then join this placeholders with original header - the # original header may have less columns then the detected ones, so # we end with a full header having a name for every possible # column. placeholders = make_header(range(max(possible.keys()) + 1)) header = [a or b for a, b in zip_longest(self.field_names, placeholders)] else: header = self.field_names return OrderedDict( [ ( field_name, self.priority(*(possible[index] if index in possible else [])), ) for index, field_name in enumerate(header) if index not in skip ] ) # LIB six.py def b(s): return s # LIB six.py def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") Based on the information above, please complete the function: #CURRENT_FILE: rows/rows/fields.py from __future__ import unicode_literals import binascii import datetime import json import locale import re from base64 import b64decode, b64encode from collections import OrderedDict, defaultdict from decimal import Decimal, InvalidOperation from unicodedata import normalize import six from itertools import izip_longest as zip_longest from itertools import zip_longest class DatetimeField(Field): """Field class to represent date-time Is not locale-aware (does not need to be) """ TYPE = (datetime.datetime,) DATETIME_REGEXP = re.compile( "^([0-9]{4})-([0-9]{2})-([0-9]{2})[ T]" "([0-9]{2}):([0-9]{2}):([0-9]{2})$" ) @classmethod def serialize(cls, value, *args, **kwargs):
rows/rows/fields.py
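The expected body is shown verbatim inside the next record's context (`# FILE rows/rows/fields.py class DatetimeField`):

    @classmethod
    def serialize(cls, value, *args, **kwargs):
        if value is None:
            return ""
        return six.text_type(value.isoformat())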
rows.fields.Field.serialize
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE rows/rows/fields.py def is_null(value): if value is None: return True elif type(value) is six.binary_type: value = value.strip().lower() return not value or value in NULL_BYTES else: value_str = as_string(value).strip().lower() return not value_str or value_str in NULL # FILE rows/rows/fields.py def detect_types( field_names, field_values, field_types=DEFAULT_TYPES, skip_indexes=None, type_detector=TypeDetector, fallback_type=TextField, *args, **kwargs ): """Detect column types (or "where the magic happens")""" # TODO: look strategy of csv.Sniffer.has_header # TODO: may receive 'type hints' detector = type_detector( field_names, field_types=field_types, fallback_type=fallback_type, skip_indexes=skip_indexes, ) detector.feed(field_values) return detector.fields # FILE rows/rows/fields.py def fields(self): possible, skip = self._possible_types, self._skip if possible: # Create a header with placeholder values for each detected column # and then join this placeholders with original header - the # original header may have less columns then the detected ones, so # we end with a full header having a name for every possible # column. placeholders = make_header(range(max(possible.keys()) + 1)) header = [a or b for a, b in zip_longest(self.field_names, placeholders)] else: header = self.field_names return OrderedDict( [ ( field_name, self.priority(*(possible[index] if index in possible else [])), ) for index, field_name in enumerate(header) if index not in skip ] ) # FILE rows/rows/fields.py class DatetimeField(Field): """Field class to represent date-time Is not locale-aware (does not need to be) """ TYPE = (datetime.datetime,) DATETIME_REGEXP = re.compile( "^([0-9]{4})-([0-9]{2})-([0-9]{2})[ T]" "([0-9]{2}):([0-9]{2}):([0-9]{2})$" ) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" return six.text_type(value.isoformat()) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(DatetimeField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value) # TODO: may use iso8601 groups = cls.DATETIME_REGEXP.findall(value) if not groups: value_error(value, cls) else: return datetime.datetime(*[int(x) for x in groups[0]]) # LIB six.py def ensure_binary(s, encoding='utf-8', errors='strict'): """Coerce **s** to six.binary_type. For Python 2: - `unicode` -> encoded to `str` - `str` -> `str` For Python 3: - `str` -> encoded to `bytes` - `bytes` -> `bytes` """ if isinstance(s, binary_type): return s if isinstance(s, text_type): return s.encode(encoding, errors) raise TypeError("not expecting type '%s'" % type(s)) # FILE rows/rows/fields.py class DatetimeField(Field): """Field class to represent date-time Is not locale-aware (does not need to be) """ def serialize(cls, value, *args, **kwargs): ... def deserialize(cls, value, *args, **kwargs): value = super(DatetimeField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value) # TODO: may use iso8601 groups = cls.DATETIME_REGEXP.findall(value) if not groups: value_error(value, cls) else: return datetime.datetime(*[int(x) for x in groups[0]]) # FILE rows/rows/fields.py class IntegerField(Field): """Field class to represent integer Is locale-aware """ def serialize(cls, value, *args, **kwargs): ... 
def deserialize(cls, value, *args, **kwargs): value = super(IntegerField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value elif isinstance(value, float): new_value = int(value) if new_value != value: raise ValueError("It's float, not integer") else: value = new_value value = as_string(value) if value != "0" and value.startswith("0"): raise ValueError("It's string, not integer") return int(value) if SHOULD_NOT_USE_LOCALE else locale.atoi(value) # LIB six.py def ensure_text(s, encoding='utf-8', errors='strict'): """Coerce *s* to six.text_type. For Python 2: - `unicode` -> `unicode` - `str` -> `unicode` For Python 3: - `str` -> `str` - `bytes` -> decoded to `str` """ if isinstance(s, binary_type): return s.decode(encoding, errors) elif isinstance(s, text_type): return s else: raise TypeError("not expecting type '%s'" % type(s)) # FILE rows/rows/fields.py class IntegerField(Field): """Field class to represent integer Is locale-aware """ TYPE = (int,) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" if SHOULD_NOT_USE_LOCALE: return six.text_type(value) else: grouping = kwargs.get("grouping", None) return locale.format("%d", value, grouping=grouping) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(IntegerField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value elif isinstance(value, float): new_value = int(value) if new_value != value: raise ValueError("It's float, not integer") else: value = new_value value = as_string(value) if value != "0" and value.startswith("0"): raise ValueError("It's string, not integer") return int(value) if SHOULD_NOT_USE_LOCALE else locale.atoi(value) # FILE rows/rows/fields.py class DateField(Field): """Field class to represent date Is not locale-aware (does not need to be) """ TYPE = (datetime.date,) INPUT_FORMAT = "%Y-%m-%d" OUTPUT_FORMAT = "%Y-%m-%d" @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" return six.text_type(value.strftime(cls.OUTPUT_FORMAT)) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(DateField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value) dt_object = datetime.datetime.strptime(value, cls.INPUT_FORMAT) return datetime.date(dt_object.year, dt_object.month, dt_object.day) # FILE rows/rows/fields.py class JSONField(Field): """Field class to represent JSON-encoded strings Is not locale-aware (does not need to be) """ def serialize(cls, value, *args, **kwargs): ... def deserialize(cls, value, *args, **kwargs): value = super(JSONField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value else: return json.loads(value) # FILE rows/rows/fields.py class JSONField(Field): """Field class to represent JSON-encoded strings Is not locale-aware (does not need to be) """ TYPE = (list, dict) @classmethod def serialize(cls, value, *args, **kwargs): return json.dumps(value) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(JSONField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value else: return json.loads(value) # FILE rows/rows/fields.py class FloatField(Field): """Field class to represent float Is locale-aware """ def serialize(cls, value, *args, **kwargs): ... 
def deserialize(cls, value, *args, **kwargs): value = super(FloatField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value) if SHOULD_NOT_USE_LOCALE: return float(value) else: return locale.atof(value) # FILE rows/rows/fields.py class FloatField(Field): """Field class to represent float Is locale-aware """ TYPE = (float,) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" if SHOULD_NOT_USE_LOCALE: return six.text_type(value) else: grouping = kwargs.get("grouping", None) return locale.format("%f", value, grouping=grouping) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(FloatField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value) if SHOULD_NOT_USE_LOCALE: return float(value) else: return locale.atof(value) # FILE rows/rows/fields.py class BinaryField(Field): """Field class to represent byte arrays Is not locale-aware (does not need to be) """ def serialize(cls, value, *args, **kwargs): ... def deserialize(cls, value, *args, **kwargs): if value is not None: if isinstance(value, six.binary_type): return value elif isinstance(value, six.text_type): try: return b64decode(value) except (TypeError, ValueError, binascii.Error): raise ValueError("Can't decode base64") else: value_error(value, cls) else: return b"" # FILE rows/rows/fields.py class TextField(Field): """Field class to represent unicode strings Is not locale-aware (does not need to be) """ def deserialize(cls, value, *args, **kwargs): if value is None or isinstance(value, cls.TYPE): return value else: return as_string(value) # FILE rows/rows/fields.py class BoolField(Field): """Base class to representing boolean Is not locale-aware (if you need to, please customize by changing its attributes like `TRUE_VALUES` and `FALSE_VALUES`) """ TYPE = (bool,) SERIALIZED_VALUES = {True: "true", False: "false", None: ""} TRUE_VALUES = ("true", "yes") FALSE_VALUES = ("false", "no") @classmethod def serialize(cls, value, *args, **kwargs): # TODO: should we serialize `None` as well or give it to the plugin? 
return cls.SERIALIZED_VALUES[value] @classmethod def deserialize(cls, value, *args, **kwargs): value = super(BoolField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value).lower() if value in cls.TRUE_VALUES: return True elif value in cls.FALSE_VALUES: return False else: raise ValueError("Value is not boolean") # FILE rows/rows/fields.py def as_string(value): if isinstance(value, six.binary_type): raise ValueError("Binary is not supported") elif isinstance(value, six.text_type): return value else: return six.text_type(value) # FILE rows/rows/fields.py class BinaryField(Field): """Field class to represent byte arrays Is not locale-aware (does not need to be) """ TYPE = (six.binary_type,) @classmethod def serialize(cls, value, *args, **kwargs): if value is not None: if not isinstance(value, six.binary_type): value_error(value, cls) else: try: return b64encode(value).decode("ascii") except (TypeError, binascii.Error): return value else: return "" @classmethod def deserialize(cls, value, *args, **kwargs): if value is not None: if isinstance(value, six.binary_type): return value elif isinstance(value, six.text_type): try: return b64decode(value) except (TypeError, ValueError, binascii.Error): raise ValueError("Can't decode base64") else: value_error(value, cls) else: return b"" # FILE rows/rows/fields.py class TextField(Field): """Field class to represent unicode strings Is not locale-aware (does not need to be) """ TYPE = (six.text_type,) @classmethod def deserialize(cls, value, *args, **kwargs): if value is None or isinstance(value, cls.TYPE): return value else: return as_string(value) # LIB six.py def b(s): return s # LIB six.py def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") # FILE rows/rows/fields.py class DateField(Field): """Field class to represent date Is not locale-aware (does not need to be) """ def serialize(cls, value, *args, **kwargs): if value is None: return "" return six.text_type(value.strftime(cls.OUTPUT_FORMAT)) def deserialize(cls, value, *args, **kwargs): ... # FILE rows/rows/fields.py class PercentField(DecimalField): """Field class to represent percent values Is locale-aware (inherit this behaviour from `rows.DecimalField`) """ def serialize(cls, value, *args, **kwargs): if value is None: return "" elif value == Decimal("0"): return "0.00%" value = Decimal(six.text_type(value * 100)[:-2]) value = super(PercentField, cls).serialize(value, *args, **kwargs) return "{}%".format(value) def deserialize(cls, value, *args, **kwargs): ... # FILE rows/rows/fields.py class BoolField(Field): """Base class to representing boolean Is not locale-aware (if you need to, please customize by changing its attributes like `TRUE_VALUES` and `FALSE_VALUES`) """ def serialize(cls, value, *args, **kwargs): # TODO: should we serialize `None` as well or give it to the plugin? return cls.SERIALIZED_VALUES[value] def deserialize(cls, value, *args, **kwargs): ... # FILE rows/rows/fields.py class IntegerField(Field): """Field class to represent integer Is locale-aware """ def serialize(cls, value, *args, **kwargs): if value is None: return "" if SHOULD_NOT_USE_LOCALE: return six.text_type(value) else: grouping = kwargs.get("grouping", None) return locale.format("%d", value, grouping=grouping) def deserialize(cls, value, *args, **kwargs): ... 
# FILE rows/rows/fields.py class EmailField(TextField): """Field class to represent e-mail addresses Is not locale-aware (does not need to be) """ def serialize(cls, value, *args, **kwargs): if value is None: return "" return six.text_type(value) def deserialize(cls, value, *args, **kwargs): ... # FILE rows/rows/fields.py class DecimalField(Field): """Field class to represent decimal data (as Python's decimal.Decimal) Is locale-aware """ def serialize(cls, value, *args, **kwargs): if value is None: return "" value_as_string = six.text_type(value) if SHOULD_NOT_USE_LOCALE: return value_as_string else: grouping = kwargs.get("grouping", None) has_decimal_places = value_as_string.find(".") != -1 if not has_decimal_places: string_format = "%d" else: decimal_places = len(value_as_string.split(".")[1]) string_format = "%.{}f".format(decimal_places) return locale.format(string_format, value, grouping=grouping) def deserialize(cls, value, *args, **kwargs): ... # FILE rows/rows/fields.py class BinaryField(Field): """Field class to represent byte arrays Is not locale-aware (does not need to be) """ def serialize(cls, value, *args, **kwargs): if value is not None: if not isinstance(value, six.binary_type): value_error(value, cls) else: try: return b64encode(value).decode("ascii") except (TypeError, binascii.Error): return value else: return "" def deserialize(cls, value, *args, **kwargs): ... # FILE rows/rows/fields.py class FloatField(Field): """Field class to represent float Is locale-aware """ def serialize(cls, value, *args, **kwargs): if value is None: return "" if SHOULD_NOT_USE_LOCALE: return six.text_type(value) else: grouping = kwargs.get("grouping", None) return locale.format("%f", value, grouping=grouping) def deserialize(cls, value, *args, **kwargs): ... # FILE rows/rows/fields.py class JSONField(Field): """Field class to represent JSON-encoded strings Is not locale-aware (does not need to be) """ def serialize(cls, value, *args, **kwargs): return json.dumps(value) def deserialize(cls, value, *args, **kwargs): ... # FILE rows/rows/fields.py class DatetimeField(Field): """Field class to represent date-time Is not locale-aware (does not need to be) """ def serialize(cls, value, *args, **kwargs): if value is None: return "" return six.text_type(value.isoformat()) def deserialize(cls, value, *args, **kwargs): ... Based on the information above, please complete the function: #CURRENT_FILE: rows/rows/fields.py from __future__ import unicode_literals import binascii import datetime import json import locale import re from base64 import b64decode, b64encode from collections import OrderedDict, defaultdict from decimal import Decimal, InvalidOperation from unicodedata import normalize import six from itertools import izip_longest as zip_longest from itertools import zip_longest class Field(object): """Base Field class - all fields should inherit from this As the fallback for all other field types are the BinaryField, this Field actually implements what is expected in the BinaryField """ TYPE = (type(None),) @classmethod def serialize(cls, value, *args, **kwargs): """Serialize a value to be exported `cls.serialize` should always return an unicode value, except for BinaryField """
rows/rows/fields.py
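A sketch for the base `Field.serialize` (per its docstring it is the BinaryField-style fallback, so it does no conversion beyond mapping None to the empty string; this minimal form is an assumption):

    @classmethod
    def serialize(cls, value, *args, **kwargs):
        if value is None:
            value = ""
        return value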
rows.fields.EmailField.serialize
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE rows/rows/fields.py def unique_values(values): result = [] for value in values: if not is_null(value) and value not in result: result.append(value) return result # FILE rows/rows/fields.py def detect_types( field_names, field_values, field_types=DEFAULT_TYPES, skip_indexes=None, type_detector=TypeDetector, fallback_type=TextField, *args, **kwargs ): """Detect column types (or "where the magic happens")""" # TODO: look strategy of csv.Sniffer.has_header # TODO: may receive 'type hints' detector = type_detector( field_names, field_types=field_types, fallback_type=fallback_type, skip_indexes=skip_indexes, ) detector.feed(field_values) return detector.fields # FILE rows/rows/fields.py def is_null(value): if value is None: return True elif type(value) is six.binary_type: value = value.strip().lower() return not value or value in NULL_BYTES else: value_str = as_string(value).strip().lower() return not value_str or value_str in NULL # FILE rows/rows/fields.py class TextField(Field): """Field class to represent unicode strings Is not locale-aware (does not need to be) """ TYPE = (six.text_type,) @classmethod def deserialize(cls, value, *args, **kwargs): if value is None or isinstance(value, cls.TYPE): return value else: return as_string(value) # FILE rows/rows/fields.py def as_string(value): if isinstance(value, six.binary_type): raise ValueError("Binary is not supported") elif isinstance(value, six.text_type): return value else: return six.text_type(value) # LIB six.py def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") Based on the information above, please complete the function: #CURRENT_FILE: rows/rows/fields.py from __future__ import unicode_literals import binascii import datetime import json import locale import re from base64 import b64decode, b64encode from collections import OrderedDict, defaultdict from decimal import Decimal, InvalidOperation from unicodedata import normalize import six from itertools import izip_longest as zip_longest from itertools import zip_longest class EmailField(TextField): """Field class to represent e-mail addresses Is not locale-aware (does not need to be) """ EMAIL_REGEXP = re.compile( r"^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]+$", flags=re.IGNORECASE ) @classmethod def serialize(cls, value, *args, **kwargs):
rows/rows/fields.py
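The body appears verbatim in a neighboring record's context (`class EmailField(TextField)`):

    @classmethod
    def serialize(cls, value, *args, **kwargs):
        if value is None:
            return ""
        return six.text_type(value)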
rows.fields.as_string
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE rows/rows/fields.py def fields(self): possible, skip = self._possible_types, self._skip if possible: # Create a header with placeholder values for each detected column # and then join this placeholders with original header - the # original header may have less columns then the detected ones, so # we end with a full header having a name for every possible # column. placeholders = make_header(range(max(possible.keys()) + 1)) header = [a or b for a, b in zip_longest(self.field_names, placeholders)] else: header = self.field_names return OrderedDict( [ ( field_name, self.priority(*(possible[index] if index in possible else [])), ) for index, field_name in enumerate(header) if index not in skip ] ) # FILE rows/rows/fields.py class EmailField(TextField): """Field class to represent e-mail addresses Is not locale-aware (does not need to be) """ def serialize(cls, value, *args, **kwargs): if value is None: return "" return six.text_type(value) def deserialize(cls, value, *args, **kwargs): ... # LIB six.py def b(s): return s # LIB six.py def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") Based on the information above, please complete the function: #CURRENT_FILE: rows/rows/fields.py from __future__ import unicode_literals import binascii import datetime import json import locale import re from base64 import b64decode, b64encode from collections import OrderedDict, defaultdict from decimal import Decimal, InvalidOperation from unicodedata import normalize import six from itertools import izip_longest as zip_longest from itertools import zip_longest def as_string(value):
rows/rows/fields.py
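The target function is quoted verbatim in the context of other rows records:

    def as_string(value):
        if isinstance(value, six.binary_type):
            raise ValueError("Binary is not supported")
        elif isinstance(value, six.text_type):
            return value
        else:
            return six.text_type(value)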
rows.fields.get_items
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE rows/rows/fields.py def fields(self): possible, skip = self._possible_types, self._skip if possible: # Create a header with placeholder values for each detected column # and then join this placeholders with original header - the # original header may have less columns then the detected ones, so # we end with a full header having a name for every possible # column. placeholders = make_header(range(max(possible.keys()) + 1)) header = [a or b for a, b in zip_longest(self.field_names, placeholders)] else: header = self.field_names return OrderedDict( [ ( field_name, self.priority(*(possible[index] if index in possible else [])), ) for index, field_name in enumerate(header) if index not in skip ] ) # LIB six.py def b(s): return s # LIB six.py def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") # LIB six.py def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) Based on the information above, please complete the function: #CURRENT_FILE: rows/rows/fields.py from __future__ import unicode_literals import binascii import datetime import json import locale import re from base64 import b64decode, b64encode from collections import OrderedDict, defaultdict from decimal import Decimal, InvalidOperation from unicodedata import normalize import six from itertools import izip_longest as zip_longest from itertools import zip_longest def get_items(*indexes): """Return a callable that fetches the given indexes of an object Always return a tuple even when len(indexes) == 1. Similar to `operator.itemgetter`, but will insert `None` when the object does not have the desired index (instead of raising IndexError). """
rows/rows/fields.py
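A sketch that satisfies the docstring (always returns a tuple; yields None instead of raising IndexError):

    def get_items(*indexes):
        return lambda obj: tuple(
            obj[index] if len(obj) > index else None
            for index in indexes
        )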
pycorrector.proper_corrector.load_dict_file
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function: #CURRENT_FILE: pycorrector/pycorrector/proper_corrector.py import os from codecs import open import pypinyin from loguru import logger from pycorrector import config from pycorrector.utils.math_utils import edit_distance from pycorrector.utils.ngram_util import NgramUtil from pycorrector.utils.text_utils import is_chinese from pycorrector.utils.tokenizer import segment, split_2_short_text def load_dict_file(path): """ 加载词典 :param path: :return: """
pycorrector/pycorrector/proper_corrector.py
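A loose sketch only — the on-disk dictionary format (whitespace-separated "word value" pairs, '#' comment lines) is an assumption, not confirmed by the stub:

    def load_dict_file(path):
        """加载词典 (load a dictionary file into a dict)"""
        result = {}
        if path:
            if not os.path.exists(path):
                logger.warning('file not found: %s' % path)
                return result
            with open(path, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if not line or line.startswith('#'):
                        continue
                    terms = line.split()
                    if len(terms) < 2:
                        continue
                    result[terms[0]] = terms[1]
        return result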
natasha.span.envelop_spans
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE natasha/natasha/span.py def adapt_spans(spans): for span in spans: yield Span(span.start, span.stop, span.type) # FILE natasha/natasha/span.py class Span(Record): __attributes__ = ['start', 'stop', 'type'] Based on the information above, please complete the function: #CURRENT_FILE: natasha/natasha/span.py from .record import Record def envelop_spans(spans, envelopes):
natasha/natasha/span.py
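A quadratic but straightforward sketch (the library may instead do a linear merge over sorted spans; "envelops" is read as start/stop containment):

    def envelop_spans(spans, envelopes):
        spans = list(spans)
        for envelope in envelopes:
            yield [
                span for span in spans
                if envelope.start <= span.start and span.stop <= envelope.stop
            ]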
googleapiclient._helpers.parse_unique_urlencoded
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE google-api-python-client/googleapiclient/_helpers.py def _add_query_parameter(url, name, value): """Adds a query parameter to a url. Replaces the current value if it already exists in the URL. Args: url: string, url to add the query parameter to. name: string, query parameter name. value: string, query parameter value. Returns: Updated query parameter. Does not update the url if value is None. """ if value is None: return url else: return update_query_params(url, {name: value}) # FILE google-api-python-client/googleapiclient/_helpers.py def update_query_params(uri, params): """Updates a URI with new query parameters. If a given key from ``params`` is repeated in the ``uri``, then the URI will be considered invalid and an error will occur. If the URI is valid, then each value from ``params`` will replace the corresponding value in the query parameters (if it exists). Args: uri: string, A valid URI, with potential existing query parameters. params: dict, A dictionary of query parameters. Returns: The same URI but with the new query parameters added. """ parts = urllib.parse.urlparse(uri) query_params = parse_unique_urlencoded(parts.query) query_params.update(params) new_query = urllib.parse.urlencode(query_params) new_parts = parts._replace(query=new_query) return urllib.parse.urlunparse(new_parts) Based on the information above, please complete the function: #CURRENT_FILE: google-api-python-client/googleapiclient/_helpers.py import functools import inspect import logging import urllib def parse_unique_urlencoded(content): """Parses unique key-value parameters from urlencoded content. Args: content: string, URL-encoded key-value pairs. Returns: dict, The key-value pairs from ``content``. Raises: ValueError: if one of the keys is repeated. """
google-api-python-client/googleapiclient/_helpers.py
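A likely completion, largely determined by the docstring plus the `update_query_params` caller shown above (parse with `parse_qs`, reject repeated keys, unwrap single-element value lists):

    def parse_unique_urlencoded(content):
        urlencoded_params = urllib.parse.parse_qs(content)
        params = {}
        for key, value in urlencoded_params.items():
            if len(value) != 1:
                msg = "URL-encoded content contains a repeated value: %s -> %s" % (
                    key, ", ".join(value))
                raise ValueError(msg)
            params[key] = value[0]
        return params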
jinja2.async_utils.auto_aiter
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE Jinja2/src/jinja2/async_utils.py async def auto_to_list( value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]", ) -> t.List["V"]: return [x async for x in auto_aiter(value)] # FILE Jinja2/src/jinja2/async_utils.py async def auto_await(value: t.Union[t.Awaitable["V"], "V"]) -> "V": # Avoid a costly call to isawaitable if type(value) in _common_primitives: return t.cast("V", value) if inspect.isawaitable(value): return await t.cast("t.Awaitable[V]", value) return t.cast("V", value) Based on the information above, please complete the function: #CURRENT_FILE: Jinja2/src/jinja2/async_utils.py import inspect import typing as t from functools import WRAPPER_ASSIGNMENTS from functools import wraps from .utils import _PassArg from .utils import pass_eval_context async def auto_aiter( iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]", ) -> "t.AsyncIterator[V]":
Jinja2/src/jinja2/async_utils.py
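A likely completion (delegate to async iteration when available, otherwise adapt the sync iterable — this is what the `auto_to_list` helper above relies on):

    async def auto_aiter(
        iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
    ) -> "t.AsyncIterator[V]":
        if hasattr(iterable, "__aiter__"):
            async for item in t.cast("t.AsyncIterable[V]", iterable):
                yield item
        else:
            for item in t.cast("t.Iterable[V]", iterable):
                yield item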
jinja2.utils.consume
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE Jinja2/src/jinja2/utils.py class LRUCache: """A simple LRU Cache implementation.""" def __init__(self, capacity: int) -> None: self.capacity = capacity self._mapping: t.Dict[t.Any, t.Any] = {} self._queue: "te.Deque[t.Any]" = deque() self._postinit() def _postinit(self) -> None: ... def __getstate__(self) -> t.Mapping[str, t.Any]: ... def __setstate__(self, d: t.Mapping[str, t.Any]) -> None: ... def __getnewargs__(self) -> t.Tuple: ... def copy(self) -> "LRUCache": """Return a shallow copy of the instance.""" ... def get(self, key: t.Any, default: t.Any = None) -> t.Any: """Return an item from the cache dict or `default`""" ... def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any: """Set `default` if the key is not in the cache otherwise leave unchanged. Return the value of this key. """ ... def clear(self) -> None: """Clear the cache.""" ... def __contains__(self, key: t.Any) -> bool: """Check if a key exists in this cache.""" ... def __len__(self) -> int: """Return the current size of the cache.""" ... def __repr__(self) -> str: ... def __getitem__(self, key: t.Any) -> t.Any: """Get an item from the cache. Moves the item up so that it has the highest priority then. Raise a `KeyError` if it does not exist. """ ... def __setitem__(self, key: t.Any, value: t.Any) -> None: """Sets the value for an item. Moves the item up so that it has the highest priority then. """ ... def __delitem__(self, key: t.Any) -> None: """Remove an item from the cache dict. Raise a `KeyError` if it does not exist. """ ... def items(self) -> t.Iterable[t.Tuple[t.Any, t.Any]]: """Return a list of items.""" ... def values(self) -> t.Iterable[t.Any]: """Return a list of all values.""" return [x[1] for x in self.items()] def keys(self) -> t.Iterable[t.Any]: """Return a list of all keys ordered by most recent usage.""" ... def __iter__(self) -> t.Iterator[t.Any]: ... def __reversed__(self) -> t.Iterator[t.Any]: """Iterate over the keys in the cache dict, oldest items coming first. """ ... # FILE Jinja2/src/jinja2/runtime.py class Undefined: """The default undefined type. This undefined type can be printed and iterated over, but every other access will raise an :exc:`UndefinedError`: >>> foo = Undefined(name='foo') >>> str(foo) '' >>> not foo True >>> foo + 42 Traceback (most recent call last): ... jinja2.exceptions.UndefinedError: 'foo' is undefined """ def __init__( self, hint: t.Optional[str] = None, obj: t.Any = missing, name: t.Optional[str] = None, exc: t.Type[TemplateRuntimeError] = UndefinedError, ) -> None: self._undefined_hint = hint self._undefined_obj = obj self._undefined_name = name self._undefined_exception = exc def _undefined_message(self) -> str: """Build a message about the undefined value based on how it was accessed. """ ... def _fail_with_undefined_error( """Raise an :exc:`UndefinedError` when operations are performed on the undefined value. """ ... def __getattr__(self, name: str) -> t.Any: ... def __eq__(self, other: t.Any) -> bool: ... def __ne__(self, other: t.Any) -> bool: ... def __hash__(self) -> int: ... def __str__(self) -> str: ... def __len__(self) -> int: ... def __iter__(self) -> t.Iterator[t.Any]: yield from () def __bool__(self) -> bool: ... def __repr__(self) -> str: ... 
# FILE Jinja2/src/jinja2/utils.py class LRUCache: """A simple LRU Cache implementation.""" def __init__(self, capacity: int) -> None: self.capacity = capacity self._mapping: t.Dict[t.Any, t.Any] = {} self._queue: "te.Deque[t.Any]" = deque() self._postinit() def _postinit(self) -> None: ... def __getstate__(self) -> t.Mapping[str, t.Any]: ... def __setstate__(self, d: t.Mapping[str, t.Any]) -> None: ... def __getnewargs__(self) -> t.Tuple: ... def copy(self) -> "LRUCache": """Return a shallow copy of the instance.""" ... def get(self, key: t.Any, default: t.Any = None) -> t.Any: """Return an item from the cache dict or `default`""" ... def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any: """Set `default` if the key is not in the cache otherwise leave unchanged. Return the value of this key. """ ... def clear(self) -> None: """Clear the cache.""" ... def __contains__(self, key: t.Any) -> bool: """Check if a key exists in this cache.""" ... def __len__(self) -> int: """Return the current size of the cache.""" ... def __repr__(self) -> str: ... def __getitem__(self, key: t.Any) -> t.Any: """Get an item from the cache. Moves the item up so that it has the highest priority then. Raise a `KeyError` if it does not exist. """ ... def __setitem__(self, key: t.Any, value: t.Any) -> None: """Sets the value for an item. Moves the item up so that it has the highest priority then. """ ... def __delitem__(self, key: t.Any) -> None: """Remove an item from the cache dict. Raise a `KeyError` if it does not exist. """ ... def items(self) -> t.Iterable[t.Tuple[t.Any, t.Any]]: """Return a list of items.""" result = [(key, self._mapping[key]) for key in list(self._queue)] result.reverse() return result def values(self) -> t.Iterable[t.Any]: """Return a list of all values.""" ... def keys(self) -> t.Iterable[t.Any]: """Return a list of all keys ordered by most recent usage.""" ... def __iter__(self) -> t.Iterator[t.Any]: ... def __reversed__(self) -> t.Iterator[t.Any]: """Iterate over the keys in the cache dict, oldest items coming first. """ ... # FILE Jinja2/src/jinja2/utils.py class LRUCache: """A simple LRU Cache implementation.""" def __init__(self, capacity: int) -> None: self.capacity = capacity self._mapping: t.Dict[t.Any, t.Any] = {} self._queue: "te.Deque[t.Any]" = deque() self._postinit() def _postinit(self) -> None: ... def __getstate__(self) -> t.Mapping[str, t.Any]: ... def __setstate__(self, d: t.Mapping[str, t.Any]) -> None: ... def __getnewargs__(self) -> t.Tuple: ... def copy(self) -> "LRUCache": """Return a shallow copy of the instance.""" ... def get(self, key: t.Any, default: t.Any = None) -> t.Any: """Return an item from the cache dict or `default`""" ... def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any: """Set `default` if the key is not in the cache otherwise leave unchanged. Return the value of this key. """ ... def clear(self) -> None: """Clear the cache.""" ... def __contains__(self, key: t.Any) -> bool: """Check if a key exists in this cache.""" ... def __len__(self) -> int: """Return the current size of the cache.""" ... def __repr__(self) -> str: ... def __getitem__(self, key: t.Any) -> t.Any: """Get an item from the cache. Moves the item up so that it has the highest priority then. Raise a `KeyError` if it does not exist. """ ... def __setitem__(self, key: t.Any, value: t.Any) -> None: """Sets the value for an item. Moves the item up so that it has the highest priority then. """ ... 
def __delitem__(self, key: t.Any) -> None: """Remove an item from the cache dict. Raise a `KeyError` if it does not exist. """ ... def items(self) -> t.Iterable[t.Tuple[t.Any, t.Any]]: """Return a list of items.""" ... def values(self) -> t.Iterable[t.Any]: """Return a list of all values.""" ... def keys(self) -> t.Iterable[t.Any]: """Return a list of all keys ordered by most recent usage.""" ... def __iter__(self) -> t.Iterator[t.Any]: return reversed(tuple(self._queue)) def __reversed__(self) -> t.Iterator[t.Any]: """Iterate over the keys in the cache dict, oldest items coming first. """ ... # FILE Jinja2/src/jinja2/utils.py class Cycler: """Cycle through values by yield them one at a time, then restarting once the end is reached. Available as ``cycler`` in templates. Similar to ``loop.cycle``, but can be used outside loops or across multiple loops. For example, render a list of folders and files in a list, alternating giving them "odd" and "even" classes. .. code-block:: html+jinja {% set row_class = cycler("odd", "even") %} <ul class="browser"> {% for folder in folders %} <li class="folder {{ row_class.next() }}">{{ folder }} {% endfor %} {% for file in files %} <li class="file {{ row_class.next() }}">{{ file }} {% endfor %} </ul> :param items: Each positional argument will be yielded in the order given for each cycle. .. versionadded:: 2.1 """ def __init__(self, *items: t.Any) -> None: if not items: raise RuntimeError("at least one item has to be provided") self.items = items self.pos = 0 def reset(self) -> None: """Resets the current item to the first item.""" ... def current(self) -> t.Any: """Return the current item. Equivalent to the item that will be returned next time :meth:`next` is called. """ ... def next(self) -> t.Any: """Return the current item, then advance :attr:`current` to the next item. """ rv = self.current self.pos = (self.pos + 1) % len(self.items) return rv Based on the information above, please complete the function: #CURRENT_FILE: Jinja2/src/jinja2/utils.py import enum import json import os import re import typing as t from collections import abc from collections import deque from random import choice from random import randrange from threading import Lock from types import CodeType from urllib.parse import quote_from_bytes import markupsafe import typing_extensions as te from .runtime import Undefined from .environment import get_spontaneous_environment from .lexer import _lexer_cache from pprint import pformat from .constants import LOREM_IPSUM_WORDS def consume(iterable: t.Iterable[t.Any]) -> None: """Consumes an iterable without doing anything with it."""
Jinja2/src/jinja2/utils.py
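The docstring fully specifies this one:

    def consume(iterable: t.Iterable[t.Any]) -> None:
        """Consumes an iterable without doing anything with it."""
        for _ in iterable:
            pass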
pycorrector.utils.tokenizer.segment
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE pycorrector/pycorrector/utils/tokenizer.py def split_text_by_maxlen(text, maxlen=512): """ 文本切分为句子,以句子maxlen切分 :param text: str :param maxlen: int, 最大长度 :return: list, (sentence, idx) """ result = [] for i in range(0, len(text), maxlen): result.append((text[i:i + maxlen], i)) return result # FILE pycorrector/pycorrector/utils/tokenizer.py class Tokenizer(object): def __init__(self, dict_path='', custom_word_freq_dict=None, custom_confusion_dict=None): self.model = jieba jieba.setLogLevel("ERROR") # 初始化大词典 if os.path.exists(dict_path): self.model.set_dictionary(dict_path) # 加载用户自定义词典 if custom_word_freq_dict: for w, f in custom_word_freq_dict.items(): self.model.add_word(w, freq=f) # 加载混淆集词典 if custom_confusion_dict: for k, word in custom_confusion_dict.items(): # 添加到分词器的自定义词典中 self.model.add_word(k) self.model.add_word(word) def tokenize(self, unicode_sentence, mode="search"): """ 切词并返回切词位置, search mode用于错误扩召回 :param unicode_sentence: query :param mode: search, default, ngram :param HMM: enable HMM :return: (w, start, start + width) model='default' """ if mode == 'ngram': n = 2 result_set = set() tokens = self.model.lcut(unicode_sentence) tokens_len = len(tokens) start = 0 for i in range(0, tokens_len): w = tokens[i] width = len(w) result_set.add((w, start, start + width)) for j in range(i, i + n): gram = "".join(tokens[i:j + 1]) gram_width = len(gram) if i + j > tokens_len: break result_set.add((gram, start, start + gram_width)) start += width results = list(result_set) result = sorted(results, key=lambda x: x[-1]) else: result = list(self.model.tokenize(unicode_sentence, mode=mode)) return result # FILE pycorrector/pycorrector/utils/tokenizer.py def tokenize_words(text): """Word segmentation""" output = [] sentences = split_2_short_text(text, include_symbol=True) for sentence, idx in sentences: if is_chinese_string(sentence): import jieba output.extend(jieba.lcut(sentence)) else: output.extend(whitespace_tokenize(sentence)) return output # FILE pycorrector/pycorrector/utils/tokenizer.py def split_2_short_text(text, include_symbol=True): """ 文本切分为句子,以标点符号切分 :param text: str :param include_symbol: bool :return: (sentence, idx) """ result = [] sentences = re_han.split(text) start_idx = 0 for sentence in sentences: if not sentence: continue if include_symbol: result.append((sentence, start_idx)) else: if re_han.match(sentence): result.append((sentence, start_idx)) start_idx += len(sentence) return result # FILE pycorrector/pycorrector/utils/text_utils.py def is_chinese_string(string): """判断是否全为汉字""" return all(is_chinese(c) for c in string) # FILE pycorrector/pycorrector/utils/tokenizer.py def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a peice of text.""" tokens = [] if not text: return tokens sents = split_2_short_text(text, include_symbol=True) for sent, idx in sents: tokens.extend(sent.split()) return tokens # FILE pycorrector/pycorrector/utils/tokenizer.py class FullTokenizer(object): """Given Full tokenization.""" def __init__(self, lower=True): self.lower = lower def tokenize(self, text): """Tokenizes a piece of text.""" res = [] if len(text) == 0: return res if self.lower: text = text.lower() # for the multilingual and Chinese res = tokenize_words(text) return res # FILE pycorrector/pycorrector/utils/tokenizer.py class Tokenizer(object): def __init__(self, dict_path='', custom_word_freq_dict=None, custom_confusion_dict=None): self.model = jieba 
jieba.setLogLevel("ERROR") # 初始化大词典 if os.path.exists(dict_path): self.model.set_dictionary(dict_path) # 加载用户自定义词典 if custom_word_freq_dict: for w, f in custom_word_freq_dict.items(): self.model.add_word(w, freq=f) # 加载混淆集词典 if custom_confusion_dict: for k, word in custom_confusion_dict.items(): # 添加到分词器的自定义词典中 self.model.add_word(k) self.model.add_word(word) def tokenize(self, unicode_sentence, mode="search"): """ 切词并返回切词位置, search mode用于错误扩召回 :param unicode_sentence: query :param mode: search, default, ngram :param HMM: enable HMM :return: (w, start, start + width) model='default' """ if mode == 'ngram': n = 2 result_set = set() tokens = self.model.lcut(unicode_sentence) tokens_len = len(tokens) start = 0 for i in range(0, tokens_len): w = tokens[i] width = len(w) result_set.add((w, start, start + width)) for j in range(i, i + n): gram = "".join(tokens[i:j + 1]) gram_width = len(gram) if i + j > tokens_len: break result_set.add((gram, start, start + gram_width)) start += width results = list(result_set) result = sorted(results, key=lambda x: x[-1]) else: result = list(self.model.tokenize(unicode_sentence, mode=mode)) return result Based on the information above, please complete the function: #CURRENT_FILE: pycorrector/pycorrector/utils/tokenizer.py import os import re import jieba from jieba import posseg from pycorrector.utils.text_utils import is_chinese_string import jieba def segment(sentence, cut_type='word', pos=False): """ 切词 :param sentence: :param cut_type: 'word' use jieba.lcut; 'char' use list(sentence) :param pos: enable POS :return: list """
pycorrector/pycorrector/utils/tokenizer.py
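A sketch covering the four combinations of cut_type x pos described in the docstring (the char+pos branch, which tags each character via posseg, is an assumption):

    def segment(sentence, cut_type='word', pos=False):
        if pos:
            if cut_type == 'word':
                word_pos_seq = posseg.lcut(sentence)
                word_seq, pos_seq = [], []
                for w, p in word_pos_seq:
                    word_seq.append(w)
                    pos_seq.append(p)
                return word_seq, pos_seq
            elif cut_type == 'char':
                word_seq = list(sentence)
                pos_seq = [posseg.lcut(w)[0].flag for w in word_seq]
                return word_seq, pos_seq
        else:
            if cut_type == 'word':
                return jieba.lcut(sentence)
            elif cut_type == 'char':
                return list(sentence)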
jinja2.utils.object_type_repr
You are a Python programmer. Here is all the context you may find useful to complete the function: # LIB typing_extensions.py def final(f): """This decorator can be used to indicate to type checkers that the decorated method cannot be overridden, and decorated class cannot be subclassed. For example: class Base: @final def done(self) -> None: ... class Sub(Base): def done(self) -> None: # Error reported by type checker ... @final class Leaf: ... class Other(Leaf): # Error reported by type checker ... There is no runtime checking of these properties. The decorator sets the ``__final__`` attribute to ``True`` on the decorated object to allow runtime introspection. """ try: f.__final__ = True except (AttributeError, TypeError): # Skip the attribute silently if it is not writable. # AttributeError happens if the object has __slots__ or a # read-only property, TypeError if it's a builtin class. pass return f # FILE Jinja2/src/jinja2/utils.py class LRUCache: """A simple LRU Cache implementation.""" def __init__(self, capacity: int) -> None: self.capacity = capacity self._mapping: t.Dict[t.Any, t.Any] = {} self._queue: "te.Deque[t.Any]" = deque() self._postinit() def _postinit(self) -> None: ... def __getstate__(self) -> t.Mapping[str, t.Any]: ... def __setstate__(self, d: t.Mapping[str, t.Any]) -> None: ... def __getnewargs__(self) -> t.Tuple: ... def copy(self) -> "LRUCache": """Return a shallow copy of the instance.""" ... def get(self, key: t.Any, default: t.Any = None) -> t.Any: """Return an item from the cache dict or `default`""" try: return self[key] except KeyError: return default def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any: """Set `default` if the key is not in the cache otherwise leave unchanged. Return the value of this key. """ ... def clear(self) -> None: """Clear the cache.""" ... def __contains__(self, key: t.Any) -> bool: """Check if a key exists in this cache.""" ... def __len__(self) -> int: """Return the current size of the cache.""" ... def __repr__(self) -> str: ... def __getitem__(self, key: t.Any) -> t.Any: """Get an item from the cache. Moves the item up so that it has the highest priority then. Raise a `KeyError` if it does not exist. """ ... def __setitem__(self, key: t.Any, value: t.Any) -> None: """Sets the value for an item. Moves the item up so that it has the highest priority then. """ ... def __delitem__(self, key: t.Any) -> None: """Remove an item from the cache dict. Raise a `KeyError` if it does not exist. """ ... def items(self) -> t.Iterable[t.Tuple[t.Any, t.Any]]: """Return a list of items.""" ... def values(self) -> t.Iterable[t.Any]: """Return a list of all values.""" ... def keys(self) -> t.Iterable[t.Any]: """Return a list of all keys ordered by most recent usage.""" ... def __iter__(self) -> t.Iterator[t.Any]: ... def __reversed__(self) -> t.Iterator[t.Any]: """Iterate over the keys in the cache dict, oldest items coming first. """ ... 
Based on the information above, please complete the function: #CURRENT_FILE: Jinja2/src/jinja2/utils.py import enum import json import os import re import typing as t from collections import abc from collections import deque from random import choice from random import randrange from threading import Lock from types import CodeType from urllib.parse import quote_from_bytes import markupsafe import typing_extensions as te from .runtime import Undefined from .environment import get_spontaneous_environment from .lexer import _lexer_cache from pprint import pformat from .constants import LOREM_IPSUM_WORDS def object_type_repr(obj: t.Any) -> str: """Returns the name of the object's type. For some recognized singletons the name of the object is returned instead. (For example for `None` and `Ellipsis`). """
Jinja2/src/jinja2/utils.py
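A minimal sketch of `object_type_repr`, consistent with its docstring: special-case the `None` and `Ellipsis` singletons, otherwise report the type name. Qualifying non-builtin types with their module is an assumption here; the sketch relies on the module's existing `import typing as t`:

    def object_type_repr(obj: t.Any) -> str:
        if obj is None:
            return "None"
        elif obj is Ellipsis:
            return "Ellipsis"

        cls = type(obj)

        # builtin types read better without a module prefix
        if cls.__module__ == "builtins":
            return f"{cls.__name__} object"

        return f"{cls.__module__}.{cls.__name__} object"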
jinja2.utils.LRUCache.setdefault
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE Jinja2/src/jinja2/utils.py class Namespace: """A namespace object that can hold arbitrary attributes. It may be initialized from a dictionary or with keyword arguments.""" def __init__(*args: t.Any, **kwargs: t.Any) -> None: # noqa: B902 self, args = args[0], args[1:] self.__attrs = dict(*args, **kwargs) def __getattribute__(self, name: str) -> t.Any: ... def __setitem__(self, name: str, value: t.Any) -> None: self.__attrs[name] = value def __repr__(self) -> str: ... # FILE Jinja2/src/jinja2/utils.py class Namespace: """A namespace object that can hold arbitrary attributes. It may be initialized from a dictionary or with keyword arguments.""" def __init__(*args: t.Any, **kwargs: t.Any) -> None: # noqa: B902 self, args = args[0], args[1:] self.__attrs = dict(*args, **kwargs) def __getattribute__(self, name: str) -> t.Any: ... def __setitem__(self, name: str, value: t.Any) -> None: ... def __repr__(self) -> str: return f"<Namespace {self.__attrs!r}>" # FILE Jinja2/src/jinja2/runtime.py class Undefined: """The default undefined type. This undefined type can be printed and iterated over, but every other access will raise an :exc:`UndefinedError`: >>> foo = Undefined(name='foo') >>> str(foo) '' >>> not foo True >>> foo + 42 Traceback (most recent call last): ... jinja2.exceptions.UndefinedError: 'foo' is undefined """ def __init__( self, hint: t.Optional[str] = None, obj: t.Any = missing, name: t.Optional[str] = None, exc: t.Type[TemplateRuntimeError] = UndefinedError, ) -> None: self._undefined_hint = hint self._undefined_obj = obj self._undefined_name = name self._undefined_exception = exc def _undefined_message(self) -> str: """Build a message about the undefined value based on how it was accessed. """ ... def _fail_with_undefined_error( """Raise an :exc:`UndefinedError` when operations are performed on the undefined value. """ ... def __getattr__(self, name: str) -> t.Any: ... def __eq__(self, other: t.Any) -> bool: return type(self) is type(other) def __ne__(self, other: t.Any) -> bool: ... def __hash__(self) -> int: ... def __str__(self) -> str: ... def __len__(self) -> int: ... def __iter__(self) -> t.Iterator[t.Any]: ... def __bool__(self) -> bool: ... def __repr__(self) -> str: ... # FILE Jinja2/src/jinja2/runtime.py class Undefined: """The default undefined type. This undefined type can be printed and iterated over, but every other access will raise an :exc:`UndefinedError`: >>> foo = Undefined(name='foo') >>> str(foo) '' >>> not foo True >>> foo + 42 Traceback (most recent call last): ... jinja2.exceptions.UndefinedError: 'foo' is undefined """ def __init__( self, hint: t.Optional[str] = None, obj: t.Any = missing, name: t.Optional[str] = None, exc: t.Type[TemplateRuntimeError] = UndefinedError, ) -> None: self._undefined_hint = hint self._undefined_obj = obj self._undefined_name = name self._undefined_exception = exc def _undefined_message(self) -> str: """Build a message about the undefined value based on how it was accessed. """ ... def _fail_with_undefined_error( """Raise an :exc:`UndefinedError` when operations are performed on the undefined value. """ ... def __getattr__(self, name: str) -> t.Any: ... def __eq__(self, other: t.Any) -> bool: ... def __ne__(self, other: t.Any) -> bool: return not self.__eq__(other) def __hash__(self) -> int: ... def __str__(self) -> str: ... def __len__(self) -> int: ... def __iter__(self) -> t.Iterator[t.Any]: ... 
def __bool__(self) -> bool: ... def __repr__(self) -> str: ... # FILE Jinja2/src/jinja2/runtime.py class Undefined: """The default undefined type. This undefined type can be printed and iterated over, but every other access will raise an :exc:`UndefinedError`: >>> foo = Undefined(name='foo') >>> str(foo) '' >>> not foo True >>> foo + 42 Traceback (most recent call last): ... jinja2.exceptions.UndefinedError: 'foo' is undefined """ def __init__( self, hint: t.Optional[str] = None, obj: t.Any = missing, name: t.Optional[str] = None, exc: t.Type[TemplateRuntimeError] = UndefinedError, ) -> None: self._undefined_hint = hint self._undefined_obj = obj self._undefined_name = name self._undefined_exception = exc def _undefined_message(self) -> str: """Build a message about the undefined value based on how it was accessed. """ ... def _fail_with_undefined_error( """Raise an :exc:`UndefinedError` when operations are performed on the undefined value. """ ... def __getattr__(self, name: str) -> t.Any: ... def __eq__(self, other: t.Any) -> bool: ... def __ne__(self, other: t.Any) -> bool: ... def __hash__(self) -> int: return id(type(self)) def __str__(self) -> str: ... def __len__(self) -> int: ... def __iter__(self) -> t.Iterator[t.Any]: ... def __bool__(self) -> bool: ... def __repr__(self) -> str: ... # FILE Jinja2/src/jinja2/runtime.py class Undefined: """The default undefined type. This undefined type can be printed and iterated over, but every other access will raise an :exc:`UndefinedError`: >>> foo = Undefined(name='foo') >>> str(foo) '' >>> not foo True >>> foo + 42 Traceback (most recent call last): ... jinja2.exceptions.UndefinedError: 'foo' is undefined """ def __init__( self, hint: t.Optional[str] = None, obj: t.Any = missing, name: t.Optional[str] = None, exc: t.Type[TemplateRuntimeError] = UndefinedError, ) -> None: self._undefined_hint = hint self._undefined_obj = obj self._undefined_name = name self._undefined_exception = exc def _undefined_message(self) -> str: """Build a message about the undefined value based on how it was accessed. """ ... def _fail_with_undefined_error( """Raise an :exc:`UndefinedError` when operations are performed on the undefined value. """ ... def __getattr__(self, name: str) -> t.Any: ... def __eq__(self, other: t.Any) -> bool: ... def __ne__(self, other: t.Any) -> bool: ... def __hash__(self) -> int: ... def __str__(self) -> str: return "" def __len__(self) -> int: ... def __iter__(self) -> t.Iterator[t.Any]: ... def __bool__(self) -> bool: ... def __repr__(self) -> str: ... # FILE Jinja2/src/jinja2/runtime.py class Undefined: """The default undefined type. This undefined type can be printed and iterated over, but every other access will raise an :exc:`UndefinedError`: >>> foo = Undefined(name='foo') >>> str(foo) '' >>> not foo True >>> foo + 42 Traceback (most recent call last): ... jinja2.exceptions.UndefinedError: 'foo' is undefined """ def __init__( self, hint: t.Optional[str] = None, obj: t.Any = missing, name: t.Optional[str] = None, exc: t.Type[TemplateRuntimeError] = UndefinedError, ) -> None: self._undefined_hint = hint self._undefined_obj = obj self._undefined_name = name self._undefined_exception = exc def _undefined_message(self) -> str: """Build a message about the undefined value based on how it was accessed. """ ... def _fail_with_undefined_error( """Raise an :exc:`UndefinedError` when operations are performed on the undefined value. """ ... def __getattr__(self, name: str) -> t.Any: ... def __eq__(self, other: t.Any) -> bool: ... 
def __ne__(self, other: t.Any) -> bool: ... def __hash__(self) -> int: ... def __str__(self) -> str: ... def __len__(self) -> int: return 0 def __iter__(self) -> t.Iterator[t.Any]: ... def __bool__(self) -> bool: ... def __repr__(self) -> str: ... # FILE Jinja2/src/jinja2/runtime.py class Undefined: """The default undefined type. This undefined type can be printed and iterated over, but every other access will raise an :exc:`UndefinedError`: >>> foo = Undefined(name='foo') >>> str(foo) '' >>> not foo True >>> foo + 42 Traceback (most recent call last): ... jinja2.exceptions.UndefinedError: 'foo' is undefined """ def __init__( self, hint: t.Optional[str] = None, obj: t.Any = missing, name: t.Optional[str] = None, exc: t.Type[TemplateRuntimeError] = UndefinedError, ) -> None: self._undefined_hint = hint self._undefined_obj = obj self._undefined_name = name self._undefined_exception = exc def _undefined_message(self) -> str: """Build a message about the undefined value based on how it was accessed. """ ... def _fail_with_undefined_error( """Raise an :exc:`UndefinedError` when operations are performed on the undefined value. """ ... def __getattr__(self, name: str) -> t.Any: ... def __eq__(self, other: t.Any) -> bool: ... def __ne__(self, other: t.Any) -> bool: ... def __hash__(self) -> int: ... def __str__(self) -> str: ... def __len__(self) -> int: ... def __iter__(self) -> t.Iterator[t.Any]: yield from () def __bool__(self) -> bool: ... def __repr__(self) -> str: ... # FILE Jinja2/src/jinja2/runtime.py class Undefined: """The default undefined type. This undefined type can be printed and iterated over, but every other access will raise an :exc:`UndefinedError`: >>> foo = Undefined(name='foo') >>> str(foo) '' >>> not foo True >>> foo + 42 Traceback (most recent call last): ... jinja2.exceptions.UndefinedError: 'foo' is undefined """ def __init__( self, hint: t.Optional[str] = None, obj: t.Any = missing, name: t.Optional[str] = None, exc: t.Type[TemplateRuntimeError] = UndefinedError, ) -> None: self._undefined_hint = hint self._undefined_obj = obj self._undefined_name = name self._undefined_exception = exc def _undefined_message(self) -> str: """Build a message about the undefined value based on how it was accessed. """ ... def _fail_with_undefined_error( """Raise an :exc:`UndefinedError` when operations are performed on the undefined value. """ ... def __getattr__(self, name: str) -> t.Any: ... def __eq__(self, other: t.Any) -> bool: ... def __ne__(self, other: t.Any) -> bool: ... def __hash__(self) -> int: ... def __str__(self) -> str: ... def __len__(self) -> int: ... def __iter__(self) -> t.Iterator[t.Any]: ... def __bool__(self) -> bool: return False def __repr__(self) -> str: ... # FILE Jinja2/src/jinja2/runtime.py class Undefined: """The default undefined type. This undefined type can be printed and iterated over, but every other access will raise an :exc:`UndefinedError`: >>> foo = Undefined(name='foo') >>> str(foo) '' >>> not foo True >>> foo + 42 Traceback (most recent call last): ... jinja2.exceptions.UndefinedError: 'foo' is undefined """ def __init__( self, hint: t.Optional[str] = None, obj: t.Any = missing, name: t.Optional[str] = None, exc: t.Type[TemplateRuntimeError] = UndefinedError, ) -> None: self._undefined_hint = hint self._undefined_obj = obj self._undefined_name = name self._undefined_exception = exc def _undefined_message(self) -> str: """Build a message about the undefined value based on how it was accessed. """ ... 
def _fail_with_undefined_error( """Raise an :exc:`UndefinedError` when operations are performed on the undefined value. """ ... def __getattr__(self, name: str) -> t.Any: ... def __eq__(self, other: t.Any) -> bool: ... def __ne__(self, other: t.Any) -> bool: ... def __hash__(self) -> int: ... def __str__(self) -> str: ... def __len__(self) -> int: ... def __iter__(self) -> t.Iterator[t.Any]: ... def __bool__(self) -> bool: ... def __repr__(self) -> str: return "Undefined" # FILE Jinja2/src/jinja2/utils.py class Cycler: """Cycle through values by yield them one at a time, then restarting once the end is reached. Available as ``cycler`` in templates. Similar to ``loop.cycle``, but can be used outside loops or across multiple loops. For example, render a list of folders and files in a list, alternating giving them "odd" and "even" classes. .. code-block:: html+jinja {% set row_class = cycler("odd", "even") %} <ul class="browser"> {% for folder in folders %} <li class="folder {{ row_class.next() }}">{{ folder }} {% endfor %} {% for file in files %} <li class="file {{ row_class.next() }}">{{ file }} {% endfor %} </ul> :param items: Each positional argument will be yielded in the order given for each cycle. .. versionadded:: 2.1 """ def __init__(self, *items: t.Any) -> None: if not items: raise RuntimeError("at least one item has to be provided") self.items = items self.pos = 0 def reset(self) -> None: """Resets the current item to the first item.""" ... def current(self) -> t.Any: """Return the current item. Equivalent to the item that will be returned next time :meth:`next` is called. """ return self.items[self.pos] def next(self) -> t.Any: """Return the current item, then advance :attr:`current` to the next item. """ ... Based on the information above, please complete the function: #CURRENT_FILE: Jinja2/src/jinja2/utils.py import enum import json import os import re import typing as t from collections import abc from collections import deque from random import choice from random import randrange from threading import Lock from types import CodeType from urllib.parse import quote_from_bytes import markupsafe import typing_extensions as te from .runtime import Undefined from .environment import get_spontaneous_environment from .lexer import _lexer_cache from pprint import pformat from .constants import LOREM_IPSUM_WORDS class LRUCache: """A simple LRU Cache implementation.""" # this is fast for small capacities (something below 1000) but doesn't # scale. But as long as it's only used as storage for templates this # won't do any harm. 
def __init__(self, capacity: int) -> None: self.capacity = capacity self._mapping: t.Dict[t.Any, t.Any] = {} self._queue: "te.Deque[t.Any]" = deque() self._postinit() def _postinit(self) -> None: # alias all queue methods for faster lookup self._popleft = self._queue.popleft self._pop = self._queue.pop self._remove = self._queue.remove self._wlock = Lock() self._append = self._queue.append def __getstate__(self) -> t.Mapping[str, t.Any]: return { "capacity": self.capacity, "_mapping": self._mapping, "_queue": self._queue, } def __setstate__(self, d: t.Mapping[str, t.Any]) -> None: self.__dict__.update(d) self._postinit() def __getnewargs__(self) -> t.Tuple: return (self.capacity,) def copy(self) -> "LRUCache": """Return a shallow copy of the instance.""" rv = self.__class__(self.capacity) rv._mapping.update(self._mapping) rv._queue.extend(self._queue) return rv def get(self, key: t.Any, default: t.Any = None) -> t.Any: """Return an item from the cache dict or `default`""" try: return self[key] except KeyError: return default def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any: """Set `default` if the key is not in the cache otherwise leave unchanged. Return the value of this key. """
Jinja2/src/jinja2/utils.py
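A sketch of `setdefault` written against the cache's own item protocol: `self[key]` both fetches the value and promotes the key, and a miss falls through to storing the default. Leaving all locking to `__setitem__` is an assumption about the class's internal contract:

    def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any:
        try:
            # a hit also moves the key to the most-recently-used position
            return self[key]
        except KeyError:
            self[key] = default
            return default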
sumy.summarizers.sum_basic.SumBasicSummarizer._compute_word_freq
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE sumy/sumy/summarizers/_summarizer.py class AbstractSummarizer(object): def __init__(self, stemmer=null_stemmer): if not callable(stemmer): raise ValueError("Stemmer has to be a callable object") self._stemmer = stemmer def __call__(self, document, sentences_count): ... def stem_word(self, word): return self._stemmer(self.normalize_word(word)) def normalize_word(word): ... def _get_best_sentences(sentences, count, rating, *args, **kwargs): ... # FILE sumy/sumy/summarizers/_summarizer.py class AbstractSummarizer(object): def __init__(self, stemmer=null_stemmer): if not callable(stemmer): raise ValueError("Stemmer has to be a callable object") self._stemmer = stemmer def __call__(self, document, sentences_count): ... def stem_word(self, word): ... def normalize_word(word): ... def _get_best_sentences(sentences, count, rating, *args, **kwargs): rate = rating if isinstance(rating, dict): assert not args and not kwargs rate = lambda s: rating[s] infos = (SentenceInfo(s, o, rate(s, *args, **kwargs)) for o, s in enumerate(sentences)) # sort sentences by rating in descending order infos = sorted(infos, key=attrgetter("rating"), reverse=True) # get `count` first best rated sentences if not callable(count): count = ItemsCount(count) infos = count(infos) # sort sentences by their order in document infos = sorted(infos, key=attrgetter("order")) return tuple(i.sentence for i in infos) # FILE sumy/sumy/summarizers/_summarizer.py class AbstractSummarizer(object): def __init__(self, stemmer=null_stemmer): if not callable(stemmer): raise ValueError("Stemmer has to be a callable object") self._stemmer = stemmer def __call__(self, document, sentences_count): raise NotImplementedError("This method should be overriden in subclass") def stem_word(self, word): return self._stemmer(self.normalize_word(word)) @staticmethod def normalize_word(word): return to_unicode(word).lower() @staticmethod def _get_best_sentences(sentences, count, rating, *args, **kwargs): rate = rating if isinstance(rating, dict): assert not args and not kwargs rate = lambda s: rating[s] infos = (SentenceInfo(s, o, rate(s, *args, **kwargs)) for o, s in enumerate(sentences)) # sort sentences by rating in descending order infos = sorted(infos, key=attrgetter("rating"), reverse=True) # get `count` first best rated sentences if not callable(count): count = ItemsCount(count) infos = count(infos) # sort sentences by their order in document infos = sorted(infos, key=attrgetter("order")) return tuple(i.sentence for i in infos) Based on the information above, please complete the function: #CURRENT_FILE: sumy/sumy/summarizers/sum_basic.py from __future__ import absolute_import from __future__ import division, print_function, unicode_literals from ._summarizer import AbstractSummarizer class SumBasicSummarizer(AbstractSummarizer): """ SumBasic: a frequency-based summarization system that adjusts word frequencies as sentences are extracted. 
Source: http://www.cis.upenn.edu/~nenkova/papers/ipm.pdf """ _stop_words = frozenset() @property def stop_words(self): return self._stop_words @stop_words.setter def stop_words(self, words): self._stop_words = frozenset(map(self.normalize_word, words)) def __call__(self, document, sentences_count): sentences = document.sentences ratings = self._compute_ratings(sentences) return self._get_best_sentences(document.sentences, sentences_count, ratings) def _get_all_words_in_doc(self, sentences): return self._stem_words([w for s in sentences for w in s.words]) def _get_content_words_in_sentence(self, sentence): normalized_words = self._normalize_words(sentence.words) normalized_content_words = self._filter_out_stop_words(normalized_words) stemmed_normalized_content_words = self._stem_words(normalized_content_words) return stemmed_normalized_content_words def _stem_words(self, words): return [self.stem_word(w) for w in words] def _normalize_words(self, words): return [self.normalize_word(w) for w in words] def _filter_out_stop_words(self, words): return [w for w in words if w not in self.stop_words] @staticmethod def _compute_word_freq(list_of_words):
sumy/sumy/summarizers/sum_basic.py
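No guesswork is needed for this one: the next record's `#CURRENT_FILE` excerpt quotes `_compute_word_freq` verbatim as a plain frequency count:

    @staticmethod
    def _compute_word_freq(list_of_words):
        word_freq = {}
        for w in list_of_words:
            word_freq[w] = word_freq.get(w, 0) + 1
        return word_freq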
sumy.summarizers.sum_basic.SumBasicSummarizer._compute_average_probability_of_words
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE sumy/sumy/summarizers/_summarizer.py class AbstractSummarizer(object): def __init__(self, stemmer=null_stemmer): if not callable(stemmer): raise ValueError("Stemmer has to be a callable object") self._stemmer = stemmer def __call__(self, document, sentences_count): ... def stem_word(self, word): return self._stemmer(self.normalize_word(word)) def normalize_word(word): ... def _get_best_sentences(sentences, count, rating, *args, **kwargs): ... # FILE sumy/sumy/summarizers/_summarizer.py class AbstractSummarizer(object): def __init__(self, stemmer=null_stemmer): if not callable(stemmer): raise ValueError("Stemmer has to be a callable object") self._stemmer = stemmer def __call__(self, document, sentences_count): ... def stem_word(self, word): ... def normalize_word(word): ... def _get_best_sentences(sentences, count, rating, *args, **kwargs): rate = rating if isinstance(rating, dict): assert not args and not kwargs rate = lambda s: rating[s] infos = (SentenceInfo(s, o, rate(s, *args, **kwargs)) for o, s in enumerate(sentences)) # sort sentences by rating in descending order infos = sorted(infos, key=attrgetter("rating"), reverse=True) # get `count` first best rated sentences if not callable(count): count = ItemsCount(count) infos = count(infos) # sort sentences by their order in document infos = sorted(infos, key=attrgetter("order")) return tuple(i.sentence for i in infos) # FILE sumy/sumy/summarizers/_summarizer.py class AbstractSummarizer(object): def __init__(self, stemmer=null_stemmer): if not callable(stemmer): raise ValueError("Stemmer has to be a callable object") self._stemmer = stemmer def __call__(self, document, sentences_count): raise NotImplementedError("This method should be overriden in subclass") def stem_word(self, word): return self._stemmer(self.normalize_word(word)) @staticmethod def normalize_word(word): return to_unicode(word).lower() @staticmethod def _get_best_sentences(sentences, count, rating, *args, **kwargs): rate = rating if isinstance(rating, dict): assert not args and not kwargs rate = lambda s: rating[s] infos = (SentenceInfo(s, o, rate(s, *args, **kwargs)) for o, s in enumerate(sentences)) # sort sentences by rating in descending order infos = sorted(infos, key=attrgetter("rating"), reverse=True) # get `count` first best rated sentences if not callable(count): count = ItemsCount(count) infos = count(infos) # sort sentences by their order in document infos = sorted(infos, key=attrgetter("order")) return tuple(i.sentence for i in infos) Based on the information above, please complete the function: #CURRENT_FILE: sumy/sumy/summarizers/sum_basic.py from __future__ import absolute_import from __future__ import division, print_function, unicode_literals from ._summarizer import AbstractSummarizer class SumBasicSummarizer(AbstractSummarizer): """ SumBasic: a frequency-based summarization system that adjusts word frequencies as sentences are extracted. 
Source: http://www.cis.upenn.edu/~nenkova/papers/ipm.pdf """ _stop_words = frozenset() @property def stop_words(self): return self._stop_words @stop_words.setter def stop_words(self, words): self._stop_words = frozenset(map(self.normalize_word, words)) def __call__(self, document, sentences_count): sentences = document.sentences ratings = self._compute_ratings(sentences) return self._get_best_sentences(document.sentences, sentences_count, ratings) def _get_all_words_in_doc(self, sentences): return self._stem_words([w for s in sentences for w in s.words]) def _get_content_words_in_sentence(self, sentence): normalized_words = self._normalize_words(sentence.words) normalized_content_words = self._filter_out_stop_words(normalized_words) stemmed_normalized_content_words = self._stem_words(normalized_content_words) return stemmed_normalized_content_words def _stem_words(self, words): return [self.stem_word(w) for w in words] def _normalize_words(self, words): return [self.normalize_word(w) for w in words] def _filter_out_stop_words(self, words): return [w for w in words if w not in self.stop_words] @staticmethod def _compute_word_freq(list_of_words): word_freq = {} for w in list_of_words: word_freq[w] = word_freq.get(w, 0) + 1 return word_freq def _get_all_content_words_in_doc(self, sentences): all_words = self._get_all_words_in_doc(sentences) content_words = self._filter_out_stop_words(all_words) normalized_content_words = self._normalize_words(content_words) return normalized_content_words def _compute_tf(self, sentences): """ Computes the normalized term frequency as explained in http://www.tfidf.com/ """ content_words = self._get_all_content_words_in_doc(sentences) content_words_count = len(content_words) content_words_freq = self._compute_word_freq(content_words) content_word_tf = dict((k, v / content_words_count) for (k, v) in content_words_freq.items()) return content_word_tf @staticmethod def _compute_average_probability_of_words(word_freq_in_doc, content_words_in_sentence):
sumy/sumy/summarizers/sum_basic.py
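A sketch following the SumBasic paper: rate a sentence by the mean frequency (probability) of its content words. Returning 0 for a sentence with no content words is an assumption to avoid division by zero:

    @staticmethod
    def _compute_average_probability_of_words(word_freq_in_doc, content_words_in_sentence):
        content_words_count = len(content_words_in_sentence)
        if content_words_count == 0:
            # a sentence without content words gets no weight
            return 0
        word_freq_sum = sum(word_freq_in_doc[w] for w in content_words_in_sentence)
        return word_freq_sum / content_words_count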
sumy.summarizers.lex_rank.LexRankSummarizer._compute_idf
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE sumy/sumy/summarizers/_summarizer.py class AbstractSummarizer(object): def __init__(self, stemmer=null_stemmer): if not callable(stemmer): raise ValueError("Stemmer has to be a callable object") self._stemmer = stemmer def __call__(self, document, sentences_count): ... def stem_word(self, word): return self._stemmer(self.normalize_word(word)) def normalize_word(word): ... def _get_best_sentences(sentences, count, rating, *args, **kwargs): ... # FILE sumy/sumy/summarizers/_summarizer.py class AbstractSummarizer(object): def __init__(self, stemmer=null_stemmer): if not callable(stemmer): raise ValueError("Stemmer has to be a callable object") self._stemmer = stemmer def __call__(self, document, sentences_count): raise NotImplementedError("This method should be overriden in subclass") def stem_word(self, word): return self._stemmer(self.normalize_word(word)) @staticmethod def normalize_word(word): return to_unicode(word).lower() @staticmethod def _get_best_sentences(sentences, count, rating, *args, **kwargs): rate = rating if isinstance(rating, dict): assert not args and not kwargs rate = lambda s: rating[s] infos = (SentenceInfo(s, o, rate(s, *args, **kwargs)) for o, s in enumerate(sentences)) # sort sentences by rating in descending order infos = sorted(infos, key=attrgetter("rating"), reverse=True) # get `count` first best rated sentences if not callable(count): count = ItemsCount(count) infos = count(infos) # sort sentences by their order in document infos = sorted(infos, key=attrgetter("order")) return tuple(i.sentence for i in infos) # FILE sumy/sumy/summarizers/_summarizer.py class AbstractSummarizer(object): def __init__(self, stemmer=null_stemmer): if not callable(stemmer): raise ValueError("Stemmer has to be a callable object") self._stemmer = stemmer def __call__(self, document, sentences_count): raise NotImplementedError("This method should be overriden in subclass") def stem_word(self, word): ... def normalize_word(word): ... def _get_best_sentences(sentences, count, rating, *args, **kwargs): ... # FILE sumy/sumy/summarizers/_summarizer.py class AbstractSummarizer(object): def __init__(self, stemmer=null_stemmer): if not callable(stemmer): raise ValueError("Stemmer has to be a callable object") self._stemmer = stemmer def __call__(self, document, sentences_count): ... def stem_word(self, word): ... def normalize_word(word): ... 
def _get_best_sentences(sentences, count, rating, *args, **kwargs): rate = rating if isinstance(rating, dict): assert not args and not kwargs rate = lambda s: rating[s] infos = (SentenceInfo(s, o, rate(s, *args, **kwargs)) for o, s in enumerate(sentences)) # sort sentences by rating in descending order infos = sorted(infos, key=attrgetter("rating"), reverse=True) # get `count` first best rated sentences if not callable(count): count = ItemsCount(count) infos = count(infos) # sort sentences by their order in document infos = sorted(infos, key=attrgetter("order")) return tuple(i.sentence for i in infos) Based on the information above, please complete the function: #CURRENT_FILE: sumy/sumy/summarizers/lex_rank.py from __future__ import absolute_import from __future__ import division, print_function, unicode_literals import math import numpy from collections import Counter from ._summarizer import AbstractSummarizer class LexRankSummarizer(AbstractSummarizer): """ LexRank: Graph-based Centrality as Salience in Text Summarization Source: http://tangra.si.umich.edu/~radev/lexrank/lexrank.pdf """ threshold = 0.1 epsilon = 0.1 _stop_words = frozenset() @property def stop_words(self): return self._stop_words @stop_words.setter def stop_words(self, words): self._stop_words = frozenset(map(self.normalize_word, words)) def __call__(self, document, sentences_count): self._ensure_dependencies_installed() sentences_words = [self._to_words_set(s) for s in document.sentences] if not sentences_words: return tuple() tf_metrics = self._compute_tf(sentences_words) idf_metrics = self._compute_idf(sentences_words) matrix = self._create_matrix(sentences_words, self.threshold, tf_metrics, idf_metrics) scores = self.power_method(matrix, self.epsilon) ratings = dict(zip(document.sentences, scores)) return self._get_best_sentences(document.sentences, sentences_count, ratings) @staticmethod def _ensure_dependencies_installed(): if numpy is None: raise ValueError("LexRank summarizer requires NumPy. Please, install it by command 'pip install numpy'.") def _to_words_set(self, sentence): words = map(self.normalize_word, sentence.words) return [self.stem_word(w) for w in words if w not in self._stop_words] def _compute_tf(self, sentences): tf_values = map(Counter, sentences) tf_metrics = [] for sentence in tf_values: metrics = {} max_tf = self._find_tf_max(sentence) for term, tf in sentence.items(): metrics[term] = tf / max_tf tf_metrics.append(metrics) return tf_metrics @staticmethod def _find_tf_max(terms): return max(terms.values()) if terms else 1 @staticmethod def _compute_idf(sentences):
sumy/sumy/summarizers/lex_rank.py
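Again the expected body is visible nearby: the next record's `#CURRENT_FILE` excerpt includes `_compute_idf` in full, counting for each unseen term the sentences that contain it (`math` is imported in the file header):

    @staticmethod
    def _compute_idf(sentences):
        idf_metrics = {}
        sentences_count = len(sentences)

        for sentence in sentences:
            for term in sentence:
                if term not in idf_metrics:
                    n_j = sum(1 for s in sentences if term in s)
                    idf_metrics[term] = math.log(sentences_count / (1 + n_j))

        return idf_metrics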
sumy.summarizers.lex_rank.LexRankSummarizer.cosine_similarity
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE sumy/sumy/summarizers/_summarizer.py class AbstractSummarizer(object): def __init__(self, stemmer=null_stemmer): if not callable(stemmer): raise ValueError("Stemmer has to be a callable object") self._stemmer = stemmer def __call__(self, document, sentences_count): ... def stem_word(self, word): ... def normalize_word(word): ... def _get_best_sentences(sentences, count, rating, *args, **kwargs): ... # FILE sumy/sumy/summarizers/_summarizer.py class AbstractSummarizer(object): def __init__(self, stemmer=null_stemmer): if not callable(stemmer): raise ValueError("Stemmer has to be a callable object") self._stemmer = stemmer def __call__(self, document, sentences_count): ... def stem_word(self, word): return self._stemmer(self.normalize_word(word)) def normalize_word(word): ... def _get_best_sentences(sentences, count, rating, *args, **kwargs): ... # FILE sumy/sumy/summarizers/_summarizer.py class AbstractSummarizer(object): def __init__(self, stemmer=null_stemmer): if not callable(stemmer): raise ValueError("Stemmer has to be a callable object") self._stemmer = stemmer def __call__(self, document, sentences_count): raise NotImplementedError("This method should be overriden in subclass") def stem_word(self, word): return self._stemmer(self.normalize_word(word)) @staticmethod def normalize_word(word): return to_unicode(word).lower() @staticmethod def _get_best_sentences(sentences, count, rating, *args, **kwargs): rate = rating if isinstance(rating, dict): assert not args and not kwargs rate = lambda s: rating[s] infos = (SentenceInfo(s, o, rate(s, *args, **kwargs)) for o, s in enumerate(sentences)) # sort sentences by rating in descending order infos = sorted(infos, key=attrgetter("rating"), reverse=True) # get `count` first best rated sentences if not callable(count): count = ItemsCount(count) infos = count(infos) # sort sentences by their order in document infos = sorted(infos, key=attrgetter("order")) return tuple(i.sentence for i in infos) # FILE sumy/sumy/summarizers/_summarizer.py class AbstractSummarizer(object): def __init__(self, stemmer=null_stemmer): if not callable(stemmer): raise ValueError("Stemmer has to be a callable object") self._stemmer = stemmer def __call__(self, document, sentences_count): raise NotImplementedError("This method should be overriden in subclass") def stem_word(self, word): ... def normalize_word(word): ... def _get_best_sentences(sentences, count, rating, *args, **kwargs): ... # FILE sumy/sumy/summarizers/_summarizer.py class AbstractSummarizer(object): def __init__(self, stemmer=null_stemmer): if not callable(stemmer): raise ValueError("Stemmer has to be a callable object") self._stemmer = stemmer def __call__(self, document, sentences_count): ... def stem_word(self, word): ... def normalize_word(word): ... 
def _get_best_sentences(sentences, count, rating, *args, **kwargs): rate = rating if isinstance(rating, dict): assert not args and not kwargs rate = lambda s: rating[s] infos = (SentenceInfo(s, o, rate(s, *args, **kwargs)) for o, s in enumerate(sentences)) # sort sentences by rating in descending order infos = sorted(infos, key=attrgetter("rating"), reverse=True) # get `count` first best rated sentences if not callable(count): count = ItemsCount(count) infos = count(infos) # sort sentences by their order in document infos = sorted(infos, key=attrgetter("order")) return tuple(i.sentence for i in infos) Based on the information above, please complete the function: #CURRENT_FILE: sumy/sumy/summarizers/lex_rank.py from __future__ import absolute_import from __future__ import division, print_function, unicode_literals import math import numpy from collections import Counter from ._summarizer import AbstractSummarizer class LexRankSummarizer(AbstractSummarizer): """ LexRank: Graph-based Centrality as Salience in Text Summarization Source: http://tangra.si.umich.edu/~radev/lexrank/lexrank.pdf """ threshold = 0.1 epsilon = 0.1 _stop_words = frozenset() @property def stop_words(self): return self._stop_words @stop_words.setter def stop_words(self, words): self._stop_words = frozenset(map(self.normalize_word, words)) def __call__(self, document, sentences_count): self._ensure_dependencies_installed() sentences_words = [self._to_words_set(s) for s in document.sentences] if not sentences_words: return tuple() tf_metrics = self._compute_tf(sentences_words) idf_metrics = self._compute_idf(sentences_words) matrix = self._create_matrix(sentences_words, self.threshold, tf_metrics, idf_metrics) scores = self.power_method(matrix, self.epsilon) ratings = dict(zip(document.sentences, scores)) return self._get_best_sentences(document.sentences, sentences_count, ratings) @staticmethod def _ensure_dependencies_installed(): if numpy is None: raise ValueError("LexRank summarizer requires NumPy. Please, install it by command 'pip install numpy'.") def _to_words_set(self, sentence): words = map(self.normalize_word, sentence.words) return [self.stem_word(w) for w in words if w not in self._stop_words] def _compute_tf(self, sentences): tf_values = map(Counter, sentences) tf_metrics = [] for sentence in tf_values: metrics = {} max_tf = self._find_tf_max(sentence) for term, tf in sentence.items(): metrics[term] = tf / max_tf tf_metrics.append(metrics) return tf_metrics @staticmethod def _find_tf_max(terms): return max(terms.values()) if terms else 1 @staticmethod def _compute_idf(sentences): idf_metrics = {} sentences_count = len(sentences) for sentence in sentences: for term in sentence: if term not in idf_metrics: n_j = sum(1 for s in sentences if term in s) idf_metrics[term] = math.log(sentences_count / (1 + n_j)) return idf_metrics def _create_matrix(self, sentences, threshold, tf_metrics, idf_metrics): """ Creates matrix of shape |sentences|×|sentences|. 
""" # create matrix |sentences|×|sentences| filled with zeroes sentences_count = len(sentences) matrix = numpy.zeros((sentences_count, sentences_count)) degrees = numpy.zeros((sentences_count, )) for row, (sentence1, tf1) in enumerate(zip(sentences, tf_metrics)): for col, (sentence2, tf2) in enumerate(zip(sentences, tf_metrics)): matrix[row, col] = self.cosine_similarity(sentence1, sentence2, tf1, tf2, idf_metrics) if matrix[row, col] > threshold: matrix[row, col] = 1.0 degrees[row] += 1 else: matrix[row, col] = 0 for row in range(sentences_count): for col in range(sentences_count): if degrees[row] == 0: degrees[row] = 1 matrix[row][col] = matrix[row][col] / degrees[row] return matrix @staticmethod def cosine_similarity(sentence1, sentence2, tf1, tf2, idf_metrics): """ We compute idf-modified-cosine(sentence1, sentence2) here. It's cosine similarity of these two sentences (vectors) A, B computed as cos(x, y) = A . B / (|A| . |B|) Sentences are represented as vector TF*IDF metrics. :param sentence1: Iterable object where every item represents word of 1st sentence. :param sentence2: Iterable object where every item represents word of 2nd sentence. :type tf1: dict :param tf1: Term frequencies of words from 1st sentence. :type tf2: dict :param tf2: Term frequencies of words from 2nd sentence :type idf_metrics: dict :param idf_metrics: Inverted document metrics of the sentences. Every sentence is treated as document for this algorithm. :rtype: float :return: Returns -1.0 for opposite similarity, 1.0 for the same sentence and zero for no similarity between sentences. """
sumy/sumy/summarizers/lex_rank.py
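A sketch of the idf-modified cosine from the LexRank paper: a tf·idf dot product over the terms shared by both sentences, normalized by each sentence's tf·idf vector norm. The guard for zero-length vectors is an assumption:

    @staticmethod
    def cosine_similarity(sentence1, sentence2, tf1, tf2, idf_metrics):
        unique_words1 = frozenset(sentence1)
        unique_words2 = frozenset(sentence2)
        common_words = unique_words1 & unique_words2

        # numerator: tf1*idf * tf2*idf summed over shared terms
        numerator = sum(tf1[t] * tf2[t] * idf_metrics[t] ** 2 for t in common_words)

        # squared tf*idf norms of the two sentence vectors
        denominator1 = sum((tf1[t] * idf_metrics[t]) ** 2 for t in unique_words1)
        denominator2 = sum((tf2[t] * idf_metrics[t]) ** 2 for t in unique_words2)

        if denominator1 > 0 and denominator2 > 0:
            return numerator / (math.sqrt(denominator1) * math.sqrt(denominator2))
        return 0.0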
sumy.evaluation.rouge._get_ngrams
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE sumy/sumy/evaluation/rouge.py
def rouge_n(evaluated_sentences, reference_sentences, n=2):
    """
    Computes ROUGE-N of two text collections of sentences.
    Source: http://research.microsoft.com/en-us/um/people/cyl/download/
    papers/rouge-working-note-v1.3.1.pdf

    :param evaluated_sentences:
        The sentences that have been picked by the summarizer
    :param reference_sentences:
        The sentences from the reference set
    :param n: Size of ngram. Defaults to 2.
    :returns:
        float 0 <= ROUGE-N <= 1, where 0 means no overlap and 1 means
        exactly the same.
    :raises ValueError: raises exception if a param has len <= 0
    """
    if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
        raise (ValueError("Collections must contain at least 1 sentence."))

    evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)
    reference_ngrams = _get_word_ngrams(n, reference_sentences)
    reference_count = len(reference_ngrams)

    # Gets the overlapping ngrams between evaluated and reference
    overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
    overlapping_count = len(overlapping_ngrams)

    return overlapping_count / reference_count

# FILE sumy/sumy/evaluation/rouge.py
def _get_word_ngrams(n, sentences):
    assert (len(sentences) > 0)
    assert (n > 0)

    words = set()
    for sentence in sentences:
        words.update(_get_ngrams(n, _split_into_words([sentence])))
    return words

# FILE sumy/sumy/evaluation/rouge.py
def _split_into_words(sentences):
    full_text_words = []
    for s in sentences:
        if not isinstance(s, Sentence):
            raise (ValueError("Object in collection must be of type Sentence"))
        full_text_words.extend(s.words)
    return full_text_words

Based on the information above, please complete the function:
#CURRENT_FILE: sumy/sumy/evaluation/rouge.py
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from ..models.dom import Sentence


def _get_ngrams(n, text):
sumy/sumy/evaluation/rouge.py
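A sketch constrained by the callers quoted above: `_get_word_ngrams` calls `words.update(...)` on the result and `rouge_n` intersects those sets, so `_get_ngrams` should return a set of hashable items, e.g. tuples of n consecutive words:

    def _get_ngrams(n, text):
        ngram_set = set()
        max_index_ngram_start = len(text) - n
        for i in range(max_index_ngram_start + 1):
            ngram_set.add(tuple(text[i:i + n]))
        return ngram_set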
sumy.evaluation.rouge._split_into_words
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE sumy/sumy/evaluation/rouge.py
def rouge_n(evaluated_sentences, reference_sentences, n=2):
    """
    Computes ROUGE-N of two text collections of sentences.
    Source: http://research.microsoft.com/en-us/um/people/cyl/download/
    papers/rouge-working-note-v1.3.1.pdf

    :param evaluated_sentences:
        The sentences that have been picked by the summarizer
    :param reference_sentences:
        The sentences from the reference set
    :param n: Size of ngram. Defaults to 2.
    :returns:
        float 0 <= ROUGE-N <= 1, where 0 means no overlap and 1 means
        exactly the same.
    :raises ValueError: raises exception if a param has len <= 0
    """
    if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
        raise (ValueError("Collections must contain at least 1 sentence."))

    evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)
    reference_ngrams = _get_word_ngrams(n, reference_sentences)
    reference_count = len(reference_ngrams)

    # Gets the overlapping ngrams between evaluated and reference
    overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
    overlapping_count = len(overlapping_ngrams)

    return overlapping_count / reference_count

# FILE sumy/sumy/evaluation/rouge.py
def _get_word_ngrams(n, sentences):
    assert (len(sentences) > 0)
    assert (n > 0)

    words = set()
    for sentence in sentences:
        words.update(_get_ngrams(n, _split_into_words([sentence])))
    return words

Based on the information above, please complete the function:
#CURRENT_FILE: sumy/sumy/evaluation/rouge.py
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from ..models.dom import Sentence


def _split_into_words(sentences):
sumy/sumy/evaluation/rouge.py
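Nothing to invent here either: the `_get_ngrams` record above already quotes `_split_into_words` in full. Restated idiomatically, it flattens `s.words` across the sentences and rejects anything that is not a `Sentence`:

    def _split_into_words(sentences):
        full_text_words = []
        for s in sentences:
            if not isinstance(s, Sentence):
                raise ValueError("Object in collection must be of type Sentence")
            full_text_words.extend(s.words)
        return full_text_words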
falcon.inspect.register_router
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE falcon/falcon/inspect.py def to_string(self, verbose=False, internal=False) -> str: """Return a string representation of this class. Args: verbose (bool, optional): Adds more information. Defaults to False. internal (bool, optional): Also include internal route methods and error handlers added by the framework. Defaults to ``False``. Returns: str: string representation of this class. """ return StringVisitor(verbose, internal).process(self) # FILE falcon/falcon/inspect.py class StringVisitor(InspectVisitor): """Visitor that returns a string representation of the info class. This is used automatically by calling ``to_string()`` on the info class. It can also be used directly by calling ``StringVisitor.process(info_instance)``. Args: verbose (bool, optional): Adds more information. Defaults to ``False``. internal (bool, optional): Also include internal route methods and error handlers added by the framework. Defaults to ``False``. name (str, optional): The name of the application, to be output at the beginning of the text. Defaults to ``'Falcon App'``. """ def __init__(self, verbose=False, internal=False, name=''): self.verbose = verbose self.internal = internal self.name = name self.indent = 0 @property def tab(self): """Get the current tabulation.""" return ' ' * self.indent def visit_route_method(self, route_method: RouteMethodInfo) -> str: """Visit a RouteMethodInfo instance. Usually called by `process`.""" text = '{0.method} - {0.function_name}'.format(route_method) if self.verbose: text += ' ({0.source_info})'.format(route_method) return text def _methods_to_string(self, methods: List): """Return a string from the list of methods.""" tab = self.tab + ' ' * 3 methods = _filter_internal(methods, self.internal) if not methods: return '' text_list = [self.process(m) for m in methods] method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]] method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]] return '\n'.join(method_text) def visit_route(self, route: RouteInfo) -> str: """Visit a RouteInfo instance. Usually called by `process`.""" text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route) if self.verbose: text += ' ({0.source_info})'.format(route) method_text = self._methods_to_string(route.methods) if not method_text: return text return '{}:\n{}'.format(text, method_text) def visit_static_route(self, static_route: StaticRouteInfo) -> str: """Visit a StaticRouteInfo instance. Usually called by `process`.""" text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route) if static_route.fallback_filename: text += ' [{0.fallback_filename}]'.format(static_route) return text def visit_sink(self, sink: SinkInfo) -> str: """Visit a SinkInfo instance. Usually called by `process`.""" text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink) if self.verbose: text += ' ({0.source_info})'.format(sink) return text def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str: """Visit a ErrorHandlerInfo instance. Usually called by `process`.""" text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler) if self.verbose: text += ' ({0.source_info})'.format(error_handler) return text def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str: """Visit a MiddlewareMethodInfo instance. 
Usually called by `process`.""" text = '{0.function_name}'.format(middleware_method) if self.verbose: text += ' ({0.source_info})'.format(middleware_method) return text def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str: """Visit a ErrorHandlerInfo instance. Usually called by `process`.""" text = '{0}↣ {1.name}'.format(self.tab, middleware_class) if self.verbose: text += ' ({0.source_info})'.format(middleware_class) method_text = self._methods_to_string(middleware_class.methods) if not method_text: return text return '{}:\n{}'.format(text, method_text) def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str: """Visit a MiddlewareTreeItemInfo instance. Usually called by `process`.""" symbol = mti._symbols.get(mti.name, '→') return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti) def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str: """Visit a MiddlewareTreeInfo instance. Usually called by `process`.""" before = len(m_tree.request) + len(m_tree.resource) after = len(m_tree.response) if before + after == 0: return '' each = 2 initial = self.indent if after > before: self.indent += each * (after - before) text = [] for r in m_tree.request: text.append(self.process(r)) self.indent += each if text: text.append('') for r in m_tree.resource: text.append(self.process(r)) self.indent += each if m_tree.resource or not text: text.append('') self.indent += each text.append('{}├── Process route responder'.format(self.tab)) self.indent -= each if m_tree.response: text.append('') for r in m_tree.response: self.indent -= each text.append(self.process(r)) self.indent = initial return '\n'.join(text) def visit_middleware(self, middleware: MiddlewareInfo) -> str: """Visit a MiddlewareInfo instance. Usually called by `process`.""" text = self.process(middleware.middleware_tree) if self.verbose: self.indent += 4 m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes) self.indent -= 4 if m_text: text += '\n{}- Middleware classes:\n{}'.format(self.tab, m_text) return text def visit_app(self, app: AppInfo) -> str: """Visit a AppInfo instance. Usually called by `process`.""" type_ = 'ASGI' if app.asgi else 'WSGI' self.indent = 4 text = '{} ({})'.format(self.name or 'Falcon App', type_) if app.routes: routes = '\n'.join(self.process(r) for r in app.routes) text += '\n• Routes:\n{}'.format(routes) middleware_text = self.process(app.middleware) if middleware_text: text += '\n• Middleware ({}):\n{}'.format( app.middleware.independent_text, middleware_text ) if app.static_routes: static_routes = '\n'.join(self.process(sr) for sr in app.static_routes) text += '\n• Static routes:\n{}'.format(static_routes) if app.sinks: sinks = '\n'.join(self.process(s) for s in app.sinks) text += '\n• Sinks:\n{}'.format(sinks) errors = _filter_internal(app.error_handlers, self.internal) if errors: errs = '\n'.join(self.process(e) for e in errors) text += '\n• Error handlers:\n{}'.format(errs) return text # FILE falcon/falcon/inspect.py def inspect_app(app: App) -> 'AppInfo': """Inspects an application. Args: app (falcon.App): The application to inspect. Works with both :class:`falcon.App` and :class:`falcon.asgi.App`. Returns: AppInfo: The information regarding the application. Call :meth:`~.AppInfo.to_string` on the result to obtain a human-friendly representation. 
""" routes = inspect_routes(app) static = inspect_static_routes(app) sinks = inspect_sinks(app) error_handlers = inspect_error_handlers(app) middleware = inspect_middleware(app) return AppInfo(routes, middleware, static, sinks, error_handlers, app._ASGI) # FILE falcon/falcon/inspect.py def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]': """Inspects the static routes of an application. Args: app (falcon.App): The application to inspect. Works with both :class:`falcon.App` and :class:`falcon.asgi.App`. Returns: List[StaticRouteInfo]: A list of static routes that have been added to the application. """ routes = [] for sr, _, _ in app._static_routes: info = StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename) routes.append(info) return routes # FILE falcon/falcon/inspect.py def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]': """Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes. Default route inspector for CompiledRouter. Args: router (CompiledRouter): The router to inspect. Returns: List[RouteInfo]: A list of :class:`~.RouteInfo`. """ def _traverse(roots, parent): for root in roots: path = parent + '/' + root.raw_segment if root.resource is not None: methods = [] if root.method_map: for method, func in root.method_map.items(): if isinstance(func, partial): real_func = func.func else: real_func = func source_info = _get_source_info(real_func) internal = _is_internal(real_func) method_info = RouteMethodInfo( method, source_info, real_func.__name__, internal ) methods.append(method_info) source_info, class_name = _get_source_info_and_name(root.resource) route_info = RouteInfo(path, class_name, source_info, methods) routes.append(route_info) if root.children: _traverse(root.children, path) routes = [] # type: List[RouteInfo] _traverse(router._roots, '') return routes # FILE falcon/falcon/app.py class App: """This class is the main entry point into a Falcon-based WSGI app. Each App instance provides a callable `WSGI <https://www.python.org/dev/peps/pep-3333/>`_ interface and a routing engine (for ASGI applications, see :class:`falcon.asgi.App`). Note: The ``API`` class was renamed to ``App`` in Falcon 3.0. The old class name remains available as an alias for backwards-compatibility, but will be removed in a future release. Keyword Arguments: media_type (str): Default media type to use when initializing :py:class:`~.RequestOptions` and :py:class:`~.ResponseOptions`. The ``falcon`` module provides a number of constants for common media types, such as ``falcon.MEDIA_MSGPACK``, ``falcon.MEDIA_YAML``, ``falcon.MEDIA_XML``, etc. middleware: Either a single middleware component object or an iterable of objects (instantiated classes) that implement the following middleware component interface. Note that it is only necessary to implement the methods for the events you would like to handle; Falcon simply skips over any missing middleware methods:: class ExampleComponent: def process_request(self, req, resp): \"\"\"Process the request before routing it. Note: Because Falcon routes each request based on req.path, a request can be effectively re-routed by setting that attribute to a new value from within process_request(). Args: req: Request object that will eventually be routed to an on_* responder method. resp: Response object that will be routed to the on_* responder. \"\"\" def process_resource(self, req, resp, resource, params): \"\"\"Process the request and resource *after* routing. 
Note: This method is only called when the request matches a route to a resource. Args: req: Request object that will be passed to the routed responder. resp: Response object that will be passed to the responder. resource: Resource object to which the request was routed. May be None if no route was found for the request. params: A dict-like object representing any additional params derived from the route's URI template fields, that will be passed to the resource's responder method as keyword arguments. \"\"\" def process_response(self, req, resp, resource, req_succeeded) \"\"\"Post-processing of the response (after routing). Args: req: Request object. resp: Response object. resource: Resource object to which the request was routed. May be None if no route was found for the request. req_succeeded: True if no exceptions were raised while the framework processed and routed the request; otherwise False. \"\"\" (See also: :ref:`Middleware <middleware>`) request_type: ``Request``-like class to use instead of Falcon's default class. Among other things, this feature affords inheriting from :class:`falcon.Request` in order to override the ``context_type`` class variable (default: :class:`falcon.Request`) response_type: ``Response``-like class to use instead of Falcon's default class (default: :class:`falcon.Response`) router (object): An instance of a custom router to use in lieu of the default engine. (See also: :ref:`Custom Routers <routing_custom>`) independent_middleware (bool): Set to ``False`` if response middleware should not be executed independently of whether or not request middleware raises an exception (default ``True``). When this option is set to ``False``, a middleware component's ``process_response()`` method will NOT be called when that same component's ``process_request()`` (or that of a component higher up in the stack) raises an exception. cors_enable (bool): Set this flag to ``True`` to enable a simple CORS policy for all responses, including support for preflighted requests. An instance of :py:class:`~.CORSMiddleware` can instead be passed to the middleware argument to customize its behaviour. (default ``False``). (See also: :ref:`CORS <cors>`) sink_before_static_route (bool): Indicates if the sinks should be processed before (when ``True``) or after (when ``False``) the static routes. This has an effect only if no route was matched. (default ``True``) Attributes: req_options: A set of behavioral options related to incoming requests. (See also: :py:class:`~.RequestOptions`) resp_options: A set of behavioral options related to outgoing responses. (See also: :py:class:`~.ResponseOptions`) router_options: Configuration options for the router. If a custom router is in use, and it does not expose any configurable options, referencing this attribute will raise an instance of ``AttributeError``. (See also: :ref:`CompiledRouterOptions <compiled_router_options>`) """ _META_METHODS = frozenset(constants._META_METHODS) _STREAM_BLOCK_SIZE = 8 * 1024 # 8 KiB _STATIC_ROUTE_TYPE = routing.StaticRoute # NOTE(kgriffs): This makes it easier to tell what we are dealing with # without having to import falcon.asgi to get at the falcon.asgi.App # type (which we may not be able to do under Python 3.5). _ASGI = False # NOTE(kgriffs): We do it like this rather than just implementing the # methods directly on the class, so that we keep all the default # responders colocated in the same module. 
This will make it more # likely that the implementations of the async and non-async versions # of the methods are kept in sync (pun intended). _default_responder_bad_request = responders.bad_request _default_responder_path_not_found = responders.path_not_found __slots__ = ( '_cors_enable', '_error_handlers', '_independent_middleware', '_middleware', # NOTE(kgriffs): WebSocket is currently only supported for # ASGI apps, but we may add support for WSGI at some point. '_middleware_ws', '_request_type', '_response_type', '_router_search', '_router', '_serialize_error', '_sink_and_static_routes', '_sink_before_static_route', '_sinks', '_static_routes', '_unprepared_middleware', 'req_options', 'resp_options', ) def __init__( self, media_type=constants.DEFAULT_MEDIA_TYPE, request_type=Request, response_type=Response, middleware=None, router=None, independent_middleware=True, cors_enable=False, sink_before_static_route=True, ): self._sink_before_static_route = sink_before_static_route self._sinks = [] self._static_routes = [] self._sink_and_static_routes = () if cors_enable: cm = CORSMiddleware() if middleware is None: middleware = [cm] else: try: # NOTE(kgriffs): Check to see if middleware is an # iterable, and if so, append the CORSMiddleware # instance. iter(middleware) middleware = list(middleware) middleware.append(cm) except TypeError: # NOTE(kgriffs): Assume the middleware kwarg references # a single middleware component. middleware = [middleware, cm] # set middleware self._unprepared_middleware = [] self._independent_middleware = independent_middleware self.add_middleware(middleware) self._router = router or routing.DefaultRouter() self._router_search = self._router.find self._request_type = request_type self._response_type = response_type self._error_handlers = {} self._serialize_error = helpers.default_serialize_error self.req_options = RequestOptions() self.resp_options = ResponseOptions() self.req_options.default_media_type = media_type self.resp_options.default_media_type = media_type # NOTE(kgriffs): Add default error handlers self.add_error_handler(Exception, self._python_error_handler) self.add_error_handler(HTTPError, self._http_error_handler) self.add_error_handler(HTTPStatus, self._http_status_handler) def __call__(self, env, start_response): # noqa: C901 """WSGI `app` method. Makes instances of App callable from a WSGI server. May be used to host an App or called directly in order to simulate requests when testing the App. (See also: PEP 3333) Args: env (dict): A WSGI environment dictionary start_response (callable): A WSGI helper function for setting status and headers on a response. """ req = self._request_type(env, options=self.req_options) resp = self._response_type(options=self.resp_options) resource = None responder = None params = {} dependent_mw_resp_stack = [] mw_req_stack, mw_rsrc_stack, mw_resp_stack = self._middleware req_succeeded = False try: if req.method in self._META_METHODS: raise HTTPBadRequest() # NOTE(ealogar): The execution of request middleware # should be before routing. This will allow request mw # to modify the path. # NOTE: if flag set to use independent middleware, execute # request middleware independently. Otherwise, only queue # response middleware after request middleware succeeds. 
if self._independent_middleware: for process_request in mw_req_stack: process_request(req, resp) if resp.complete: break else: for process_request, process_response in mw_req_stack: if process_request and not resp.complete: process_request(req, resp) if process_response: dependent_mw_resp_stack.insert(0, process_response) if not resp.complete: # NOTE(warsaw): Moved this to inside the try except # because it is possible when using object-based # traversal for _get_responder() to fail. An example is # a case where an object does not have the requested # next-hop child resource. In that case, the object # being asked to dispatch to its child will raise an # HTTP exception signalling the problem, e.g. a 404. responder, params, resource, req.uri_template = self._get_responder(req) except Exception as ex: if not self._handle_exception(req, resp, ex, params): raise else: try: # NOTE(kgriffs): If the request did not match any # route, a default responder is returned and the # resource is None. In that case, we skip the # resource middleware methods. Resource will also be # None when a middleware method already set # resp.complete to True. if resource: # Call process_resource middleware methods. for process_resource in mw_rsrc_stack: process_resource(req, resp, resource, params) if resp.complete: break if not resp.complete: responder(req, resp, **params) req_succeeded = True except Exception as ex: if not self._handle_exception(req, resp, ex, params): raise # Call process_response middleware methods. for process_response in mw_resp_stack or dependent_mw_resp_stack: try: process_response(req, resp, resource, req_succeeded) except Exception as ex: if not self._handle_exception(req, resp, ex, params): raise req_succeeded = False body = [] length = 0 try: body, length = self._get_body(resp, env.get('wsgi.file_wrapper')) except Exception as ex: if not self._handle_exception(req, resp, ex, params): raise req_succeeded = False resp_status = code_to_http_status(resp.status) default_media_type = self.resp_options.default_media_type if req.method == 'HEAD' or resp_status in _BODILESS_STATUS_CODES: body = [] # PERF(vytas): move check for the less common and much faster path # of resp_status being in {204, 304} here; NB: this builds on the # assumption _TYPELESS_STATUS_CODES <= _BODILESS_STATUS_CODES. # NOTE(kgriffs): Based on wsgiref.validate's interpretation of # RFC 2616, as commented in that module's source code. The # presence of the Content-Length header is not similarly # enforced. if resp_status in _TYPELESS_STATUS_CODES: default_media_type = None elif ( length is not None and req.method == 'HEAD' and resp_status not in _BODILESS_STATUS_CODES and 'content-length' not in resp._headers ): # NOTE(kgriffs): We really should be returning a Content-Length # in this case according to my reading of the RFCs. By # optionally using len(data) we let a resource simulate HEAD # by turning around and calling it's own on_get(). resp._headers['content-length'] = str(length) else: # PERF(kgriffs): Böse mußt sein. Operate directly on resp._headers # to reduce overhead since this is a hot/critical code path. # NOTE(kgriffs): We always set content-length to match the # body bytes length, even if content-length is already set. The # reason being that web servers and LBs behave unpredictably # when the header doesn't match the body (sometimes choosing to # drop the HTTP connection prematurely, for example). 
if length is not None: resp._headers['content-length'] = str(length) headers = resp._wsgi_headers(default_media_type) # Return the response per the WSGI spec. start_response(resp_status, headers) return body @property def router_options(self): return self._router.options def add_middleware(self, middleware): """Add one or more additional middleware components. Arguments: middleware: Either a single middleware component or an iterable of components to add. The component(s) will be invoked, in order, as if they had been appended to the original middleware list passed to the class initializer. """ # NOTE(kgriffs): Since this is called by the initializer, there is # the chance that middleware may be None. if middleware: try: self._unprepared_middleware += middleware except TypeError: # middleware is not iterable; assume it is just one bare component self._unprepared_middleware.append(middleware) # NOTE(kgriffs): Even if middleware is None or an empty list, we still # need to make sure self._middleware is initialized if this is the # first call to add_middleware(). self._middleware = self._prepare_middleware( self._unprepared_middleware, independent_middleware=self._independent_middleware, ) def add_route(self, uri_template, resource, **kwargs): """Associate a templatized URI path with a resource. Falcon routes incoming requests to resources based on a set of URI templates. If the path requested by the client matches the template for a given route, the request is then passed on to the associated resource for processing. Note: If no route matches the request, control then passes to a default responder that simply raises an instance of :class:`~.HTTPRouteNotFound`. By default, this error will be rendered as a 404 response, but this behavior can be modified by adding a custom error handler (see also :ref:`this FAQ topic <faq_override_404_500_handlers>`). On the other hand, if a route is matched but the resource does not implement a responder for the requested HTTP method, the framework invokes a default responder that raises an instance of :class:`~.HTTPMethodNotAllowed`. This method delegates to the configured router's ``add_route()`` method. To override the default behavior, pass a custom router object to the :class:`~.App` initializer. (See also: :ref:`Routing <routing>`) Args: uri_template (str): A templatized URI. Care must be taken to ensure the template does not mask any sink patterns, if any are registered. (See also: :meth:`~.App.add_sink`) Warning: If :attr:`~falcon.RequestOptions.strip_url_path_trailing_slash` is enabled, `uri_template` should be provided without a trailing slash. (See also: :ref:`trailing_slash_in_path`) resource (instance): Object which represents a REST resource. Falcon will pass GET requests to ``on_get()``, PUT requests to ``on_put()``, etc. If any HTTP methods are not supported by your resource, simply don't define the corresponding request handlers, and Falcon will do the right thing. Note: When using an async version of the ``App``, all request handlers must be awaitable coroutine functions. Keyword Args: suffix (str): Optional responder name suffix for this route. If a suffix is provided, Falcon will map GET requests to ``on_get_{suffix}()``, POST requests to ``on_post_{suffix}()``, etc. In this way, multiple closely-related routes can be mapped to the same resource. For example, a single resource class can use suffixed responders to distinguish requests for a single item vs. a collection of those same items. 
Another class might use a suffixed responder to handle a shortlink route in addition to the regular route for the resource. For example:: class Baz(object): def on_get_foo(self, req, resp): pass def on_get_bar(self, req, resp): pass baz = Baz() app = falcon.App() app.add_route('/foo', baz, suffix='foo') app.add_route('/bar', baz, suffix='bar') compile (bool): Optional flag that can be provided when using the default :class:`.CompiledRouter` to compile the routing logic on this call, since it will otherwise delay compilation until the first request is routed. See :meth:`.CompiledRouter.add_route` for further details. Note: Any additional keyword arguments not defined above are passed through to the underlying router's ``add_route()`` method. The default router ignores any additional keyword arguments, but custom routers may take advantage of this feature to receive additional options when setting up routes. Custom routers MUST accept such arguments using the variadic pattern (``**kwargs``), and ignore any keyword arguments that they don't support. """ # NOTE(richardolsson): Doing the validation here means it doesn't have # to be duplicated in every future router implementation. if not isinstance(uri_template, str): raise TypeError('uri_template is not a string') if not uri_template.startswith('/'): raise ValueError("uri_template must start with '/'") if '//' in uri_template: raise ValueError("uri_template may not contain '//'") self._router.add_route(uri_template, resource, **kwargs) def add_static_route( self, prefix, directory, downloadable=False, fallback_filename=None ): """Add a route to a directory of static files. Static routes provide a way to serve files directly. This feature provides an alternative to serving files at the web server level when you don't have that option, when authorization is required, or for testing purposes. Warning: Serving files directly from the web server, rather than through the Python app, will always be more efficient, and therefore should be preferred in production deployments. For security reasons, the directory and the fallback_filename (if provided) should be read only for the account running the application. Warning: If you need to serve large files and/or progressive downloads (such as in the case of video streaming) through the Falcon app, check that your application server's timeout settings can accomodate the expected request duration (for instance, the popular Gunicorn kills ``sync`` workers after 30 seconds unless configured otherwise). Note: For ASGI apps, file reads are made non-blocking by scheduling them on the default executor. Static routes are matched in LIFO order. Therefore, if the same prefix is used for two routes, the second one will override the first. This also means that more specific routes should be added *after* less specific ones. For example, the following sequence would result in ``'/foo/bar/thing.js'`` being mapped to the ``'/foo/bar'`` route, and ``'/foo/xyz/thing.js'`` being mapped to the ``'/foo'`` route:: app.add_static_route('/foo', foo_path) app.add_static_route('/foo/bar', foobar_path) Args: prefix (str): The path prefix to match for this route. If the path in the requested URI starts with this string, the remainder of the path will be appended to the source directory to determine the file to serve. This is done in a secure manner to prevent an attacker from requesting a file outside the specified directory. 
Note that static routes are matched in LIFO order, and are only attempted after checking dynamic routes and sinks. directory (Union[str, pathlib.Path]): The source directory from which to serve files. downloadable (bool): Set to ``True`` to include a Content-Disposition header in the response. The "filename" directive is simply set to the name of the requested file. fallback_filename (str): Fallback filename used when the requested file is not found. Can be a relative path inside the prefix folder or any valid absolute path. """ sr = self._STATIC_ROUTE_TYPE( prefix, directory, downloadable=downloadable, fallback_filename=fallback_filename, ) self._static_routes.insert(0, (sr, sr, False)) self._update_sink_and_static_routes() def add_sink(self, sink, prefix=r'/'): """Register a sink method for the App. If no route matches a request, but the path in the requested URI matches a sink prefix, Falcon will pass control to the associated sink, regardless of the HTTP method requested. Using sinks, you can drain and dynamically handle a large number of routes, when creating static resources and responders would be impractical. For example, you might use a sink to create a smart proxy that forwards requests to one or more backend services. Args: sink (callable): A callable taking the form ``func(req, resp, **kwargs)``. Note: When using an async version of the ``App``, this must be a coroutine. prefix (str): A regex string, typically starting with '/', which will trigger the sink if it matches the path portion of the request's URI. Both strings and precompiled regex objects may be specified. Characters are matched starting at the beginning of the URI path. Note: Named groups are converted to kwargs and passed to the sink as such. Warning: If the prefix overlaps a registered route template, the route will take precedence and mask the sink. (See also: :meth:`~.add_route`) """ if not self._ASGI and iscoroutinefunction(sink): raise CompatibilityError( 'The sink method must be a regular synchronous function ' 'in order to be used with a WSGI app.' ) if not hasattr(prefix, 'match'): # Assume it is a string prefix = re.compile(prefix) # NOTE(kgriffs): Insert at the head of the list such that # in the case of a duplicate prefix, the last one added # is preferred. self._sinks.insert(0, (prefix, sink, True)) self._update_sink_and_static_routes() def add_error_handler(self, exception, handler=None): """Register a handler for one or more exception types. Error handlers may be registered for any exception type, including :class:`~.HTTPError` or :class:`~.HTTPStatus`. This feature provides a central location for logging and otherwise handling exceptions raised by responders, hooks, and middleware components. A handler can raise an instance of :class:`~.HTTPError` or :class:`~.HTTPStatus` to communicate information about the issue to the client. Alternatively, a handler may modify `resp` directly. An error handler "matches" a raised exception if the exception is an instance of the corresponding exception type. If more than one error handler matches the raised exception, the framework will choose the most specific one, as determined by the method resolution order of the raised exception type. If multiple error handlers are registered for the *same* exception class, then the most recently-registered handler is used. 
For example, suppose we register error handlers as follows:: app = App() app.add_error_handler(falcon.HTTPNotFound, custom_handle_not_found) app.add_error_handler(falcon.HTTPError, custom_handle_http_error) app.add_error_handler(Exception, custom_handle_uncaught_exception) app.add_error_handler(falcon.HTTPNotFound, custom_handle_404) If an instance of ``falcon.HTTPForbidden`` is raised, it will be handled by ``custom_handle_http_error()``. ``falcon.HTTPError`` is a superclass of ``falcon.HTTPForbidden`` and a subclass of ``Exception``, so it is the most specific exception type with a registered handler. If an instance of ``falcon.HTTPNotFound`` is raised, it will be handled by ``custom_handle_404()``, not by ``custom_handle_not_found()``, because ``custom_handle_404()`` was registered more recently. .. Note:: By default, the framework installs three handlers, one for :class:`~.HTTPError`, one for :class:`~.HTTPStatus`, and one for the standard ``Exception`` type, which prevents passing uncaught exceptions to the WSGI server. These can be overridden by adding a custom error handler method for the exception type in question. Args: exception (type or iterable of types): When handling a request, whenever an error occurs that is an instance of the specified type(s), the associated handler will be called. Either a single type or an iterable of types may be specified. handler (callable): A function or callable object taking the form ``func(req, resp, ex, params)``. If not specified explicitly, the handler will default to ``exception.handle``, where ``exception`` is the error type specified above, and ``handle`` is a static method (i.e., decorated with ``@staticmethod``) that accepts the same params just described. For example:: class CustomException(CustomBaseException): @staticmethod def handle(req, resp, ex, params): # TODO: Log the error # Convert to an instance of falcon.HTTPError raise falcon.HTTPError(falcon.HTTP_792) If an iterable of exception types is specified instead of a single type, the handler must be explicitly specified. .. versionchanged:: 3.0 The error handler is now selected by the most-specific matching error class, rather than the most-recently registered matching error class. """ def wrap_old_handler(old_handler): # NOTE(kgriffs): This branch *is* actually tested by # test_error_handlers.test_handler_signature_shim_asgi() (as # verified manually via pdb), but for some reason coverage # tracking isn't picking it up. if iscoroutinefunction(old_handler): # pragma: no cover @wraps(old_handler) async def handler_async(req, resp, ex, params): await old_handler(ex, req, resp, params) return handler_async @wraps(old_handler) def handler(req, resp, ex, params): old_handler(ex, req, resp, params) return handler if handler is None: try: handler = exception.handle except AttributeError: raise AttributeError( 'handler must either be specified ' 'explicitly or defined as a static' 'method named "handle" that is a ' 'member of the given exception class.' ) # TODO(vytas): Remove this shimming in a future Falcon version. 
arg_names = tuple(misc.get_argnames(handler)) if arg_names[0:1] in ( ('e',), ('err',), ('error',), ('ex',), ('exception',), ) or arg_names[1:3] in (('req', 'resp'), ('request', 'response')): handler = wrap_old_handler(handler) try: exception_tuple = tuple(exception) except TypeError: exception_tuple = (exception,) for exc in exception_tuple: if not issubclass(exc, BaseException): raise TypeError('"exception" must be an exception type.') self._error_handlers[exc] = handler def set_error_serializer(self, serializer): """Override the default serializer for instances of :class:`~.HTTPError`. When a responder raises an instance of :class:`~.HTTPError`, Falcon converts it to an HTTP response automatically. The default serializer supports JSON and XML, but may be overridden by this method to use a custom serializer in order to support other media types. Note: If a custom media type is used and the type includes a "+json" or "+xml" suffix, the default serializer will convert the error to JSON or XML, respectively. Note: A custom serializer set with this method may not be called if the default error handler for :class:`~.HTTPError` has been overriden. See :meth:`~.add_error_handler` for more details. The :class:`~.HTTPError` class contains helper methods, such as `to_json()` and `to_dict()`, that can be used from within custom serializers. For example:: def my_serializer(req, resp, exception): representation = None preferred = req.client_prefers((falcon.MEDIA_YAML, falcon.MEDIA_JSON)) if preferred is not None: if preferred == falcon.MEDIA_JSON: resp.data = exception.to_json() else: resp.text = yaml.dump(exception.to_dict(), encoding=None) resp.content_type = preferred resp.append_header('Vary', 'Accept') Args: serializer (callable): A function taking the form ``func(req, resp, exception)``, where `req` is the request object that was passed to the responder method, `resp` is the response object, and `exception` is an instance of ``falcon.HTTPError``. """ self._serialize_error = serializer # ------------------------------------------------------------------------ # Helpers that require self # ------------------------------------------------------------------------ def _prepare_middleware(self, middleware=None, independent_middleware=False): return helpers.prepare_middleware( middleware=middleware, independent_middleware=independent_middleware ) def _get_responder(self, req): """Search routes for a matching responder. Args: req (Request): The request object. Returns: tuple: A 4-member tuple consisting of a responder callable, a ``dict`` containing parsed path fields (if any were specified in the matching route's URI template), a reference to the responder's resource instance, and the matching URI template. Note: If a responder was matched to the given URI, but the HTTP method was not found in the method_map for the responder, the responder callable element of the returned tuple will be `falcon.responder.bad_request`. Likewise, if no responder was matched for the given URI, then the responder callable element of the returned tuple will be `falcon.responder.path_not_found` """ path = req.path method = 'WEBSOCKET' if req.is_websocket else req.method uri_template = None route = self._router_search(path, req=req) if route is not None: try: resource, method_map, params, uri_template = route except ValueError: # NOTE(kgriffs): Older routers may not return the # template. But for performance reasons they should at # least return None if they don't support it. 
resource, method_map, params = route else: # NOTE(kgriffs): Older routers may indicate that no route # was found by returning (None, None, None). Therefore, we # normalize resource as the flag to indicate whether or not # a route was found, for the sake of backwards-compat. resource = None if resource is not None: try: responder = method_map[method] except KeyError: # NOTE(kgriffs): Dirty hack! We use __class__ here to avoid # binding self to the default responder method. We could # decorate the function itself with @staticmethod, but it # would perhaps be less obvious to the reader why this is # needed when just looking at the code in the reponder # module, so we just grab it directly here. responder = self.__class__._default_responder_bad_request else: params = {} for matcher, obj, is_sink in self._sink_and_static_routes: m = matcher.match(path) if m: if is_sink: params = m.groupdict() responder = obj break else: responder = self.__class__._default_responder_path_not_found return (responder, params, resource, uri_template) def _compose_status_response(self, req, resp, http_status): """Compose a response for the given HTTPStatus instance.""" # PERF(kgriffs): The code to set the status and headers is identical # to that used in _compose_error_response(), but refactoring in the # name of DRY isn't worth the extra CPU cycles. resp.status = http_status.status if http_status.headers is not None: resp.set_headers(http_status.headers) # NOTE(kgriffs): If http_status.text is None, that's OK because # it's acceptable to set resp.text to None (to indicate no body). resp.text = http_status.text def _compose_error_response(self, req, resp, error): """Compose a response for the given HTTPError instance.""" resp.status = error.status if error.headers is not None: resp.set_headers(error.headers) self._serialize_error(req, resp, error) def _http_status_handler(self, req, resp, status, params): self._compose_status_response(req, resp, status) def _http_error_handler(self, req, resp, error, params): self._compose_error_response(req, resp, error) def _python_error_handler(self, req, resp, error, params): req.log_error(traceback.format_exc()) self._compose_error_response(req, resp, HTTPInternalServerError()) def _find_error_handler(self, ex): # NOTE(csojinb): The `__mro__` class attribute returns the method # resolution order tuple, i.e. the complete linear inheritance chain # ``(type(ex), ..., object)``. For a valid exception class, the last # two entries in the tuple will always be ``BaseException``and # ``object``, so here we iterate over the lineage of exception types, # from most to least specific. # PERF(csojinb): The expression ``type(ex).__mro__[:-1]`` here is not # super readable, but we inline it to avoid function call overhead. for exc in type(ex).__mro__[:-1]: handler = self._error_handlers.get(exc) if handler is not None: return handler def _handle_exception(self, req, resp, ex, params): """Handle an exception raised from mw or a responder. Args: ex: Exception to handle req: Current request object to pass to the handler registered for the given exception type resp: Current response object to pass to the handler registered for the given exception type params: Responder params to pass to the handler registered for the given exception type Returns: bool: ``True`` if a handler was found and called for the exception, ``False`` otherwise. 
""" err_handler = self._find_error_handler(ex) # NOTE(caselit): Reset body, data and media before calling the handler resp.text = resp.data = resp.media = None if err_handler is not None: try: err_handler(req, resp, ex, params) except HTTPStatus as status: self._compose_status_response(req, resp, status) except HTTPError as error: self._compose_error_response(req, resp, error) return True # NOTE(kgriffs): No error handlers are defined for ex # and it is not one of (HTTPStatus, HTTPError), since it # would have matched one of the corresponding default # handlers. return False # PERF(kgriffs): Moved from api_helpers since it is slightly faster # to call using self, and this function is called for most # requests. def _get_body(self, resp, wsgi_file_wrapper=None): """Convert resp content into an iterable as required by PEP 333. Args: resp: Instance of falcon.Response wsgi_file_wrapper: Reference to wsgi.file_wrapper from the WSGI environ dict, if provided by the WSGI server. Used when resp.stream is a file-like object (default None). Returns: tuple: A two-member tuple of the form (iterable, content_length). The length is returned as ``None`` when unknown. The iterable is determined as follows: * If the result of render_body() is not ``None``, returns ([data], len(data)) * If resp.stream is not ``None``, returns resp.stream iterable using wsgi.file_wrapper, if necessary: (closeable_iterator, None) * Otherwise, returns ([], 0) """ data = resp.render_body() if data is not None: return [data], len(data) stream = resp.stream if stream is not None: # NOTE(kgriffs): Heuristic to quickly check if stream is # file-like. Not perfect, but should be good enough until # proven otherwise. if hasattr(stream, 'read'): if wsgi_file_wrapper is not None: # TODO(kgriffs): Make block size configurable at the # global level, pending experimentation to see how # useful that would be. See also the discussion on # this GitHub PR: http://goo.gl/XGrtDz iterable = wsgi_file_wrapper(stream, self._STREAM_BLOCK_SIZE) else: iterable = helpers.CloseableStreamIterator( stream, self._STREAM_BLOCK_SIZE ) else: iterable = stream return iterable, None return [], 0 def _update_sink_and_static_routes(self): if self._sink_before_static_route: self._sink_and_static_routes = tuple(self._sinks + self._static_routes) else: self._sink_and_static_routes = tuple(self._static_routes + self._sinks) # FILE falcon/falcon/inspect.py class RouteInfo(_Traversable): """Describes a route. Args: path (str): The path of this route. class_name (str): The class name of the responder of this route. source_info (str): The source path where this responder was defined. methods (List[RouteMethodInfo]): List of methods defined in the route. """ __visit_name__ = 'route' def __init__( self, path: str, class_name: str, source_info: str, methods: List[RouteMethodInfo], ): self.path = path self.class_name = class_name self.source_info = source_info self.methods = methods # FILE falcon/falcon/inspect.py def inspect_routes(app: App) -> 'List[RouteInfo]': """Inspects the routes of an application. Args: app (falcon.App): The application to inspect. Works with both :class:`falcon.App` and :class:`falcon.asgi.App`. Returns: List[RouteInfo]: A list of route descriptions for the application. """ router = app._router inspect_function = _supported_routers.get(type(router)) if inspect_function is None: raise TypeError( 'Unsupported router class {}. 
Use "register_router" ' 'to register a function that can inspect the router ' 'used by the provided application'.format(type(router)) ) return inspect_function(router) # FILE falcon/falcon/inspect.py _supported_routers = {} # type: Dict[Type, Callable] Based on the information above, please complete the function: #CURRENT_FILE: falcon/falcon/inspect.py from functools import partial import inspect from typing import Callable from typing import Dict from typing import List from typing import Optional from typing import Type from falcon.app import App from falcon.routing import CompiledRouter from falcon import app_helpers def register_router(router_class): """Register a function to inspect a particular router. This decorator registers a new function for a custom router class, so that it can be inspected with the function :func:`.inspect_routes`. An inspection function takes the router instance used by the application and returns a list of :class:`.RouteInfo`. Eg:: @register_router(MyRouterClass) def inspect_my_router(router): return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])] Args: router_class (Type): The router class to register. If already registered an error will be raised. """
falcon/falcon/inspect.py
falcon.inspect.inspect_compiled_router
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE falcon/falcon/inspect.py def inspect_app(app: App) -> 'AppInfo': """Inspects an application. Args: app (falcon.App): The application to inspect. Works with both :class:`falcon.App` and :class:`falcon.asgi.App`. Returns: AppInfo: The information regarding the application. Call :meth:`~.AppInfo.to_string` on the result to obtain a human-friendly representation. """ routes = inspect_routes(app) static = inspect_static_routes(app) sinks = inspect_sinks(app) error_handlers = inspect_error_handlers(app) middleware = inspect_middleware(app) return AppInfo(routes, middleware, static, sinks, error_handlers, app._ASGI) # FILE falcon/falcon/inspect.py class StringVisitor(InspectVisitor): """Visitor that returns a string representation of the info class. This is used automatically by calling ``to_string()`` on the info class. It can also be used directly by calling ``StringVisitor.process(info_instance)``. Args: verbose (bool, optional): Adds more information. Defaults to ``False``. internal (bool, optional): Also include internal route methods and error handlers added by the framework. Defaults to ``False``. name (str, optional): The name of the application, to be output at the beginning of the text. Defaults to ``'Falcon App'``. """ def __init__(self, verbose=False, internal=False, name=''): self.verbose = verbose self.internal = internal self.name = name self.indent = 0 def tab(self): """Get the current tabulation.""" ... def visit_route_method(self, route_method: RouteMethodInfo) -> str: """Visit a RouteMethodInfo instance. Usually called by `process`.""" ... def _methods_to_string(self, methods: List): """Return a string from the list of methods.""" ... def visit_route(self, route: RouteInfo) -> str: """Visit a RouteInfo instance. Usually called by `process`.""" text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route) if self.verbose: text += ' ({0.source_info})'.format(route) method_text = self._methods_to_string(route.methods) if not method_text: return text return '{}:\n{}'.format(text, method_text) def visit_static_route(self, static_route: StaticRouteInfo) -> str: """Visit a StaticRouteInfo instance. Usually called by `process`.""" ... def visit_sink(self, sink: SinkInfo) -> str: """Visit a SinkInfo instance. Usually called by `process`.""" ... def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str: """Visit a ErrorHandlerInfo instance. Usually called by `process`.""" ... def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str: """Visit a MiddlewareMethodInfo instance. Usually called by `process`.""" ... def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str: """Visit a ErrorHandlerInfo instance. Usually called by `process`.""" ... def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str: """Visit a MiddlewareTreeItemInfo instance. Usually called by `process`.""" ... def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str: """Visit a MiddlewareTreeInfo instance. Usually called by `process`.""" ... def visit_middleware(self, middleware: MiddlewareInfo) -> str: """Visit a MiddlewareInfo instance. Usually called by `process`.""" ... def visit_app(self, app: AppInfo) -> str: """Visit a AppInfo instance. Usually called by `process`.""" ... # FILE falcon/falcon/inspect.py class StringVisitor(InspectVisitor): """Visitor that returns a string representation of the info class. 
This is used automatically by calling ``to_string()`` on the info class. It can also be used directly by calling ``StringVisitor.process(info_instance)``. Args: verbose (bool, optional): Adds more information. Defaults to ``False``. internal (bool, optional): Also include internal route methods and error handlers added by the framework. Defaults to ``False``. name (str, optional): The name of the application, to be output at the beginning of the text. Defaults to ``'Falcon App'``. """ def __init__(self, verbose=False, internal=False, name=''): self.verbose = verbose self.internal = internal self.name = name self.indent = 0 def tab(self): """Get the current tabulation.""" ... def visit_route_method(self, route_method: RouteMethodInfo) -> str: """Visit a RouteMethodInfo instance. Usually called by `process`.""" text = '{0.method} - {0.function_name}'.format(route_method) if self.verbose: text += ' ({0.source_info})'.format(route_method) return text def _methods_to_string(self, methods: List): """Return a string from the list of methods.""" ... def visit_route(self, route: RouteInfo) -> str: """Visit a RouteInfo instance. Usually called by `process`.""" ... def visit_static_route(self, static_route: StaticRouteInfo) -> str: """Visit a StaticRouteInfo instance. Usually called by `process`.""" ... def visit_sink(self, sink: SinkInfo) -> str: """Visit a SinkInfo instance. Usually called by `process`.""" ... def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str: """Visit a ErrorHandlerInfo instance. Usually called by `process`.""" ... def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str: """Visit a MiddlewareMethodInfo instance. Usually called by `process`.""" ... def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str: """Visit a ErrorHandlerInfo instance. Usually called by `process`.""" ... def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str: """Visit a MiddlewareTreeItemInfo instance. Usually called by `process`.""" ... def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str: """Visit a MiddlewareTreeInfo instance. Usually called by `process`.""" ... def visit_middleware(self, middleware: MiddlewareInfo) -> str: """Visit a MiddlewareInfo instance. Usually called by `process`.""" ... def visit_app(self, app: AppInfo) -> str: """Visit a AppInfo instance. Usually called by `process`.""" ... # FILE falcon/falcon/inspect.py class RouteInfo(_Traversable): """Describes a route. Args: path (str): The path of this route. class_name (str): The class name of the responder of this route. source_info (str): The source path where this responder was defined. methods (List[RouteMethodInfo]): List of methods defined in the route. """ def __init__( self, path: str, class_name: str, source_info: str, methods: List[RouteMethodInfo], ): self.path = path self.class_name = class_name self.source_info = source_info self.methods = methods # FILE falcon/falcon/inspect.py class MiddlewareClassInfo(_Traversable): """Describes a middleware class. Args: name (str): The name of the middleware class. source_info (str): The source path where the middleware was defined. methods (List[MiddlewareMethodInfo]): List of method defined by the middleware class. """ def __init__( self, name: str, source_info: str, methods: List[MiddlewareMethodInfo] ): self.name = name self.source_info = source_info self.methods = methods # FILE falcon/falcon/inspect.py class RouteInfo(_Traversable): """Describes a route. 
Args: path (str): The path of this route. class_name (str): The class name of the responder of this route. source_info (str): The source path where this responder was defined. methods (List[RouteMethodInfo]): List of methods defined in the route. """ __visit_name__ = 'route' def __init__( self, path: str, class_name: str, source_info: str, methods: List[RouteMethodInfo], ): self.path = path self.class_name = class_name self.source_info = source_info self.methods = methods # FILE falcon/falcon/inspect.py class RouteMethodInfo(_Traversable): """Describes a responder method. Args: method (str): The HTTP method of this responder. source_info (str): The source path of this function. function_name (str): Name of the function. internal (bool): Whether or not this was a default responder added by the framework. Attributes: suffix (str): The suffix of this route function. This is set to an empty string when the function has no suffix. """ __visit_name__ = 'route_method' def __init__( self, method: str, source_info: str, function_name: str, internal: bool ): self.method = method self.source_info = source_info self.function_name = function_name self.internal = internal # NOTE(CaselIT): internal falcon names do not start with 'on' # and do not have suffix if function_name.startswith('on'): self.suffix = '_'.join(function_name.split('_')[2:]) else: self.suffix = '' # FILE falcon/falcon/app.py class App: """This class is the main entry point into a Falcon-based WSGI app. Each App instance provides a callable `WSGI <https://www.python.org/dev/peps/pep-3333/>`_ interface and a routing engine (for ASGI applications, see :class:`falcon.asgi.App`). Note: The ``API`` class was renamed to ``App`` in Falcon 3.0. The old class name remains available as an alias for backwards-compatibility, but will be removed in a future release. Keyword Arguments: media_type (str): Default media type to use when initializing :py:class:`~.RequestOptions` and :py:class:`~.ResponseOptions`. The ``falcon`` module provides a number of constants for common media types, such as ``falcon.MEDIA_MSGPACK``, ``falcon.MEDIA_YAML``, ``falcon.MEDIA_XML``, etc. middleware: Either a single middleware component object or an iterable of objects (instantiated classes) that implement the following middleware component interface. Note that it is only necessary to implement the methods for the events you would like to handle; Falcon simply skips over any missing middleware methods:: class ExampleComponent: def process_request(self, req, resp): \"\"\"Process the request before routing it. Note: Because Falcon routes each request based on req.path, a request can be effectively re-routed by setting that attribute to a new value from within process_request(). Args: req: Request object that will eventually be routed to an on_* responder method. resp: Response object that will be routed to the on_* responder. \"\"\" def process_resource(self, req, resp, resource, params): \"\"\"Process the request and resource *after* routing. Note: This method is only called when the request matches a route to a resource. Args: req: Request object that will be passed to the routed responder. resp: Response object that will be passed to the responder. resource: Resource object to which the request was routed. May be None if no route was found for the request. params: A dict-like object representing any additional params derived from the route's URI template fields, that will be passed to the resource's responder method as keyword arguments. 
\"\"\" def process_response(self, req, resp, resource, req_succeeded) \"\"\"Post-processing of the response (after routing). Args: req: Request object. resp: Response object. resource: Resource object to which the request was routed. May be None if no route was found for the request. req_succeeded: True if no exceptions were raised while the framework processed and routed the request; otherwise False. \"\"\" (See also: :ref:`Middleware <middleware>`) request_type: ``Request``-like class to use instead of Falcon's default class. Among other things, this feature affords inheriting from :class:`falcon.Request` in order to override the ``context_type`` class variable (default: :class:`falcon.Request`) response_type: ``Response``-like class to use instead of Falcon's default class (default: :class:`falcon.Response`) router (object): An instance of a custom router to use in lieu of the default engine. (See also: :ref:`Custom Routers <routing_custom>`) independent_middleware (bool): Set to ``False`` if response middleware should not be executed independently of whether or not request middleware raises an exception (default ``True``). When this option is set to ``False``, a middleware component's ``process_response()`` method will NOT be called when that same component's ``process_request()`` (or that of a component higher up in the stack) raises an exception. cors_enable (bool): Set this flag to ``True`` to enable a simple CORS policy for all responses, including support for preflighted requests. An instance of :py:class:`~.CORSMiddleware` can instead be passed to the middleware argument to customize its behaviour. (default ``False``). (See also: :ref:`CORS <cors>`) sink_before_static_route (bool): Indicates if the sinks should be processed before (when ``True``) or after (when ``False``) the static routes. This has an effect only if no route was matched. (default ``True``) Attributes: req_options: A set of behavioral options related to incoming requests. (See also: :py:class:`~.RequestOptions`) resp_options: A set of behavioral options related to outgoing responses. (See also: :py:class:`~.ResponseOptions`) router_options: Configuration options for the router. If a custom router is in use, and it does not expose any configurable options, referencing this attribute will raise an instance of ``AttributeError``. (See also: :ref:`CompiledRouterOptions <compiled_router_options>`) """ _META_METHODS = frozenset(constants._META_METHODS) _STREAM_BLOCK_SIZE = 8 * 1024 # 8 KiB _STATIC_ROUTE_TYPE = routing.StaticRoute # NOTE(kgriffs): This makes it easier to tell what we are dealing with # without having to import falcon.asgi to get at the falcon.asgi.App # type (which we may not be able to do under Python 3.5). _ASGI = False # NOTE(kgriffs): We do it like this rather than just implementing the # methods directly on the class, so that we keep all the default # responders colocated in the same module. This will make it more # likely that the implementations of the async and non-async versions # of the methods are kept in sync (pun intended). _default_responder_bad_request = responders.bad_request _default_responder_path_not_found = responders.path_not_found __slots__ = ( '_cors_enable', '_error_handlers', '_independent_middleware', '_middleware', # NOTE(kgriffs): WebSocket is currently only supported for # ASGI apps, but we may add support for WSGI at some point. 
'_middleware_ws', '_request_type', '_response_type', '_router_search', '_router', '_serialize_error', '_sink_and_static_routes', '_sink_before_static_route', '_sinks', '_static_routes', '_unprepared_middleware', 'req_options', 'resp_options', ) def __init__( self, media_type=constants.DEFAULT_MEDIA_TYPE, request_type=Request, response_type=Response, middleware=None, router=None, independent_middleware=True, cors_enable=False, sink_before_static_route=True, ): self._sink_before_static_route = sink_before_static_route self._sinks = [] self._static_routes = [] self._sink_and_static_routes = () if cors_enable: cm = CORSMiddleware() if middleware is None: middleware = [cm] else: try: # NOTE(kgriffs): Check to see if middleware is an # iterable, and if so, append the CORSMiddleware # instance. iter(middleware) middleware = list(middleware) middleware.append(cm) except TypeError: # NOTE(kgriffs): Assume the middleware kwarg references # a single middleware component. middleware = [middleware, cm] # set middleware self._unprepared_middleware = [] self._independent_middleware = independent_middleware self.add_middleware(middleware) self._router = router or routing.DefaultRouter() self._router_search = self._router.find self._request_type = request_type self._response_type = response_type self._error_handlers = {} self._serialize_error = helpers.default_serialize_error self.req_options = RequestOptions() self.resp_options = ResponseOptions() self.req_options.default_media_type = media_type self.resp_options.default_media_type = media_type # NOTE(kgriffs): Add default error handlers self.add_error_handler(Exception, self._python_error_handler) self.add_error_handler(HTTPError, self._http_error_handler) self.add_error_handler(HTTPStatus, self._http_status_handler) def __call__(self, env, start_response): # noqa: C901 """WSGI `app` method. Makes instances of App callable from a WSGI server. May be used to host an App or called directly in order to simulate requests when testing the App. (See also: PEP 3333) Args: env (dict): A WSGI environment dictionary start_response (callable): A WSGI helper function for setting status and headers on a response. """ req = self._request_type(env, options=self.req_options) resp = self._response_type(options=self.resp_options) resource = None responder = None params = {} dependent_mw_resp_stack = [] mw_req_stack, mw_rsrc_stack, mw_resp_stack = self._middleware req_succeeded = False try: if req.method in self._META_METHODS: raise HTTPBadRequest() # NOTE(ealogar): The execution of request middleware # should be before routing. This will allow request mw # to modify the path. # NOTE: if flag set to use independent middleware, execute # request middleware independently. Otherwise, only queue # response middleware after request middleware succeeds. if self._independent_middleware: for process_request in mw_req_stack: process_request(req, resp) if resp.complete: break else: for process_request, process_response in mw_req_stack: if process_request and not resp.complete: process_request(req, resp) if process_response: dependent_mw_resp_stack.insert(0, process_response) if not resp.complete: # NOTE(warsaw): Moved this to inside the try except # because it is possible when using object-based # traversal for _get_responder() to fail. An example is # a case where an object does not have the requested # next-hop child resource. In that case, the object # being asked to dispatch to its child will raise an # HTTP exception signalling the problem, e.g. a 404. 
responder, params, resource, req.uri_template = self._get_responder(req) except Exception as ex: if not self._handle_exception(req, resp, ex, params): raise else: try: # NOTE(kgriffs): If the request did not match any # route, a default responder is returned and the # resource is None. In that case, we skip the # resource middleware methods. Resource will also be # None when a middleware method already set # resp.complete to True. if resource: # Call process_resource middleware methods. for process_resource in mw_rsrc_stack: process_resource(req, resp, resource, params) if resp.complete: break if not resp.complete: responder(req, resp, **params) req_succeeded = True except Exception as ex: if not self._handle_exception(req, resp, ex, params): raise # Call process_response middleware methods. for process_response in mw_resp_stack or dependent_mw_resp_stack: try: process_response(req, resp, resource, req_succeeded) except Exception as ex: if not self._handle_exception(req, resp, ex, params): raise req_succeeded = False body = [] length = 0 try: body, length = self._get_body(resp, env.get('wsgi.file_wrapper')) except Exception as ex: if not self._handle_exception(req, resp, ex, params): raise req_succeeded = False resp_status = code_to_http_status(resp.status) default_media_type = self.resp_options.default_media_type if req.method == 'HEAD' or resp_status in _BODILESS_STATUS_CODES: body = [] # PERF(vytas): move check for the less common and much faster path # of resp_status being in {204, 304} here; NB: this builds on the # assumption _TYPELESS_STATUS_CODES <= _BODILESS_STATUS_CODES. # NOTE(kgriffs): Based on wsgiref.validate's interpretation of # RFC 2616, as commented in that module's source code. The # presence of the Content-Length header is not similarly # enforced. if resp_status in _TYPELESS_STATUS_CODES: default_media_type = None elif ( length is not None and req.method == 'HEAD' and resp_status not in _BODILESS_STATUS_CODES and 'content-length' not in resp._headers ): # NOTE(kgriffs): We really should be returning a Content-Length # in this case according to my reading of the RFCs. By # optionally using len(data) we let a resource simulate HEAD # by turning around and calling it's own on_get(). resp._headers['content-length'] = str(length) else: # PERF(kgriffs): Böse mußt sein. Operate directly on resp._headers # to reduce overhead since this is a hot/critical code path. # NOTE(kgriffs): We always set content-length to match the # body bytes length, even if content-length is already set. The # reason being that web servers and LBs behave unpredictably # when the header doesn't match the body (sometimes choosing to # drop the HTTP connection prematurely, for example). if length is not None: resp._headers['content-length'] = str(length) headers = resp._wsgi_headers(default_media_type) # Return the response per the WSGI spec. start_response(resp_status, headers) return body @property def router_options(self): return self._router.options def add_middleware(self, middleware): """Add one or more additional middleware components. Arguments: middleware: Either a single middleware component or an iterable of components to add. The component(s) will be invoked, in order, as if they had been appended to the original middleware list passed to the class initializer. """ # NOTE(kgriffs): Since this is called by the initializer, there is # the chance that middleware may be None. 
if middleware: try: self._unprepared_middleware += middleware except TypeError: # middleware is not iterable; assume it is just one bare component self._unprepared_middleware.append(middleware) # NOTE(kgriffs): Even if middleware is None or an empty list, we still # need to make sure self._middleware is initialized if this is the # first call to add_middleware(). self._middleware = self._prepare_middleware( self._unprepared_middleware, independent_middleware=self._independent_middleware, ) def add_route(self, uri_template, resource, **kwargs): """Associate a templatized URI path with a resource. Falcon routes incoming requests to resources based on a set of URI templates. If the path requested by the client matches the template for a given route, the request is then passed on to the associated resource for processing. Note: If no route matches the request, control then passes to a default responder that simply raises an instance of :class:`~.HTTPRouteNotFound`. By default, this error will be rendered as a 404 response, but this behavior can be modified by adding a custom error handler (see also :ref:`this FAQ topic <faq_override_404_500_handlers>`). On the other hand, if a route is matched but the resource does not implement a responder for the requested HTTP method, the framework invokes a default responder that raises an instance of :class:`~.HTTPMethodNotAllowed`. This method delegates to the configured router's ``add_route()`` method. To override the default behavior, pass a custom router object to the :class:`~.App` initializer. (See also: :ref:`Routing <routing>`) Args: uri_template (str): A templatized URI. Care must be taken to ensure the template does not mask any sink patterns, if any are registered. (See also: :meth:`~.App.add_sink`) Warning: If :attr:`~falcon.RequestOptions.strip_url_path_trailing_slash` is enabled, `uri_template` should be provided without a trailing slash. (See also: :ref:`trailing_slash_in_path`) resource (instance): Object which represents a REST resource. Falcon will pass GET requests to ``on_get()``, PUT requests to ``on_put()``, etc. If any HTTP methods are not supported by your resource, simply don't define the corresponding request handlers, and Falcon will do the right thing. Note: When using an async version of the ``App``, all request handlers must be awaitable coroutine functions. Keyword Args: suffix (str): Optional responder name suffix for this route. If a suffix is provided, Falcon will map GET requests to ``on_get_{suffix}()``, POST requests to ``on_post_{suffix}()``, etc. In this way, multiple closely-related routes can be mapped to the same resource. For example, a single resource class can use suffixed responders to distinguish requests for a single item vs. a collection of those same items. Another class might use a suffixed responder to handle a shortlink route in addition to the regular route for the resource. For example:: class Baz(object): def on_get_foo(self, req, resp): pass def on_get_bar(self, req, resp): pass baz = Baz() app = falcon.App() app.add_route('/foo', baz, suffix='foo') app.add_route('/bar', baz, suffix='bar') compile (bool): Optional flag that can be provided when using the default :class:`.CompiledRouter` to compile the routing logic on this call, since it will otherwise delay compilation until the first request is routed. See :meth:`.CompiledRouter.add_route` for further details. Note: Any additional keyword arguments not defined above are passed through to the underlying router's ``add_route()`` method. 
The default router ignores any additional keyword arguments, but custom routers may take advantage of this feature to receive additional options when setting up routes. Custom routers MUST accept such arguments using the variadic pattern (``**kwargs``), and ignore any keyword arguments that they don't support. """ # NOTE(richardolsson): Doing the validation here means it doesn't have # to be duplicated in every future router implementation. if not isinstance(uri_template, str): raise TypeError('uri_template is not a string') if not uri_template.startswith('/'): raise ValueError("uri_template must start with '/'") if '//' in uri_template: raise ValueError("uri_template may not contain '//'") self._router.add_route(uri_template, resource, **kwargs) def add_static_route( self, prefix, directory, downloadable=False, fallback_filename=None ): """Add a route to a directory of static files. Static routes provide a way to serve files directly. This feature provides an alternative to serving files at the web server level when you don't have that option, when authorization is required, or for testing purposes. Warning: Serving files directly from the web server, rather than through the Python app, will always be more efficient, and therefore should be preferred in production deployments. For security reasons, the directory and the fallback_filename (if provided) should be read only for the account running the application. Warning: If you need to serve large files and/or progressive downloads (such as in the case of video streaming) through the Falcon app, check that your application server's timeout settings can accomodate the expected request duration (for instance, the popular Gunicorn kills ``sync`` workers after 30 seconds unless configured otherwise). Note: For ASGI apps, file reads are made non-blocking by scheduling them on the default executor. Static routes are matched in LIFO order. Therefore, if the same prefix is used for two routes, the second one will override the first. This also means that more specific routes should be added *after* less specific ones. For example, the following sequence would result in ``'/foo/bar/thing.js'`` being mapped to the ``'/foo/bar'`` route, and ``'/foo/xyz/thing.js'`` being mapped to the ``'/foo'`` route:: app.add_static_route('/foo', foo_path) app.add_static_route('/foo/bar', foobar_path) Args: prefix (str): The path prefix to match for this route. If the path in the requested URI starts with this string, the remainder of the path will be appended to the source directory to determine the file to serve. This is done in a secure manner to prevent an attacker from requesting a file outside the specified directory. Note that static routes are matched in LIFO order, and are only attempted after checking dynamic routes and sinks. directory (Union[str, pathlib.Path]): The source directory from which to serve files. downloadable (bool): Set to ``True`` to include a Content-Disposition header in the response. The "filename" directive is simply set to the name of the requested file. fallback_filename (str): Fallback filename used when the requested file is not found. Can be a relative path inside the prefix folder or any valid absolute path. """ sr = self._STATIC_ROUTE_TYPE( prefix, directory, downloadable=downloadable, fallback_filename=fallback_filename, ) self._static_routes.insert(0, (sr, sr, False)) self._update_sink_and_static_routes() def add_sink(self, sink, prefix=r'/'): """Register a sink method for the App. 
If no route matches a request, but the path in the requested URI matches a sink prefix, Falcon will pass control to the associated sink, regardless of the HTTP method requested. Using sinks, you can drain and dynamically handle a large number of routes, when creating static resources and responders would be impractical. For example, you might use a sink to create a smart proxy that forwards requests to one or more backend services. Args: sink (callable): A callable taking the form ``func(req, resp, **kwargs)``. Note: When using an async version of the ``App``, this must be a coroutine. prefix (str): A regex string, typically starting with '/', which will trigger the sink if it matches the path portion of the request's URI. Both strings and precompiled regex objects may be specified. Characters are matched starting at the beginning of the URI path. Note: Named groups are converted to kwargs and passed to the sink as such. Warning: If the prefix overlaps a registered route template, the route will take precedence and mask the sink. (See also: :meth:`~.add_route`) """ if not self._ASGI and iscoroutinefunction(sink): raise CompatibilityError( 'The sink method must be a regular synchronous function ' 'in order to be used with a WSGI app.' ) if not hasattr(prefix, 'match'): # Assume it is a string prefix = re.compile(prefix) # NOTE(kgriffs): Insert at the head of the list such that # in the case of a duplicate prefix, the last one added # is preferred. self._sinks.insert(0, (prefix, sink, True)) self._update_sink_and_static_routes() def add_error_handler(self, exception, handler=None): """Register a handler for one or more exception types. Error handlers may be registered for any exception type, including :class:`~.HTTPError` or :class:`~.HTTPStatus`. This feature provides a central location for logging and otherwise handling exceptions raised by responders, hooks, and middleware components. A handler can raise an instance of :class:`~.HTTPError` or :class:`~.HTTPStatus` to communicate information about the issue to the client. Alternatively, a handler may modify `resp` directly. An error handler "matches" a raised exception if the exception is an instance of the corresponding exception type. If more than one error handler matches the raised exception, the framework will choose the most specific one, as determined by the method resolution order of the raised exception type. If multiple error handlers are registered for the *same* exception class, then the most recently-registered handler is used. For example, suppose we register error handlers as follows:: app = App() app.add_error_handler(falcon.HTTPNotFound, custom_handle_not_found) app.add_error_handler(falcon.HTTPError, custom_handle_http_error) app.add_error_handler(Exception, custom_handle_uncaught_exception) app.add_error_handler(falcon.HTTPNotFound, custom_handle_404) If an instance of ``falcon.HTTPForbidden`` is raised, it will be handled by ``custom_handle_http_error()``. ``falcon.HTTPError`` is a superclass of ``falcon.HTTPForbidden`` and a subclass of ``Exception``, so it is the most specific exception type with a registered handler. If an instance of ``falcon.HTTPNotFound`` is raised, it will be handled by ``custom_handle_404()``, not by ``custom_handle_not_found()``, because ``custom_handle_404()`` was registered more recently. .. 
Note:: By default, the framework installs three handlers, one for :class:`~.HTTPError`, one for :class:`~.HTTPStatus`, and one for the standard ``Exception`` type, which prevents passing uncaught exceptions to the WSGI server. These can be overridden by adding a custom error handler method for the exception type in question. Args: exception (type or iterable of types): When handling a request, whenever an error occurs that is an instance of the specified type(s), the associated handler will be called. Either a single type or an iterable of types may be specified. handler (callable): A function or callable object taking the form ``func(req, resp, ex, params)``. If not specified explicitly, the handler will default to ``exception.handle``, where ``exception`` is the error type specified above, and ``handle`` is a static method (i.e., decorated with ``@staticmethod``) that accepts the same params just described. For example:: class CustomException(CustomBaseException): @staticmethod def handle(req, resp, ex, params): # TODO: Log the error # Convert to an instance of falcon.HTTPError raise falcon.HTTPError(falcon.HTTP_792) If an iterable of exception types is specified instead of a single type, the handler must be explicitly specified. .. versionchanged:: 3.0 The error handler is now selected by the most-specific matching error class, rather than the most-recently registered matching error class. """ def wrap_old_handler(old_handler): # NOTE(kgriffs): This branch *is* actually tested by # test_error_handlers.test_handler_signature_shim_asgi() (as # verified manually via pdb), but for some reason coverage # tracking isn't picking it up. if iscoroutinefunction(old_handler): # pragma: no cover @wraps(old_handler) async def handler_async(req, resp, ex, params): await old_handler(ex, req, resp, params) return handler_async @wraps(old_handler) def handler(req, resp, ex, params): old_handler(ex, req, resp, params) return handler if handler is None: try: handler = exception.handle except AttributeError: raise AttributeError( 'handler must either be specified ' 'explicitly or defined as a static' 'method named "handle" that is a ' 'member of the given exception class.' ) # TODO(vytas): Remove this shimming in a future Falcon version. arg_names = tuple(misc.get_argnames(handler)) if arg_names[0:1] in ( ('e',), ('err',), ('error',), ('ex',), ('exception',), ) or arg_names[1:3] in (('req', 'resp'), ('request', 'response')): handler = wrap_old_handler(handler) try: exception_tuple = tuple(exception) except TypeError: exception_tuple = (exception,) for exc in exception_tuple: if not issubclass(exc, BaseException): raise TypeError('"exception" must be an exception type.') self._error_handlers[exc] = handler def set_error_serializer(self, serializer): """Override the default serializer for instances of :class:`~.HTTPError`. When a responder raises an instance of :class:`~.HTTPError`, Falcon converts it to an HTTP response automatically. The default serializer supports JSON and XML, but may be overridden by this method to use a custom serializer in order to support other media types. Note: If a custom media type is used and the type includes a "+json" or "+xml" suffix, the default serializer will convert the error to JSON or XML, respectively. Note: A custom serializer set with this method may not be called if the default error handler for :class:`~.HTTPError` has been overriden. See :meth:`~.add_error_handler` for more details. 
The :class:`~.HTTPError` class contains helper methods, such as `to_json()` and `to_dict()`, that can be used from within custom serializers. For example:: def my_serializer(req, resp, exception): representation = None preferred = req.client_prefers((falcon.MEDIA_YAML, falcon.MEDIA_JSON)) if preferred is not None: if preferred == falcon.MEDIA_JSON: resp.data = exception.to_json() else: resp.text = yaml.dump(exception.to_dict(), encoding=None) resp.content_type = preferred resp.append_header('Vary', 'Accept') Args: serializer (callable): A function taking the form ``func(req, resp, exception)``, where `req` is the request object that was passed to the responder method, `resp` is the response object, and `exception` is an instance of ``falcon.HTTPError``. """ self._serialize_error = serializer # ------------------------------------------------------------------------ # Helpers that require self # ------------------------------------------------------------------------ def _prepare_middleware(self, middleware=None, independent_middleware=False): return helpers.prepare_middleware( middleware=middleware, independent_middleware=independent_middleware ) def _get_responder(self, req): """Search routes for a matching responder. Args: req (Request): The request object. Returns: tuple: A 4-member tuple consisting of a responder callable, a ``dict`` containing parsed path fields (if any were specified in the matching route's URI template), a reference to the responder's resource instance, and the matching URI template. Note: If a responder was matched to the given URI, but the HTTP method was not found in the method_map for the responder, the responder callable element of the returned tuple will be `falcon.responder.bad_request`. Likewise, if no responder was matched for the given URI, then the responder callable element of the returned tuple will be `falcon.responder.path_not_found` """ path = req.path method = 'WEBSOCKET' if req.is_websocket else req.method uri_template = None route = self._router_search(path, req=req) if route is not None: try: resource, method_map, params, uri_template = route except ValueError: # NOTE(kgriffs): Older routers may not return the # template. But for performance reasons they should at # least return None if they don't support it. resource, method_map, params = route else: # NOTE(kgriffs): Older routers may indicate that no route # was found by returning (None, None, None). Therefore, we # normalize resource as the flag to indicate whether or not # a route was found, for the sake of backwards-compat. resource = None if resource is not None: try: responder = method_map[method] except KeyError: # NOTE(kgriffs): Dirty hack! We use __class__ here to avoid # binding self to the default responder method. We could # decorate the function itself with @staticmethod, but it # would perhaps be less obvious to the reader why this is # needed when just looking at the code in the reponder # module, so we just grab it directly here. 
responder = self.__class__._default_responder_bad_request else: params = {} for matcher, obj, is_sink in self._sink_and_static_routes: m = matcher.match(path) if m: if is_sink: params = m.groupdict() responder = obj break else: responder = self.__class__._default_responder_path_not_found return (responder, params, resource, uri_template) def _compose_status_response(self, req, resp, http_status): """Compose a response for the given HTTPStatus instance.""" # PERF(kgriffs): The code to set the status and headers is identical # to that used in _compose_error_response(), but refactoring in the # name of DRY isn't worth the extra CPU cycles. resp.status = http_status.status if http_status.headers is not None: resp.set_headers(http_status.headers) # NOTE(kgriffs): If http_status.text is None, that's OK because # it's acceptable to set resp.text to None (to indicate no body). resp.text = http_status.text def _compose_error_response(self, req, resp, error): """Compose a response for the given HTTPError instance.""" resp.status = error.status if error.headers is not None: resp.set_headers(error.headers) self._serialize_error(req, resp, error) def _http_status_handler(self, req, resp, status, params): self._compose_status_response(req, resp, status) def _http_error_handler(self, req, resp, error, params): self._compose_error_response(req, resp, error) def _python_error_handler(self, req, resp, error, params): req.log_error(traceback.format_exc()) self._compose_error_response(req, resp, HTTPInternalServerError()) def _find_error_handler(self, ex): # NOTE(csojinb): The `__mro__` class attribute returns the method # resolution order tuple, i.e. the complete linear inheritance chain # ``(type(ex), ..., object)``. For a valid exception class, the last # two entries in the tuple will always be ``BaseException``and # ``object``, so here we iterate over the lineage of exception types, # from most to least specific. # PERF(csojinb): The expression ``type(ex).__mro__[:-1]`` here is not # super readable, but we inline it to avoid function call overhead. for exc in type(ex).__mro__[:-1]: handler = self._error_handlers.get(exc) if handler is not None: return handler def _handle_exception(self, req, resp, ex, params): """Handle an exception raised from mw or a responder. Args: ex: Exception to handle req: Current request object to pass to the handler registered for the given exception type resp: Current response object to pass to the handler registered for the given exception type params: Responder params to pass to the handler registered for the given exception type Returns: bool: ``True`` if a handler was found and called for the exception, ``False`` otherwise. """ err_handler = self._find_error_handler(ex) # NOTE(caselit): Reset body, data and media before calling the handler resp.text = resp.data = resp.media = None if err_handler is not None: try: err_handler(req, resp, ex, params) except HTTPStatus as status: self._compose_status_response(req, resp, status) except HTTPError as error: self._compose_error_response(req, resp, error) return True # NOTE(kgriffs): No error handlers are defined for ex # and it is not one of (HTTPStatus, HTTPError), since it # would have matched one of the corresponding default # handlers. return False # PERF(kgriffs): Moved from api_helpers since it is slightly faster # to call using self, and this function is called for most # requests. def _get_body(self, resp, wsgi_file_wrapper=None): """Convert resp content into an iterable as required by PEP 333. 
Args: resp: Instance of falcon.Response wsgi_file_wrapper: Reference to wsgi.file_wrapper from the WSGI environ dict, if provided by the WSGI server. Used when resp.stream is a file-like object (default None). Returns: tuple: A two-member tuple of the form (iterable, content_length). The length is returned as ``None`` when unknown. The iterable is determined as follows: * If the result of render_body() is not ``None``, returns ([data], len(data)) * If resp.stream is not ``None``, returns resp.stream iterable using wsgi.file_wrapper, if necessary: (closeable_iterator, None) * Otherwise, returns ([], 0) """ data = resp.render_body() if data is not None: return [data], len(data) stream = resp.stream if stream is not None: # NOTE(kgriffs): Heuristic to quickly check if stream is # file-like. Not perfect, but should be good enough until # proven otherwise. if hasattr(stream, 'read'): if wsgi_file_wrapper is not None: # TODO(kgriffs): Make block size configurable at the # global level, pending experimentation to see how # useful that would be. See also the discussion on # this GitHub PR: http://goo.gl/XGrtDz iterable = wsgi_file_wrapper(stream, self._STREAM_BLOCK_SIZE) else: iterable = helpers.CloseableStreamIterator( stream, self._STREAM_BLOCK_SIZE ) else: iterable = stream return iterable, None return [], 0 def _update_sink_and_static_routes(self): if self._sink_before_static_route: self._sink_and_static_routes = tuple(self._sinks + self._static_routes) else: self._sink_and_static_routes = tuple(self._static_routes + self._sinks) # FILE falcon/falcon/routing/compiled.py class CompiledRouter: """Fast URI router which compiles its routing logic to Python code. Generally you do not need to use this router class directly, as an instance is created by default when the falcon.App class is initialized. The router treats URI paths as a tree of URI segments and searches by checking the URI one segment at a time. Instead of interpreting the route tree for each look-up, it generates inlined, bespoke Python code to perform the search, then compiles that code. This makes the route processing quite fast. The compilation process is delayed until the first use of the router (on the first routed request) to reduce the time it takes to start the application. This may noticeably delay the first response of the application when a large number of routes have been added. When adding the last route to the application a `compile` flag may be provided to force the router to compile immediately, thus avoiding any delay for the first response. Note: When using a multi-threaded web server to host the application, it is possible that multiple requests may be routed at the same time upon startup. Therefore, the framework employs a lock to ensure that only a single compilation of the decision tree is performed. See also :meth:`.CompiledRouter.add_route` """ def __init__(self): self._ast = None self._converters = None self._finder_src = None self._options = CompiledRouterOptions() # PERF(kgriffs): This is usually an anti-pattern, but we do it # here to reduce lookup time. self._converter_map = self._options.converters.data self._patterns = None self._return_values = None self._roots = [] # NOTE(caselit): set _find to the delayed compile method to ensure that # compile is called when the router is first used self._find = self._compile_and_find self._compile_lock = Lock() def options(self): ... def finder_src(self): ... 
def map_http_methods(self, resource, **kwargs): """Map HTTP methods (e.g., GET, POST) to methods of a resource object. This method is called from :meth:`~.add_route` and may be overridden to provide a custom mapping strategy. Args: resource (instance): Object which represents a REST resource. The default maps the HTTP method ``GET`` to ``on_get()``, ``POST`` to ``on_post()``, etc. If any HTTP methods are not supported by your resource, simply don't define the corresponding request handlers, and Falcon will do the right thing. Keyword Args: suffix (str): Optional responder name suffix for this route. If a suffix is provided, Falcon will map GET requests to ``on_get_{suffix}()``, POST requests to ``on_post_{suffix}()``, etc. In this way, multiple closely-related routes can be mapped to the same resource. For example, a single resource class can use suffixed responders to distinguish requests for a single item vs. a collection of those same items. Another class might use a suffixed responder to handle a shortlink route in addition to the regular route for the resource. """ return map_http_methods(resource, suffix=kwargs.get('suffix', None)) def add_route(self, uri_template, resource, **kwargs): """Add a route between a URI path template and a resource. This method may be overridden to customize how a route is added. Args: uri_template (str): A URI template to use for the route resource (object): The resource instance to associate with the URI template. Keyword Args: suffix (str): Optional responder name suffix for this route. If a suffix is provided, Falcon will map GET requests to ``on_get_{suffix}()``, POST requests to ``on_post_{suffix}()``, etc. In this way, multiple closely-related routes can be mapped to the same resource. For example, a single resource class can use suffixed responders to distinguish requests for a single item vs. a collection of those same items. Another class might use a suffixed responder to handle a shortlink route in addition to the regular route for the resource. compile (bool): Optional flag that can be used to compile the routing logic on this call. By default, :class:`.CompiledRouter` delays compilation until the first request is routed. This may introduce a noticeable amount of latency when handling the first request, especially when the application implements a large number of routes. Setting `compile` to ``True`` when the last route is added ensures that the first request will not be delayed in this case (defaults to ``False``). Note: Always setting this flag to ``True`` may slow down the addition of new routes when hundreds of them are added at once. It is advisable to only set this flag to ``True`` when adding the final route. """ ... def find(self, uri, req=None): """Search for a route that matches the given partial URI. Args: uri(str): The requested path to route. Keyword Args: req: The :class:`falcon.Request` or :class:`falcon.asgi.Request` object that will be passed to the routed responder. Currently the value of this argument is ignored by :class:`~.CompiledRouter`. Routing is based solely on the path. Returns: tuple: A 4-member tuple composed of (resource, method_map, params, uri_template), or ``None`` if no route matches the requested path. """ ... def _require_coroutine_responders(self, method_map): ... def _require_non_coroutine_responders(self, method_map): ... def _validate_template_segment(self, segment, used_names): """Validate a single path segment of a URI template. 1. 
Ensure field names are valid Python identifiers, since they will be passed as kwargs to responders. 2. Check that there are no duplicate names, since that causes (at least) the following problems: a. For simple nodes, values from deeper nodes overwrite values from more shallow nodes. b. For complex nodes, re.compile() raises a nasty error 3. Check that when the converter syntax is used, the named converter exists. """ ... def _generate_ast( """Generate a coarse AST for the router.""" ... def _generate_conversion_ast( ... def _compile(self): """Generate Python code for the entire routing tree. The generated code is compiled and the resulting Python method is returned. """ ... def _instantiate_converter(self, klass, argstr=None): ... def _compile_and_find(self, path, _return_values, _patterns, _converters, params): """Compile the router, set the `_find` attribute and return its result. This method is set to the `_find` attribute to delay the compilation of the router until it's used for the first time. Subsequent calls to `_find` will be processed by the actual routing function. This method must have the same signature as the function returned by the :meth:`.CompiledRouter._compile`. """ ... # FILE falcon/falcon/inspect.py class InspectVisitor: """Base visitor class that implements the `process` method. Subclasses must implement ``visit_<name>`` methods for each supported class. """ def process(self, instance: _Traversable): """Process the instance, by calling the appropriate visit method. Uses the `__visit_name__` attribute of the `instance` to obtain the method to use. Args: instance (_Traversable): The instance to process. """ try: return getattr(self, 'visit_{}'.format(instance.__visit_name__))(instance) except AttributeError as e: raise RuntimeError( 'This visitor does not support {}'.format(type(instance)) ) from e Based on the information above, please complete the function: #CURRENT_FILE: falcon/falcon/inspect.py from functools import partial import inspect from typing import Callable from typing import Dict from typing import List from typing import Optional from typing import Type from falcon.app import App from falcon.routing import CompiledRouter from falcon import app_helpers def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]': """Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes. Default route inspector for CompiledRouter. Args: router (CompiledRouter): The router to inspect. Returns: List[RouteInfo]: A list of :class:`~.RouteInfo`. """
falcon/falcon/inspect.py
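One plausible completion of the `inspect_compiled_router` stub that closes the entry above, offered as a sketch rather than the definitive implementation: it assumes the compiled router exposes its routing tree via the private `_roots` attribute, that each node carries `raw_segment`, `resource`, `method_map`, and `children` attributes, and that `RouteInfo`, `RouteMethodInfo`, `_get_source_info`, and `_get_source_info_and_name` exist elsewhere in `falcon/inspect.py` — none of those names are confirmed by the context shown.

# Sketch only. Assumes router node attributes (raw_segment, resource,
# method_map, children) and the RouteInfo / RouteMethodInfo /
# _get_source_info* helpers defined elsewhere in falcon/inspect.py.
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
    def _traverse(roots, parent):
        for root in roots:
            path = parent + '/' + root.raw_segment
            if root.resource is not None:
                methods = []
                if root.method_map:
                    for method, func in root.method_map.items():
                        # Responders may be wrapped in functools.partial by
                        # hooks; unwrap to reach the real function.
                        real_func = func.func if isinstance(func, partial) else func
                        methods.append(
                            RouteMethodInfo(
                                method,
                                _get_source_info(real_func),
                                real_func.__name__,
                                _is_internal(real_func),
                            )
                        )
                source_info, class_name = _get_source_info_and_name(root.resource)
                routes.append(RouteInfo(path, class_name, source_info, methods))
            if root.children:
                _traverse(root.children, path)

    routes = []
    _traverse(router._roots, '')
    return routes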
falcon.inspect._is_internal
You are a Python programmer. Here is all the context you may find useful to complete the function:

Based on the information above, please complete the function:

#CURRENT_FILE: falcon/falcon/inspect.py
from functools import partial
import inspect
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type

from falcon.app import App
from falcon.routing import CompiledRouter
from falcon import app_helpers


def _is_internal(obj):
    """Check if the module of the object is a falcon module."""
falcon/falcon/inspect.py
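A minimal sketch for `_is_internal`, using only `inspect.getmodule` from the standard library (the `inspect` module is already imported in the file): an object counts as internal when its defining module's dotted name starts with `falcon.`.

def _is_internal(obj):
    """Check if the module of the object is a falcon module."""
    module = inspect.getmodule(obj)
    if module:
        # Objects defined in falcon.* modules are considered internal.
        return module.__name__.startswith('falcon.')
    return False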
falcon.cmd.inspect_app.load_app
You are a Python programmer. Here is all the context you may find useful to complete the function:

Based on the information above, please complete the function:

#CURRENT_FILE: falcon/falcon/cmd/inspect_app.py
import argparse
import importlib
import os
import sys

import falcon
from falcon.inspect import inspect_app
from falcon.inspect import inspect_routes
from falcon.inspect import StringVisitor


def load_app(parser, args):
falcon/falcon/cmd/inspect_app.py
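A sketch of `load_app`, under stated assumptions: the parsed arguments carry an `app_module` string of the form `some.module:app_instance` plus a `route_only` flag (both attribute names are guesses, chosen to pair with the `make_parser` sketch below), and bad input is reported through `parser.error()`, which exits the process.

def load_app(parser, args):
    try:
        module, instance = args.app_module.split(':', 1)
    except ValueError:
        parser.error(
            'The app_module must include a colon between the module and instance'
        )
    try:
        app = getattr(importlib.import_module(module), instance)
    except AttributeError:
        parser.error('{!r} not found in module {!r}'.format(instance, module))

    if not isinstance(app, falcon.App):
        # A factory callable is acceptable when only routes are inspected.
        if callable(app) and args.route_only:
            app = app()
        else:
            parser.error('{} is not a falcon.App instance'.format(app))

    return app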
falcon.cmd.inspect_app.make_parser
You are a Python programmer. Here is all the context you may find useful to complete the function:

Based on the information above, please complete the function:

#CURRENT_FILE: falcon/falcon/cmd/inspect_app.py
import argparse
import importlib
import os
import sys

import falcon
from falcon.inspect import inspect_app
from falcon.inspect import inspect_routes
from falcon.inspect import StringVisitor


def make_parser():
    """Create the parser or the application."""
falcon/falcon/cmd/inspect_app.py
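A sketch of `make_parser` built with the `argparse` import shown. The specific option names (`--route_only`, `--verbose`) and help strings are assumptions chosen to match the `load_app` sketch above.

def make_parser():
    """Create the parser or the application."""
    parser = argparse.ArgumentParser(
        description='Example: falcon-inspect-app myprogram:app'
    )
    parser.add_argument(
        'app_module',
        help='The module and app to inspect, in the form "module.path:app".',
    )
    parser.add_argument(
        '-r', '--route_only',
        action='store_true',
        help='Prints only the routes of the application.',
    )
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='Prints more information.',
    )
    return parser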
falcon.util.uri.unquote_string
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE falcon/falcon/util/uri.py def parse_query_string(query_string, keep_blank=False, csv=True): """Parse a query string into a dict. Query string parameters are assumed to use standard form-encoding. Only parameters with values are returned. For example, given 'foo=bar&flag', this function would ignore 'flag' unless the `keep_blank_qs_values` option is set. Note: In addition to the standard HTML form-based method for specifying lists by repeating a given param multiple times, Falcon supports a more compact form in which the param may be given a single time but set to a ``list`` of comma-separated elements (e.g., 'foo=a,b,c'). When using this format, all commas uri-encoded will not be treated by Falcon as a delimiter. If the client wants to send a value as a list, it must not encode the commas with the values. The two different ways of specifying lists may not be mixed in a single query string for the same parameter. Args: query_string (str): The query string to parse. keep_blank (bool): Set to ``True`` to return fields even if they do not have a value (default ``False``). For comma-separated values, this option also determines whether or not empty elements in the parsed list are retained. csv: Set to ``False`` in order to disable splitting query parameters on ``,`` (default ``True``). Depending on the user agent, encoding lists as multiple occurrences of the same parameter might be preferable. In this case, setting `parse_qs_csv` to ``False`` will cause the framework to treat commas as literal characters in each occurring parameter value. Returns: dict: A dictionary of (*name*, *value*) pairs, one per query parameter. Note that *value* may be a single ``str``, or a ``list`` of ``str``. Raises: TypeError: `query_string` was not a ``str``. """ params = {} is_encoded = '+' in query_string or '%' in query_string # PERF(kgriffs): This was found to be faster than using a regex, for # both short and long query strings. Tested on CPython 3.4. for field in query_string.split('&'): k, _, v = field.partition('=') if not v and (not keep_blank or not k): continue # Note(steffgrez): Falcon first decode name parameter for handle # utf8 character. if is_encoded: k = decode(k) # NOTE(steffgrez): Falcon decode value at the last moment. So query # parser won't mix up between percent-encoded comma (as value) and # comma-separated list (as reserved character for sub-delimiter) if k in params: # The key was present more than once in the POST data. Convert to # a list, or append the next value to the list. old_value = params[k] if csv and ',' in v: # NOTE(kgriffs): Falcon supports a more compact form of # lists, in which the elements are comma-separated and # assigned to a single param instance. If it turns out that # very few people use this, it can be deprecated at some # point. v = v.split(',') if not keep_blank: # NOTE(kgriffs): Normalize the result in the case that # some elements are empty strings, such that the result # will be the same for 'foo=1,,3' as 'foo=1&foo=&foo=3'. 
additional_values = [decode(element) for element in v if element] else: additional_values = [decode(element) for element in v] if isinstance(old_value, list): old_value.extend(additional_values) else: additional_values.insert(0, old_value) params[k] = additional_values else: if is_encoded: v = decode(v) if isinstance(old_value, list): old_value.append(v) else: params[k] = [old_value, v] else: if csv and ',' in v: # NOTE(kgriffs): Falcon supports a more compact form of # lists, in which the elements are comma-separated and # assigned to a single param instance. If it turns out that # very few people use this, it can be deprecated at some # point. v = v.split(',') if not keep_blank: # NOTE(kgriffs): Normalize the result in the case that # some elements are empty strings, such that the result # will be the same for 'foo=1,,3' as 'foo=1&foo=&foo=3'. params[k] = [decode(element) for element in v if element] else: params[k] = [decode(element) for element in v] elif is_encoded: params[k] = decode(v) else: params[k] = v return params # FILE falcon/falcon/util/uri.py def decode(encoded_uri, unquote_plus=True): """Decode percent-encoded characters in a URI or query string. This function models the behavior of `urllib.parse.unquote_plus`, albeit in a faster, more straightforward manner. Args: encoded_uri (str): An encoded URI (full or partial). Keyword Arguments: unquote_plus (bool): Set to ``False`` to retain any plus ('+') characters in the given string, rather than converting them to spaces (default ``True``). Typically you should set this to ``False`` when decoding any part of a URI other than the query string. Returns: str: A decoded URL. If the URL contains escaped non-ASCII characters, UTF-8 is assumed per RFC 3986. """ decoded_uri = encoded_uri # PERF(kgriffs): Don't take the time to instantiate a new # string unless we have to. if '+' in decoded_uri and unquote_plus: decoded_uri = decoded_uri.replace('+', ' ') # Short-circuit if we can if '%' not in decoded_uri: return decoded_uri # NOTE(kgriffs): Clients should never submit a URI that has # unescaped non-ASCII chars in them, but just in case they # do, let's encode into a non-lossy format. decoded_uri = decoded_uri.encode() # PERF(kgriffs): This was found to be faster than using # a regex sub call or list comprehension with a join. tokens = decoded_uri.split(b'%') # PERF(vytas): Just use in-place add for a low number of items: if len(tokens) < 8: decoded_uri = tokens[0] for token in tokens[1:]: token_partial = token[:2] try: decoded_uri += _HEX_TO_BYTE[token_partial] + token[2:] except KeyError: # malformed percentage like "x=%" or "y=%+" decoded_uri += b'%' + token # Convert back to str return decoded_uri.decode('utf-8', 'replace') # NOTE(vytas): Decode percent-encoded bytestring fragments and join them # back to a string using the platform-dependent method. return _join_tokens(tokens) Based on the information above, please complete the function: #CURRENT_FILE: falcon/falcon/util/uri.py from falcon.constants import PYPY from falcon.cyutil.uri import decode as _cy_decode, parse_query_string as _cy_parse_query_string def unquote_string(quoted): """Unquote an RFC 7320 "quoted-string". Args: quoted (str): Original quoted string Returns: str: unquoted string Raises: TypeError: `quoted` was not a ``str``. """
falcon/falcon/util/uri.py
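A sketch for `unquote_string`, shaped by the docstring just above: raise `TypeError` for non-strings, return values that are not actually wrapped in double quotes unchanged, and otherwise strip the outer quotes. How aggressively to collapse backslash escapes (RFC 7230 quoted-pairs) is an assumption on my part.

def unquote_string(quoted):
    if not isinstance(quoted, str):
        raise TypeError('`quoted` must be a `str`')

    if len(quoted) < 2:
        return quoted
    if quoted[0] != '"' or quoted[-1] != '"':
        # Not a quoted-string; return the input unmodified.
        return quoted

    tmp_quoted = quoted[1:-1]
    if '\\' not in tmp_quoted:
        return tmp_quoted
    if r'\\' not in tmp_quoted:
        return tmp_quoted.replace('\\', '')
    # Preserve escaped backslashes while dropping other escape characters.
    return '\\'.join(q.replace('\\', '') for q in tmp_quoted.split(r'\\'))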
falcon.util.misc.get_argnames
You are a Python programmer. Here is all the context you may find useful to complete the function:

Based on the information above, please complete the function:

#CURRENT_FILE: falcon/falcon/util/misc.py
import datetime
import functools
import http
import inspect
import re
import unicodedata

from falcon import status_codes
from falcon.constants import PYPY
from falcon.constants import PYTHON_VERSION
from falcon.uri import encode_value

from .deprecation import deprecated
from falcon.cyutil.misc import encode_items_to_latin1 as _cy_encode_items_to_latin1
from falcon.cyutil.misc import isascii as _cy_isascii


def get_argnames(func):
    """Introspect the arguments of a callable.

    Args:
        func: The callable to introspect

    Returns:
        A list of argument names, excluding *args and **kwargs
        arguments.
    """
falcon/falcon/util/misc.py
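A sketch built on `inspect.signature` (the `inspect` module is already imported in `misc.py`): collect parameter names, skip the variadic kinds, and normalize away a leading `self`, which may or may not be reported depending on how the callable was obtained.

def get_argnames(func):
    sig = inspect.signature(func)
    args = [
        param.name
        for param in sig.parameters.values()
        if param.kind not in (param.VAR_KEYWORD, param.VAR_POSITIONAL)
    ]

    # Depending on how the callable was obtained, 'self' may or may not
    # appear in the signature; normalize the result by removing it.
    if args and args[0] == 'self':
        args = args[1:]

    return args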
falcon.testing.client._is_asgi_app
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE falcon/falcon/testing/client.py async def _simulate_request_asgi( app, method='GET', path='/', query_string=None, headers=None, content_type=None, body=None, json=None, params=None, params_csv=True, protocol='http', host=helpers.DEFAULT_HOST, remote_addr=None, extras=None, http_version='1.1', port=None, root_path=None, asgi_chunk_size=4096, asgi_disconnect_ttl=300, cookies=None, # NOTE(kgriffs): These are undocumented because they are only # meant to be used internally by the framework (i.e., they are # not part of the public interface.) In case we ever expose # simulate_request_asgi() as part of the public interface, we # don't want these kwargs to be documented. _one_shot=True, _stream_result=False, ) -> _ResultBase: """Simulate a request to an ASGI application. Keyword Args: app (callable): The WSGI or ASGI application to call method (str): An HTTP method to use in the request (default: 'GET') path (str): The URL path to request (default: '/'). Note: The path may contain a query string. However, neither `query_string` nor `params` may be specified in this case. root_path (str): The initial portion of the request URL's "path" that corresponds to the application object, so that the application knows its virtual "location". This defaults to the empty string, indicating that the application corresponds to the "root" of the server. protocol: The protocol to use for the URL scheme (default: 'http') port (int): The TCP port to simulate. Defaults to the standard port used by the given scheme (i.e., 80 for 'http' and 443 for 'https'). A string may also be passed, as long as it can be parsed as an int. params (dict): A dictionary of query string parameters, where each key is a parameter name, and each value is either a ``str`` or something that can be converted into a ``str``, or a list of such values. If a ``list``, the value will be converted to a comma-delimited string of values (e.g., 'thing=1,2,3'). params_csv (bool): Set to ``False`` to encode list values in query string params by specifying multiple instances of the parameter (e.g., 'thing=1&thing=2&thing=3'). Otherwise, parameters will be encoded as comma-separated values (e.g., 'thing=1,2,3'). Defaults to ``True``. query_string (str): A raw query string to include in the request (default: ``None``). If specified, overrides `params`. content_type (str): The value to use for the Content-Type header in the request. If specified, this value will take precedence over any value set for the Content-Type header in the `headers` keyword argument. The ``falcon`` module provides a number of :ref:`constants for common media types <media_type_constants>`. headers (dict): Extra headers as a dict-like (Mapping) object, or an iterable yielding a series of two-member (*name*, *value*) iterables. Each pair of strings provides the name and value for an HTTP header. If desired, multiple header values may be combined into a single (*name*, *value*) pair by joining the values with a comma when the header in question supports the list format (see also RFC 7230 and RFC 7231). Header names are not case-sensitive. Note: If a User-Agent header is not provided, it will default to:: f'falcon-client/{falcon.__version__}' body (str): The body of the request (default ''). The value will be encoded as UTF-8 in the WSGI environ. Alternatively, a byte string may be passed, in which case it will be used as-is. 
json(JSON serializable): A JSON document to serialize as the body of the request (default: ``None``). If specified, overrides `body` and sets the Content-Type header to ``'application/json'``, overriding any value specified by either the `content_type` or `headers` arguments. host(str): A string to use for the hostname part of the fully qualified request URL (default: 'falconframework.org') remote_addr (str): A string to use as the remote IP address for the request (default: '127.0.0.1'). For WSGI, this corresponds to the 'REMOTE_ADDR' environ variable. For ASGI, this corresponds to the IP address used for the 'client' field in the connection scope. http_version (str): The HTTP version to simulate. Must be either '2', '2.0', 1.1', '1.0', or '1' (default '1.1'). If set to '1.0', the Host header will not be added to the scope. asgi_chunk_size (int): The maximum number of bytes that will be sent to the ASGI app in a single ``'http.request'`` event (default 4096). asgi_disconnect_ttl (int): The maximum number of seconds to wait since the request was initiated, before emitting an ``'http.disconnect'`` event when the app calls the receive() function (default 300). extras (dict): Additional values to add to the WSGI ``environ`` dictionary or the ASGI scope for the request (default: ``None``) cookies (dict): Cookies as a dict-like (Mapping) object, or an iterable yielding a series of two-member (*name*, *value*) iterables. Each pair of items provides the name and value for the 'Set-Cookie' header. Returns: :py:class:`~.Result`: The result of the request """ path, query_string, headers, body, extras = _prepare_sim_args( path, query_string, params, params_csv, content_type, headers, body, json, extras, ) # --------------------------------------------------------------------- # NOTE(kgriffs): 'http' scope # --------------------------------------------------------------------- content_length = None if body is not None: if isinstance(body, str): body = body.encode() content_length = len(body) http_scope = helpers.create_scope( path=path, query_string=query_string, method=method, headers=headers, host=host, scheme=protocol, port=port, http_version=http_version, remote_addr=remote_addr, root_path=root_path, content_length=content_length, cookies=cookies, ) if 'method' in extras and extras['method'] != method.upper(): raise ValueError( 'ASGI scope extras may not override the request method. ' 'Please use the method parameter.' ) http_scope.update(extras) # --------------------------------------------------------------------- if asgi_disconnect_ttl == 0: # Special case disconnect_at = 0 else: disconnect_at = time.time() + max(0, asgi_disconnect_ttl) req_event_emitter = helpers.ASGIRequestEventEmitter( (body or b''), chunk_size=asgi_chunk_size, disconnect_at=disconnect_at, ) resp_event_collector = helpers.ASGIResponseEventCollector() if not _one_shot: task_req = create_task(app(http_scope, req_event_emitter, resp_event_collector)) if _stream_result: # NOTE(kgriffs): Wait until the response has been started and give # the task a chance to progress. Otherwise, we won't have a # status or headers to pass to StreamedResult. 
while not resp_event_collector.status: await asyncio.sleep(0) return StreamedResult( resp_event_collector.body_chunks, code_to_http_status(resp_event_collector.status), resp_event_collector.headers, task_req, req_event_emitter, ) req_event_emitter.disconnect() await task_req return Result( resp_event_collector.body_chunks, code_to_http_status(resp_event_collector.status), resp_event_collector.headers, ) # --------------------------------------------------------------------- # NOTE(kgriffs): 'lifespan' scope # --------------------------------------------------------------------- lifespan_scope = { 'type': ScopeType.LIFESPAN, 'asgi': { 'version': '3.0', 'spec_version': '2.0', }, } shutting_down = asyncio.Condition() lifespan_event_emitter = helpers.ASGILifespanEventEmitter(shutting_down) lifespan_event_collector = helpers.ASGIResponseEventCollector() # --------------------------------------------------------------------- async def conductor(): # NOTE(kgriffs): We assume this is a Falcon ASGI app, which supports # the lifespan protocol and thus we do not need to catch # exceptions that would signify no lifespan protocol support. task_lifespan = get_running_loop().create_task( app(lifespan_scope, lifespan_event_emitter, lifespan_event_collector) ) await _wait_for_startup(lifespan_event_collector.events) task_req = create_task(app(http_scope, req_event_emitter, resp_event_collector)) req_event_emitter.disconnect() await task_req # NOTE(kgriffs): Notify lifespan_event_emitter that it is OK # to proceed. async with shutting_down: shutting_down.notify() await _wait_for_shutdown(lifespan_event_collector.events) await task_lifespan await conductor() if resp_event_collector.status is None: # NOTE(kgriffs): An immediate disconnect was simulated, and so # the app could not return a status. raise ConnectionError('An immediate disconnect was simulated.') return Result( resp_event_collector.body_chunks, code_to_http_status(resp_event_collector.status), resp_event_collector.headers, ) # FILE falcon/falcon/asgi_spec.py class ScopeType: """Standard ASGI event type strings.""" HTTP = 'http' WS = 'websocket' LIFESPAN = 'lifespan' # FILE falcon/falcon/util/sync.py def create_task(coro, name=None): return asyncio.ensure_future(coro) # FILE falcon/falcon/testing/client.py async def _wait_for_startup(events): # NOTE(kgriffs): This is covered, but our gate for some reason doesn't # understand `while True`. while True: # pragma: nocover for e in events: if e['type'] == 'lifespan.startup.failed': raise RuntimeError( 'ASGI app returned lifespan.startup.failed. ' + e['message'] ) if any(e['type'] == 'lifespan.startup.complete' for e in events): break # NOTE(kgriffs): Yield to the concurrent lifespan task await asyncio.sleep(0) # FILE falcon/falcon/testing/client.py class Cookie: """Represents a cookie returned by a simulated request. Args: morsel: A ``Morsel`` object from which to derive the cookie data. Attributes: name (str): The cookie's name. value (str): The value of the cookie. expires(datetime.datetime): Expiration timestamp for the cookie, or ``None`` if not specified. path (str): The path prefix to which this cookie is restricted, or ``None`` if not specified. domain (str): The domain to which this cookie is restricted, or ``None`` if not specified. max_age (int): The lifetime of the cookie in seconds, or ``None`` if not specified. secure (bool): Whether or not the cookie may only only be transmitted from the client via HTTPS. 
http_only (bool): Whether or not the cookie may only be included in unscripted requests from the client. """ def __init__(self, morsel): self._name = morsel.key self._value = morsel.value for name in ( 'expires', 'path', 'domain', 'max_age', 'secure', 'httponly', 'samesite', ): value = morsel[name.replace('_', '-')] or None setattr(self, '_' + name, value) def name(self) -> str: return self._name def value(self) -> str: ... def expires(self) -> Optional[dt.datetime]: ... def path(self) -> str: ... def domain(self) -> str: ... def max_age(self) -> Optional[int]: ... def secure(self) -> bool: ... def http_only(self) -> bool: ... def same_site(self) -> Optional[int]: ... Based on the information above, please complete the function: #CURRENT_FILE: falcon/falcon/testing/client.py import asyncio import datetime as dt import inspect import json as json_module import time from typing import Dict from typing import Optional from typing import Sequence from typing import Union import warnings import wsgiref.validate from falcon.asgi_spec import ScopeType from falcon.constants import COMBINED_METHODS from falcon.constants import MEDIA_JSON from falcon.errors import CompatibilityError from falcon.testing import helpers from falcon.testing.srmock import StartResponseMock from falcon.util import async_to_sync from falcon.util import CaseInsensitiveDict from falcon.util import code_to_http_status from falcon.util import create_task from falcon.util import get_running_loop from falcon.util import http_cookies from falcon.util import http_date_to_dt from falcon.util import to_query_str def _is_asgi_app(app):
falcon/falcon/testing/client.py
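A heuristic sketch for `_is_asgi_app`: an ASGI application callable takes three arguments (`scope`, `receive`, `send`), while a WSGI callable takes two (`environ`, `start_response`), so counting positional arguments — minus `self` for bound callables — distinguishes them. This duck-typing rule is a simplifying assumption, not a guarantee.

def _is_asgi_app(app):
    app_args = inspect.getfullargspec(app).args
    num_app_args = len(app_args)

    # Technically someone could name the "self" arg something else, but
    # assume that is rare enough to be acceptable.
    if app_args and app_args[0] == 'self':
        num_app_args -= 1

    # ASGI: app(scope, receive, send); WSGI: app(environ, start_response)
    return num_app_args == 3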
falcon.routing.converters.UUIDConverter.convert
You are a Python programmer. Here is all the context you may find useful to complete the function:

Based on the information above, please complete the function:

#CURRENT_FILE: falcon/falcon/routing/converters.py
import abc
from datetime import datetime
import uuid


class UUIDConverter(BaseConverter):
    """Converts a field value to a uuid.UUID.

    Identifier: `uuid`

    In order to be converted, the field value must consist of a
    string of 32 hexadecimal digits, as defined in RFC 4122, Section 3.
    Note, however, that hyphens and the URN prefix are optional.
    """

    def convert(self, value):
falcon/falcon/routing/converters.py
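A compact sketch: `uuid.UUID` already accepts the hyphenated, unhyphenated, and URN forms described in the docstring, so the converter can delegate to it. Returning `None` to signal "no match" is an assumption about the converter contract, which the context does not show.

# Method of UUIDConverter in falcon/falcon/routing/converters.py
def convert(self, value):
    try:
        return uuid.UUID(value)
    except ValueError:
        # Not a valid UUID string; signal that the segment does not match.
        return None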
rest_framework_simplejwt.utils.make_utc
You are a Python programmer. Here is all the context you may find useful to complete the function:

# FILE djangorestframework-simplejwt/rest_framework_simplejwt/utils.py
def aware_utcnow() -> datetime:
    return make_utc(datetime.utcnow())

# LIB django
def is_naive(value):
    """
    Determine if a given datetime.datetime is naive.

    The concept is defined in Python's docs:
    https://docs.python.org/library/datetime.html#datetime.tzinfo

    Assuming value.tzinfo is either None or a proper datetime.tzinfo,
    value.utcoffset() implements the appropriate logic.
    """
    return value.utcoffset() is None

# FILE djangorestframework-simplejwt/rest_framework_simplejwt/utils.py
def datetime_from_epoch(ts: float) -> datetime:
    return make_utc(datetime.utcfromtimestamp(ts))

Based on the information above, please complete the function:

#CURRENT_FILE: djangorestframework-simplejwt/rest_framework_simplejwt/utils.py
import hashlib
from calendar import timegm
from datetime import datetime, timezone
from typing import Callable

from django.conf import settings
from django.utils.functional import lazy
from django.utils.timezone import is_naive, make_aware


def make_utc(dt: datetime) -> datetime:
djangorestframework-simplejwt/rest_framework_simplejwt/utils.py
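A sketch consistent with the `aware_utcnow` and `datetime_from_epoch` helpers shown above: when the Django project runs with `USE_TZ = True` and the input is naive, attach the UTC timezone; otherwise return the value unchanged.

def make_utc(dt: datetime) -> datetime:
    if settings.USE_TZ and is_naive(dt):
        # Attach UTC so downstream comparisons work under USE_TZ.
        dt = make_aware(dt, timezone=timezone.utc)
    return dt

With `USE_TZ = False`, the naive value passes through untouched, matching how Django treats datetimes in that mode.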
boto.sdb.db.sequence.fib
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE boto/boto/sdb/db/sequence.py def increment_by_one(cv=None, lv=None): if cv is None: return 0 return cv + 1 # FILE boto/boto/sdb/db/sequence.py class Sequence(object): """A simple Sequence using the new SDB "Consistent" features Based largly off of the "Counter" example from mitch garnaat: http://bitbucket.org/mitch/stupidbototricks/src/tip/counter.py""" def __init__(self, id=None, domain_name=None, fnc=increment_by_one, init_val=None): """Create a new Sequence, using an optional function to increment to the next number, by default we just increment by one. Every parameter here is optional, if you don't specify any options then you'll get a new SequenceGenerator with a random ID stored in the default domain that increments by one and uses the default botoweb environment :param id: Optional ID (name) for this counter :type id: str :param domain_name: Optional domain name to use, by default we get this out of the environment configuration :type domain_name:str :param fnc: Optional function to use for the incrementation, by default we just increment by one There are several functions defined in this module. Your function must accept "None" to get the initial value :type fnc: function, str :param init_val: Initial value, by default this is the first element in your sequence, but you can pass in any value, even a string if you pass in a function that uses strings instead of ints to increment """ self._db = None self._value = None self.last_value = None self.domain_name = domain_name self.id = id if init_val is None: init_val = fnc(init_val) if self.id is None: import uuid self.id = str(uuid.uuid4()) self.item_type = type(fnc(None)) self.timestamp = None # Allow us to pass in a full name to a function if isinstance(fnc, six.string_types): from boto.utils import find_class fnc = find_class(fnc) self.fnc = fnc # Bootstrap the value last if not self.val: self.val = init_val def set(self, val): """Set the value""" import time now = time.time() expected_value = [] new_val = {} new_val['timestamp'] = now if self._value is not None: new_val['last_value'] = self._value expected_value = ['current_value', str(self._value)] new_val['current_value'] = val try: self.db.put_attributes(self.id, new_val, expected_value=expected_value) self.timestamp = new_val['timestamp'] except SDBResponseError as e: if e.status == 409: raise ValueError("Sequence out of sync") else: raise def get(self): """Get the value""" ... def __repr__(self): ... def _connect(self): """Connect to our domain""" ... def next(self): ... def delete(self): """Remove this sequence""" ... # FILE boto/boto/sdb/db/sequence.py class Sequence(object): """A simple Sequence using the new SDB "Consistent" features Based largly off of the "Counter" example from mitch garnaat: http://bitbucket.org/mitch/stupidbototricks/src/tip/counter.py""" def __init__(self, id=None, domain_name=None, fnc=increment_by_one, init_val=None): """Create a new Sequence, using an optional function to increment to the next number, by default we just increment by one. 
Every parameter here is optional, if you don't specify any options then you'll get a new SequenceGenerator with a random ID stored in the default domain that increments by one and uses the default botoweb environment :param id: Optional ID (name) for this counter :type id: str :param domain_name: Optional domain name to use, by default we get this out of the environment configuration :type domain_name:str :param fnc: Optional function to use for the incrementation, by default we just increment by one There are several functions defined in this module. Your function must accept "None" to get the initial value :type fnc: function, str :param init_val: Initial value, by default this is the first element in your sequence, but you can pass in any value, even a string if you pass in a function that uses strings instead of ints to increment """ self._db = None self._value = None self.last_value = None self.domain_name = domain_name self.id = id if init_val is None: init_val = fnc(init_val) if self.id is None: import uuid self.id = str(uuid.uuid4()) self.item_type = type(fnc(None)) self.timestamp = None # Allow us to pass in a full name to a function if isinstance(fnc, six.string_types): from boto.utils import find_class fnc = find_class(fnc) self.fnc = fnc # Bootstrap the value last if not self.val: self.val = init_val def set(self, val): """Set the value""" ... def get(self): """Get the value""" ... def __repr__(self): ... def _connect(self): """Connect to our domain""" ... def next(self): self.val = self.fnc(self.val, self.last_value) return self.val def delete(self): """Remove this sequence""" ... Based on the information above, please complete the function: #CURRENT_FILE: boto/boto/sdb/db/sequence.py from boto.exception import SDBResponseError from boto.compat import six import uuid from boto.utils import find_class import time import boto def fib(cv=1, lv=0): """The fibonacci sequence, this incrementer uses the last value"""
boto/boto/sdb/db/sequence.py
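A sketch matching the incrementer contract spelled out in `Sequence.__init__` (the function must accept `None` to produce an initial value) and in `Sequence.next`, which passes both the current and last values. Each call returns the sum of the two, so consecutive calls walk the Fibonacci sequence: 1, 1, 2, 3, 5, ...

def fib(cv=1, lv=0):
    """The fibonacci sequence, this incrementer uses the last value"""
    # Tolerate None for either value: the Sequence bootstraps with
    # fnc(None), and last_value starts out as None.
    if cv is None:
        cv = 1
    if lv is None:
        lv = 0
    return cv + lv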
boto.s3.website.RoutingRules.add_rule
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE boto/boto/s3/website.py class WebsiteConfiguration(object): """ Website configuration for a bucket. :ivar suffix: Suffix that is appended to a request that is for a "directory" on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not be empty and must not include a slash character. :ivar error_key: The object key name to use when a 4xx class error occurs. This key identifies the page that is returned when such an error occurs. :ivar redirect_all_requests_to: Describes the redirect behavior for every request to this bucket's website endpoint. If this value is non None, no other values are considered when configuring the website configuration for the bucket. This is an instance of ``RedirectLocation``. :ivar routing_rules: ``RoutingRules`` object which specifies conditions and redirects that apply when the conditions are met. """ def __init__(self, suffix=None, error_key=None, redirect_all_requests_to=None, routing_rules=None): self.suffix = suffix self.error_key = error_key self.redirect_all_requests_to = redirect_all_requests_to if routing_rules is not None: self.routing_rules = routing_rules else: self.routing_rules = RoutingRules() def startElement(self, name, attrs, connection): if name == 'RoutingRules': self.routing_rules = RoutingRules() return self.routing_rules elif name == 'IndexDocument': return _XMLKeyValue([('Suffix', 'suffix')], container=self) elif name == 'ErrorDocument': return _XMLKeyValue([('Key', 'error_key')], container=self) def endElement(self, name, value, connection): pass def to_xml(self): parts = ['<?xml version="1.0" encoding="UTF-8"?>', '<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'] if self.suffix is not None: parts.append(tag('IndexDocument', tag('Suffix', self.suffix))) if self.error_key is not None: parts.append(tag('ErrorDocument', tag('Key', self.error_key))) if self.redirect_all_requests_to is not None: parts.append(self.redirect_all_requests_to.to_xml()) if self.routing_rules: parts.append(self.routing_rules.to_xml()) parts.append('</WebsiteConfiguration>') return ''.join(parts) # FILE boto/boto/s3/website.py class RoutingRule(object): """Represents a single routing rule. There are convenience methods to making creating rules more concise:: rule = RoutingRule.when(key_prefix='foo/').then_redirect('example.com') :ivar condition: Describes condition that must be met for the specified redirect to apply. :ivar redirect: Specifies redirect behavior. You can redirect requests to another host, to another page, or with another protocol. In the event of an error, you can can specify a different error code to return. 
""" def __init__(self, condition=None, redirect=None): self.condition = condition self.redirect = redirect def startElement(self, name, attrs, connection): if name == 'Condition': return self.condition elif name == 'Redirect': return self.redirect def endElement(self, name, value, connection): pass def to_xml(self): parts = [] if self.condition: parts.append(self.condition.to_xml()) if self.redirect: parts.append(self.redirect.to_xml()) return tag('RoutingRule', '\n'.join(parts)) @classmethod def when(cls, key_prefix=None, http_error_code=None): return cls(Condition(key_prefix=key_prefix, http_error_code=http_error_code), None) def then_redirect(self, hostname=None, protocol=None, replace_key=None, replace_key_prefix=None, http_redirect_code=None): self.redirect = Redirect( hostname=hostname, protocol=protocol, replace_key=replace_key, replace_key_prefix=replace_key_prefix, http_redirect_code=http_redirect_code) return self Based on the information above, please complete the function: #CURRENT_FILE: boto/boto/s3/website.py class RoutingRules(list): def add_rule(self, rule): """ :type rule: :class:`boto.s3.website.RoutingRule` :param rule: A routing rule. :return: This ``RoutingRules`` object is returned, so that it can chain subsequent calls. """
boto/boto/s3/website.py
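Since `RoutingRules` subclasses `list`, a minimal sketch just appends the rule and returns `self`, which is what enables the chaining promised by the docstring:

class RoutingRules(list):
    def add_rule(self, rule):
        self.append(rule)
        # Returning self enables chained calls.
        return self

Usage then reads naturally, e.g. `rules = RoutingRules().add_rule(RoutingRule.when(key_prefix='images/').then_redirect('example.com'))`.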
boto.cloudfront.distribution.Distribution._canned_policy
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE boto/boto/cloudfront/identity.py class OriginAccessIdentity(object): def __init__(self, connection=None, config=None, id='', s3_user_id='', comment=''): self.connection = connection self.config = config self.id = id self.s3_user_id = s3_user_id self.comment = comment self.etag = None def startElement(self, name, attrs, connection): ... def endElement(self, name, value, connection): ... def update(self, comment=None): ... def delete(self): ... def uri(self): ... # FILE boto/boto/s3/connection.py class S3Connection(AWSAuthConnection): def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, host=NoHostProvided, debug=0, https_connection_factory=None, calling_format=DefaultCallingFormat, path='/', provider='aws', bucket_class=Bucket, security_token=None, suppress_consec_slashes=True, anon=False, validate_certs=None, profile_name=None): no_host_provided = False # Try falling back to the boto config file's value, if present. if host is NoHostProvided: host = boto.config.get('s3', 'host') if host is None: host = self.DefaultHost no_host_provided = True if isinstance(calling_format, six.string_types): calling_format=boto.utils.find_class(calling_format)() self.calling_format = calling_format self.bucket_class = bucket_class self.anon = anon super(S3Connection, self).__init__(host, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, debug=debug, https_connection_factory=https_connection_factory, path=path, provider=provider, security_token=security_token, suppress_consec_slashes=suppress_consec_slashes, validate_certs=validate_certs, profile_name=profile_name) # We need to delay until after the call to ``super`` before checking # to see if SigV4 is in use. if no_host_provided: if 'hmac-v4-s3' in self._required_auth_capability(): raise HostRequiredError( "When using SigV4, you must specify a 'host' parameter." ) def _required_auth_capability(self): ... def __iter__(self): ... def __contains__(self, bucket_name): ... def set_bucket_class(self, bucket_class): """ Set the Bucket class associated with this bucket. By default, this would be the boto.s3.key.Bucket class but if you want to subclass that for some reason this allows you to associate your new class. :type bucket_class: class :param bucket_class: A subclass of Bucket that can be more specific """ ... def build_post_policy(self, expiration_time, conditions): """ Taken from the AWS book Python examples and modified for use with boto """ ... def build_post_form_args(self, bucket_name, key, expires_in=6000, """ Taken from the AWS book Python examples and modified for use with boto This only returns the arguments required for the post form, not the actual form. This does not return the file input field which also needs to be added :type bucket_name: string :param bucket_name: Bucket to submit to :type key: string :param key: Key name, optionally add ${filename} to the end to attach the submitted filename :type expires_in: integer :param expires_in: Time (in seconds) before this expires, defaults to 6000 :type acl: string :param acl: A canned ACL. 
# FILE boto/boto/s3/connection.py
class S3Connection(AWSAuthConnection):

    DefaultHost = 's3.amazonaws.com'
    DefaultCallingFormat = boto.config.get(
        's3', 'calling_format', 'boto.s3.connection.SubdomainCallingFormat')
    QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None,
                 host=NoHostProvided, debug=0, https_connection_factory=None,
                 calling_format=DefaultCallingFormat, path='/',
                 provider='aws', bucket_class=Bucket, security_token=None,
                 suppress_consec_slashes=True, anon=False,
                 validate_certs=None, profile_name=None):
        no_host_provided = False
        # Try falling back to the boto config file's value, if present.
        if host is NoHostProvided:
            host = boto.config.get('s3', 'host')
            if host is None:
                host = self.DefaultHost
                no_host_provided = True
        if isinstance(calling_format, six.string_types):
            calling_format = boto.utils.find_class(calling_format)()
        self.calling_format = calling_format
        self.bucket_class = bucket_class
        self.anon = anon
        super(S3Connection, self).__init__(
            host, aws_access_key_id, aws_secret_access_key,
            is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
            debug=debug, https_connection_factory=https_connection_factory,
            path=path, provider=provider, security_token=security_token,
            suppress_consec_slashes=suppress_consec_slashes,
            validate_certs=validate_certs, profile_name=profile_name)
        # We need to delay until after the call to ``super`` before checking
        # to see if SigV4 is in use.
        if no_host_provided:
            if 'hmac-v4-s3' in self._required_auth_capability():
                raise HostRequiredError(
                    "When using SigV4, you must specify a 'host' parameter.")

    @detect_potential_s3sigv4
    def _required_auth_capability(self):
        if self.anon:
            return ['anon']
        else:
            return ['s3']

    def __iter__(self):
        for bucket in self.get_all_buckets():
            yield bucket

    def __contains__(self, bucket_name):
        return not (self.lookup(bucket_name) is None)

    def set_bucket_class(self, bucket_class):
        """
        Set the Bucket class associated with this bucket.  By default, this
        would be the boto.s3.key.Bucket class but if you want to subclass
        that for some reason this allows you to associate your new class.

        :type bucket_class: class
        :param bucket_class: A subclass of Bucket that can be more specific
        """
        self.bucket_class = bucket_class

    def build_post_policy(self, expiration_time, conditions):
        """
        Taken from the AWS book Python examples and modified for use
        with boto.
        """
        assert isinstance(expiration_time, time.struct_time), \
            'Policy document must include a valid expiration Time object'
        # Convert conditions object mappings to condition statements
        return '{"expiration": "%s",\n"conditions": [%s]}' % \
            (time.strftime(boto.utils.ISO8601, expiration_time),
             ",".join(conditions))

    def build_post_form_args(self, bucket_name, key, expires_in=6000,
                             acl=None, success_action_redirect=None,
                             max_content_length=None, http_method='http',
                             fields=None, conditions=None,
                             storage_class='STANDARD',
                             server_side_encryption=None):
        """
        Taken from the AWS book Python examples and modified for use with
        boto.  This only returns the arguments required for the post form,
        not the actual form.  This does not return the file input field,
        which also needs to be added.

        :type bucket_name: string
        :param bucket_name: Bucket to submit to

        :type key: string
        :param key: Key name, optionally add ${filename} to the end to
            attach the submitted filename

        :type expires_in: integer
        :param expires_in: Time (in seconds) before this expires, defaults
            to 6000

        :type acl: string
        :param acl: A canned ACL.  One of:
            * private
            * public-read
            * public-read-write
            * authenticated-read
            * bucket-owner-read
            * bucket-owner-full-control

        :type success_action_redirect: string
        :param success_action_redirect: URL to redirect to on success

        :type max_content_length: integer
        :param max_content_length: Maximum size for this file

        :type http_method: string
        :param http_method: HTTP Method to use, "http" or "https"

        :type storage_class: string
        :param storage_class: Storage class to use for storing the object.
            Valid values: STANDARD | REDUCED_REDUNDANCY

        :type server_side_encryption: string
        :param server_side_encryption: Specifies server-side encryption
            algorithm to use when Amazon S3 creates an object.
            Valid values: None | AES256

        :rtype: dict
        :return: A dictionary containing field names/values as well as
            a url to POST to

        .. code-block:: python
        """
        if fields is None:
            fields = []
        if conditions is None:
            conditions = []
        expiration = time.gmtime(int(time.time() + expires_in))

        # Generate policy document
        conditions.append('{"bucket": "%s"}' % bucket_name)
        if key.endswith("${filename}"):
            conditions.append(
                '["starts-with", "$key", "%s"]' % key[:-len("${filename}")])
        else:
            conditions.append('{"key": "%s"}' % key)
        if acl:
            conditions.append('{"acl": "%s"}' % acl)
            fields.append({"name": "acl", "value": acl})
        if success_action_redirect:
            conditions.append(
                '{"success_action_redirect": "%s"}' % success_action_redirect)
            fields.append({"name": "success_action_redirect",
                           "value": success_action_redirect})
        if max_content_length:
            conditions.append(
                '["content-length-range", 0, %i]' % max_content_length)

        if self.provider.security_token:
            fields.append({'name': 'x-amz-security-token',
                           'value': self.provider.security_token})
            conditions.append('{"x-amz-security-token": "%s"}' %
                              self.provider.security_token)

        if storage_class:
            fields.append({'name': 'x-amz-storage-class',
                           'value': storage_class})
            conditions.append('{"x-amz-storage-class": "%s"}' % storage_class)

        if server_side_encryption:
            fields.append({'name': 'x-amz-server-side-encryption',
                           'value': server_side_encryption})
            conditions.append('{"x-amz-server-side-encryption": "%s"}' %
                              server_side_encryption)

        policy = self.build_post_policy(expiration, conditions)

        # Add the base64-encoded policy document as the 'policy' field
        policy_b64 = base64.b64encode(policy)
        fields.append({"name": "policy", "value": policy_b64})

        # Add the AWS access key as the 'AWSAccessKeyId' field
        fields.append({"name": "AWSAccessKeyId",
                       "value": self.aws_access_key_id})

        # Add signature for encoded policy document as the 'signature' field
        signature = self._auth_handler.sign_string(policy_b64)
        fields.append({"name": "signature", "value": signature})
        fields.append({"name": "key", "value": key})

        # HTTPS protocol will be used if the secure HTTP option is enabled.
        url = '%s://%s/' % (http_method,
                            self.calling_format.build_host(
                                self.server_name(), bucket_name))

        return {"action": url, "fields": fields}

    def generate_url_sigv4(self, expires_in, method, bucket='', key='',
                           headers=None, force_http=False,
                           response_headers=None, version_id=None,
                           iso_date=None):
        path = self.calling_format.build_path_base(bucket, key)
        auth_path = self.calling_format.build_auth_path(bucket, key)
        host = self.calling_format.build_host(self.server_name(), bucket)

        # For presigned URLs we should ignore the port if it's HTTPS
        if host.endswith(':443'):
            host = host[:-4]

        params = {}
        if version_id is not None:
            params['VersionId'] = version_id

        if response_headers is not None:
            params.update(response_headers)

        http_request = self.build_base_http_request(
            method, path, auth_path, headers=headers, host=host,
            params=params)

        return self._auth_handler.presign(http_request, expires_in,
                                          iso_date=iso_date)

    def generate_url(self, expires_in, method, bucket='', key='',
                     headers=None, query_auth=True, force_http=False,
                     response_headers=None, expires_in_absolute=False,
                     version_id=None):
        if self._auth_handler.capability[0] == 'hmac-v4-s3' and query_auth:
            # Handle the special sigv4 case
            return self.generate_url_sigv4(
                expires_in, method, bucket=bucket, key=key, headers=headers,
                force_http=force_http, response_headers=response_headers,
                version_id=version_id)

        headers = headers or {}
        if expires_in_absolute:
            expires = int(expires_in)
        else:
            expires = int(time.time() + expires_in)
        auth_path = self.calling_format.build_auth_path(bucket, key)
        auth_path = self.get_path(auth_path)
        # optional version_id and response_headers need to be added to
        # the query param list.
        extra_qp = []
        if version_id is not None:
            extra_qp.append("versionId=%s" % version_id)
        if response_headers:
            for k, v in response_headers.items():
                extra_qp.append("%s=%s" % (k, urllib.parse.quote(v)))
        if self.provider.security_token:
            headers['x-amz-security-token'] = self.provider.security_token
        if extra_qp:
            delimiter = '?' if '?' not in auth_path else '&'
            auth_path += delimiter + '&'.join(extra_qp)

        self.calling_format.build_path_base(bucket, key)

        if query_auth and not self.anon:
            c_string = boto.utils.canonical_string(method, auth_path, headers,
                                                   expires, self.provider)
            b64_hmac = self._auth_handler.sign_string(c_string)
            encoded_canonical = urllib.parse.quote(b64_hmac, safe='')
            query_part = '?' + self.QueryString % (encoded_canonical, expires,
                                                   self.aws_access_key_id)
        else:
            query_part = ''
        if headers:
            hdr_prefix = self.provider.header_prefix
            for k, v in headers.items():
                if k.startswith(hdr_prefix):
                    # headers used for sig generation must be
                    # included in the url also.
                    extra_qp.append("%s=%s" % (k, urllib.parse.quote(v)))
        if extra_qp:
            delimiter = '?' if not query_part else '&'
            query_part += delimiter + '&'.join(extra_qp)
        if force_http:
            protocol = 'http'
            port = 80
        else:
            protocol = self.protocol
            port = self.port
        return self.calling_format.build_url_base(
            self, protocol, self.server_name(port), bucket, key) + query_part

    def get_all_buckets(self, headers=None):
        response = self.make_request('GET', headers=headers)
        body = response.read()
        if response.status > 300:
            raise self.provider.storage_response_error(
                response.status, response.reason, body)
        rs = ResultSet([('Bucket', self.bucket_class)])
        h = handler.XmlHandler(rs, self)
        if not isinstance(body, bytes):
            body = body.encode('utf-8')
        xml.sax.parseString(body, h)
        return rs

    def get_canonical_user_id(self, headers=None):
        """
        Convenience method that returns the "CanonicalUserID" of the user
        whose credentials are associated with the connection.  The only way
        to get this value is to do a GET request on the service which
        returns all buckets associated with the account.  As part of that
        response, the canonical userid is returned.  This method simply
        does all of that and then returns just the user id.

        :rtype: string
        :return: A string containing the canonical user id.
        """
        rs = self.get_all_buckets(headers=headers)
        return rs.owner.id

    def get_bucket(self, bucket_name, validate=True, headers=None):
        """
        Retrieves a bucket by name.

        If the bucket does not exist, an ``S3ResponseError`` will be raised.
        If you are unsure if the bucket exists or not, you can use the
        ``S3Connection.lookup`` method, which will either return a valid
        bucket or ``None``.

        If ``validate=False`` is passed, no request is made to the service
        (no charge/communication delay).  This is only safe to do if you
        are **sure** the bucket exists.

        If the default ``validate=True`` is passed, a request is made to
        the service to ensure the bucket exists.  Prior to Boto v2.25.0,
        this fetched a list of keys (but with a max limit set to ``0``,
        always returning an empty list) in the bucket (& included better
        error messages), at an increased expense.  As of Boto v2.25.0, this
        now performs a HEAD request (less expensive but worse error
        messages).

        If you were relying on parsing the error message before, you should
        call something like::

            bucket = conn.get_bucket('<bucket_name>', validate=False)
            bucket.get_all_keys(maxkeys=0)

        :type bucket_name: string
        :param bucket_name: The name of the bucket

        :type headers: dict
        :param headers: Additional headers to pass along with the request
            to AWS.

        :type validate: boolean
        :param validate: If ``True``, it will try to verify the bucket
            exists on the service-side. (Default: ``True``)
        """
        if validate:
            return self.head_bucket(bucket_name, headers=headers)
        else:
            return self.bucket_class(self, bucket_name)

    def head_bucket(self, bucket_name, headers=None):
        """
        Determines if a bucket exists by name.

        If the bucket does not exist, an ``S3ResponseError`` will be raised.

        :type bucket_name: string
        :param bucket_name: The name of the bucket

        :type headers: dict
        :param headers: Additional headers to pass along with the request
            to AWS.

        :returns: A <Bucket> object
        """
        response = self.make_request('HEAD', bucket_name, headers=headers)
        body = response.read()
        if response.status == 200:
            return self.bucket_class(self, bucket_name)
        elif response.status == 403:
            # For backward-compatibility, we'll populate part of the
            # exception with the most-common default.
            err = self.provider.storage_response_error(
                response.status, response.reason, body)
            err.error_code = 'AccessDenied'
            err.error_message = 'Access Denied'
            raise err
        elif response.status == 404:
            # For backward-compatibility, we'll populate part of the
            # exception with the most-common default.
            err = self.provider.storage_response_error(
                response.status, response.reason, body)
            err.error_code = 'NoSuchBucket'
            err.error_message = 'The specified bucket does not exist'
            raise err
        else:
            raise self.provider.storage_response_error(
                response.status, response.reason, body)

    def lookup(self, bucket_name, validate=True, headers=None):
        """
        Attempts to get a bucket from S3.

        Works identically to ``S3Connection.get_bucket``, save for that it
        will return ``None`` if the bucket does not exist instead of
        throwing an exception.

        :type bucket_name: string
        :param bucket_name: The name of the bucket

        :type headers: dict
        :param headers: Additional headers to pass along with the request
            to AWS.

        :type validate: boolean
        :param validate: If ``True``, it will try to fetch all keys within
            the given bucket. (Default: ``True``)
        """
        try:
            bucket = self.get_bucket(bucket_name, validate, headers=headers)
        except:
            bucket = None
        return bucket

    def create_bucket(self, bucket_name, headers=None,
                      location=Location.DEFAULT, policy=None):
        """
        Creates a new located bucket.  By default it's in the USA.  You can
        pass Location.EU to create a European bucket (S3) or European Union
        bucket (GCS).

        :type bucket_name: string
        :param bucket_name: The name of the new bucket

        :type headers: dict
        :param headers: Additional headers to pass along with the request
            to AWS.

        :type location: str
        :param location: The location of the new bucket.  You can use one
            of the constants in :class:`boto.s3.connection.Location` (e.g.
            Location.EU, Location.USWest, etc.).

        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the new
            key in S3.
        """
        check_lowercase_bucketname(bucket_name)

        if policy:
            if headers:
                headers[self.provider.acl_header] = policy
            else:
                headers = {self.provider.acl_header: policy}
        if location == Location.DEFAULT:
            data = ''
        else:
            data = '<CreateBucketConfiguration><LocationConstraint>' + \
                location + '</LocationConstraint></CreateBucketConfiguration>'
        response = self.make_request('PUT', bucket_name, headers=headers,
                                     data=data)
        body = response.read()
        if response.status == 409:
            raise self.provider.storage_create_error(
                response.status, response.reason, body)
        if response.status == 200:
            return self.bucket_class(self, bucket_name)
        else:
            raise self.provider.storage_response_error(
                response.status, response.reason, body)

    def delete_bucket(self, bucket, headers=None):
        """
        Removes an S3 bucket.

        In order to remove the bucket, it must first be empty.  If the
        bucket is not empty, an ``S3ResponseError`` will be raised.

        :type bucket_name: string
        :param bucket_name: The name of the bucket

        :type headers: dict
        :param headers: Additional headers to pass along with the request
            to AWS.
        """
        response = self.make_request('DELETE', bucket, headers=headers)
        body = response.read()
        if response.status != 204:
            raise self.provider.storage_response_error(
                response.status, response.reason, body)

    def make_request(self, method, bucket='', key='', headers=None, data='',
                     query_args=None, sender=None, override_num_retries=None,
                     retry_handler=None):
        if isinstance(bucket, self.bucket_class):
            bucket = bucket.name
        if isinstance(key, Key):
            key = key.name
        path = self.calling_format.build_path_base(bucket, key)
        boto.log.debug('path=%s' % path)
        auth_path = self.calling_format.build_auth_path(bucket, key)
        boto.log.debug('auth_path=%s' % auth_path)
        host = self.calling_format.build_host(self.server_name(), bucket)
        if query_args:
            path += '?' + query_args
            boto.log.debug('path=%s' % path)
            auth_path += '?' + query_args
            boto.log.debug('auth_path=%s' % auth_path)
        return super(S3Connection, self).make_request(
            method, path, headers, data, host, auth_path, sender,
            override_num_retries=override_num_retries,
            retry_handler=retry_handler)
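Illustrative usage (an addition for orientation, not part of the boto sources above): building the fields for a browser-based POST upload with ``build_post_form_args``, which is shown in full in the class above. The bucket name, key, and size limit are hypothetical; the caller still has to render the actual <form> and add the file input field.

from boto.s3.connection import S3Connection

conn = S3Connection()  # credentials resolved from the environment/boto config

# Returns the POST target URL and the signed policy fields; the form itself
# (and its file input) must be assembled by the caller.
args = conn.build_post_form_args(
    'mybucket', 'uploads/${filename}',
    expires_in=600, acl='private',
    max_content_length=10 * 1024 * 1024)

print(args['action'])  # URL the form should POST to
for field in args['fields']:
    print(field['name'], '=', field['value'])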
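Similarly, a minimal sketch of ``generate_url`` for time-limited, query-string-authenticated access to a single object; bucket and key names are hypothetical.

from boto.s3.connection import S3Connection

conn = S3Connection()

# Presigned GET URL, valid for one hour; anyone holding the URL can fetch
# the object until it expires.
url = conn.generate_url(3600, 'GET', bucket='mybucket', key='report.pdf')
print(url)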
# FILE boto/boto/cloudfront/origin.py
class S3Origin(object):
    """
    Origin information to associate with the distribution.
    If your distribution will use an Amazon S3 origin,
    then you use the S3Origin element.
    """

    def __init__(self, dns_name=None, origin_access_identity=None):
        """
        :param dns_name: The DNS name of your Amazon S3 bucket to
                         associate with the distribution.
                         For example: mybucket.s3.amazonaws.com.
        :type dns_name: str

        :param origin_access_identity: The CloudFront origin access
                                       identity to associate with the
                                       distribution.  If you want the
                                       distribution to serve private
                                       content, include this element; if
                                       you want the distribution to serve
                                       public content, remove this element.
        :type origin_access_identity: str
        """
        self.dns_name = dns_name
        self.origin_access_identity = origin_access_identity

    def __repr__(self):
        ...

    def startElement(self, name, attrs, connection):
        ...

    def endElement(self, name, value, connection):
        if name == 'DNSName':
            self.dns_name = value
        elif name == 'OriginAccessIdentity':
            self.origin_access_identity = value
        else:
            setattr(self, name, value)

    def to_xml(self):
        ...

# FILE boto/boto/cloudfront/distribution.py
class StreamingDistributionSummary(DistributionSummary):
    def get_distribution(self):
        return self.connection.get_streaming_distribution_info(self.id)

# FILE boto/boto/cloudfront/signers.py
class TrustedSigners(list):
    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'Self':
            self.append(name)
        elif name == 'AwsAccountNumber':
            self.append(value)
if host is NoHostProvided: host = boto.config.get('s3', 'host') if host is None: host = self.DefaultHost no_host_provided = True if isinstance(calling_format, six.string_types): calling_format=boto.utils.find_class(calling_format)() self.calling_format = calling_format self.bucket_class = bucket_class self.anon = anon super(S3Connection, self).__init__(host, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, debug=debug, https_connection_factory=https_connection_factory, path=path, provider=provider, security_token=security_token, suppress_consec_slashes=suppress_consec_slashes, validate_certs=validate_certs, profile_name=profile_name) # We need to delay until after the call to ``super`` before checking # to see if SigV4 is in use. if no_host_provided: if 'hmac-v4-s3' in self._required_auth_capability(): raise HostRequiredError( "When using SigV4, you must specify a 'host' parameter." ) def _required_auth_capability(self): ... def __iter__(self): ... def __contains__(self, bucket_name): ... def set_bucket_class(self, bucket_class): """ Set the Bucket class associated with this bucket. By default, this would be the boto.s3.key.Bucket class but if you want to subclass that for some reason this allows you to associate your new class. :type bucket_class: class :param bucket_class: A subclass of Bucket that can be more specific """ ... def build_post_policy(self, expiration_time, conditions): """ Taken from the AWS book Python examples and modified for use with boto """ ... def build_post_form_args(self, bucket_name, key, expires_in=6000, """ Taken from the AWS book Python examples and modified for use with boto This only returns the arguments required for the post form, not the actual form. This does not return the file input field which also needs to be added :type bucket_name: string :param bucket_name: Bucket to submit to :type key: string :param key: Key name, optionally add ${filename} to the end to attach the submitted filename :type expires_in: integer :param expires_in: Time (in seconds) before this expires, defaults to 6000 :type acl: string :param acl: A canned ACL. One of: * private * public-read * public-read-write * authenticated-read * bucket-owner-read * bucket-owner-full-control :type success_action_redirect: string :param success_action_redirect: URL to redirect to on success :type max_content_length: integer :param max_content_length: Maximum size for this file :type http_method: string :param http_method: HTTP Method to use, "http" or "https" :type storage_class: string :param storage_class: Storage class to use for storing the object. Valid values: STANDARD | REDUCED_REDUNDANCY :type server_side_encryption: string :param server_side_encryption: Specifies server-side encryption algorithm to use when Amazon S3 creates an object. Valid values: None | AES256 :rtype: dict :return: A dictionary containing field names/values as well as a url to POST to .. code-block:: python """ ... def generate_url_sigv4(self, expires_in, method, bucket='', key='', ... def generate_url(self, expires_in, method, bucket='', key='', headers=None, ... def get_all_buckets(self, headers=None): ... def get_canonical_user_id(self, headers=None): """ Convenience method that returns the "CanonicalUserID" of the user who's credentials are associated with the connection. The only way to get this value is to do a GET request on the service which returns all buckets associated with the account. As part of that response, the canonical userid is returned. 
This method simply does all of that and then returns just the user id. :rtype: string :return: A string containing the canonical user id. """ ... def get_bucket(self, bucket_name, validate=True, headers=None): """ Retrieves a bucket by name. If the bucket does not exist, an ``S3ResponseError`` will be raised. If you are unsure if the bucket exists or not, you can use the ``S3Connection.lookup`` method, which will either return a valid bucket or ``None``. If ``validate=False`` is passed, no request is made to the service (no charge/communication delay). This is only safe to do if you are **sure** the bucket exists. If the default ``validate=True`` is passed, a request is made to the service to ensure the bucket exists. Prior to Boto v2.25.0, this fetched a list of keys (but with a max limit set to ``0``, always returning an empty list) in the bucket (& included better error messages), at an increased expense. As of Boto v2.25.0, this now performs a HEAD request (less expensive but worse error messages). If you were relying on parsing the error message before, you should call something like:: bucket = conn.get_bucket('<bucket_name>', validate=False) bucket.get_all_keys(maxkeys=0) :type bucket_name: string :param bucket_name: The name of the bucket :type headers: dict :param headers: Additional headers to pass along with the request to AWS. :type validate: boolean :param validate: If ``True``, it will try to verify the bucket exists on the service-side. (Default: ``True``) """ ... def head_bucket(self, bucket_name, headers=None): """ Determines if a bucket exists by name. If the bucket does not exist, an ``S3ResponseError`` will be raised. :type bucket_name: string :param bucket_name: The name of the bucket :type headers: dict :param headers: Additional headers to pass along with the request to AWS. :returns: A <Bucket> object """ ... def lookup(self, bucket_name, validate=True, headers=None): """ Attempts to get a bucket from S3. Works identically to ``S3Connection.get_bucket``, save for that it will return ``None`` if the bucket does not exist instead of throwing an exception. :type bucket_name: string :param bucket_name: The name of the bucket :type headers: dict :param headers: Additional headers to pass along with the request to AWS. :type validate: boolean :param validate: If ``True``, it will try to fetch all keys within the given bucket. (Default: ``True``) """ ... def create_bucket(self, bucket_name, headers=None, location=Location.DEFAULT, policy=None): """ Creates a new located bucket. By default it's in the USA. You can pass Location.EU to create a European bucket (S3) or European Union bucket (GCS). :type bucket_name: string :param bucket_name: The name of the new bucket :type headers: dict :param headers: Additional headers to pass along with the request to AWS. :type location: str :param location: The location of the new bucket. You can use one of the constants in :class:`boto.s3.connection.Location` (e.g. Location.EU, Location.USWest, etc.). :type policy: :class:`boto.s3.acl.CannedACLStrings` :param policy: A canned ACL policy that will be applied to the new key in S3. 
""" check_lowercase_bucketname(bucket_name) if policy: if headers: headers[self.provider.acl_header] = policy else: headers = {self.provider.acl_header: policy} if location == Location.DEFAULT: data = '' else: data = '<CreateBucketConfiguration><LocationConstraint>' + \ location + '</LocationConstraint></CreateBucketConfiguration>' response = self.make_request('PUT', bucket_name, headers=headers, data=data) body = response.read() if response.status == 409: raise self.provider.storage_create_error( response.status, response.reason, body) if response.status == 200: return self.bucket_class(self, bucket_name) else: raise self.provider.storage_response_error( response.status, response.reason, body) def delete_bucket(self, bucket, headers=None): """ Removes an S3 bucket. In order to remove the bucket, it must first be empty. If the bucket is not empty, an ``S3ResponseError`` will be raised. :type bucket_name: string :param bucket_name: The name of the bucket :type headers: dict :param headers: Additional headers to pass along with the request to AWS. """ ... def make_request(self, method, bucket='', key='', headers=None, data='', ... # FILE boto/boto/s3/connection.py class S3Connection(AWSAuthConnection): def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, host=NoHostProvided, debug=0, https_connection_factory=None, calling_format=DefaultCallingFormat, path='/', provider='aws', bucket_class=Bucket, security_token=None, suppress_consec_slashes=True, anon=False, validate_certs=None, profile_name=None): no_host_provided = False # Try falling back to the boto config file's value, if present. if host is NoHostProvided: host = boto.config.get('s3', 'host') if host is None: host = self.DefaultHost no_host_provided = True if isinstance(calling_format, six.string_types): calling_format=boto.utils.find_class(calling_format)() self.calling_format = calling_format self.bucket_class = bucket_class self.anon = anon super(S3Connection, self).__init__(host, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, debug=debug, https_connection_factory=https_connection_factory, path=path, provider=provider, security_token=security_token, suppress_consec_slashes=suppress_consec_slashes, validate_certs=validate_certs, profile_name=profile_name) # We need to delay until after the call to ``super`` before checking # to see if SigV4 is in use. if no_host_provided: if 'hmac-v4-s3' in self._required_auth_capability(): raise HostRequiredError( "When using SigV4, you must specify a 'host' parameter." ) def _required_auth_capability(self): ... def __iter__(self): ... def __contains__(self, bucket_name): ... def set_bucket_class(self, bucket_class): """ Set the Bucket class associated with this bucket. By default, this would be the boto.s3.key.Bucket class but if you want to subclass that for some reason this allows you to associate your new class. :type bucket_class: class :param bucket_class: A subclass of Bucket that can be more specific """ ... def build_post_policy(self, expiration_time, conditions): """ Taken from the AWS book Python examples and modified for use with boto """ ... def build_post_form_args(self, bucket_name, key, expires_in=6000, """ Taken from the AWS book Python examples and modified for use with boto This only returns the arguments required for the post form, not the actual form. 
This does not return the file input field which also needs to be added :type bucket_name: string :param bucket_name: Bucket to submit to :type key: string :param key: Key name, optionally add ${filename} to the end to attach the submitted filename :type expires_in: integer :param expires_in: Time (in seconds) before this expires, defaults to 6000 :type acl: string :param acl: A canned ACL. One of: * private * public-read * public-read-write * authenticated-read * bucket-owner-read * bucket-owner-full-control :type success_action_redirect: string :param success_action_redirect: URL to redirect to on success :type max_content_length: integer :param max_content_length: Maximum size for this file :type http_method: string :param http_method: HTTP Method to use, "http" or "https" :type storage_class: string :param storage_class: Storage class to use for storing the object. Valid values: STANDARD | REDUCED_REDUNDANCY :type server_side_encryption: string :param server_side_encryption: Specifies server-side encryption algorithm to use when Amazon S3 creates an object. Valid values: None | AES256 :rtype: dict :return: A dictionary containing field names/values as well as a url to POST to .. code-block:: python """ ... def generate_url_sigv4(self, expires_in, method, bucket='', key='', headers=None, force_http=False, response_headers=None, version_id=None, iso_date=None): path = self.calling_format.build_path_base(bucket, key) auth_path = self.calling_format.build_auth_path(bucket, key) host = self.calling_format.build_host(self.server_name(), bucket) # For presigned URLs we should ignore the port if it's HTTPS if host.endswith(':443'): host = host[:-4] params = {} if version_id is not None: params['VersionId'] = version_id if response_headers is not None: params.update(response_headers) http_request = self.build_base_http_request(method, path, auth_path, headers=headers, host=host, params=params) return self._auth_handler.presign(http_request, expires_in, iso_date=iso_date) def generate_url(self, expires_in, method, bucket='', key='', headers=None, ... def get_all_buckets(self, headers=None): ... def get_canonical_user_id(self, headers=None): """ Convenience method that returns the "CanonicalUserID" of the user who's credentials are associated with the connection. The only way to get this value is to do a GET request on the service which returns all buckets associated with the account. As part of that response, the canonical userid is returned. This method simply does all of that and then returns just the user id. :rtype: string :return: A string containing the canonical user id. """ ... def get_bucket(self, bucket_name, validate=True, headers=None): """ Retrieves a bucket by name. If the bucket does not exist, an ``S3ResponseError`` will be raised. If you are unsure if the bucket exists or not, you can use the ``S3Connection.lookup`` method, which will either return a valid bucket or ``None``. If ``validate=False`` is passed, no request is made to the service (no charge/communication delay). This is only safe to do if you are **sure** the bucket exists. If the default ``validate=True`` is passed, a request is made to the service to ensure the bucket exists. Prior to Boto v2.25.0, this fetched a list of keys (but with a max limit set to ``0``, always returning an empty list) in the bucket (& included better error messages), at an increased expense. As of Boto v2.25.0, this now performs a HEAD request (less expensive but worse error messages). 
If you were relying on parsing the error message before, you should call
        something like::

            bucket = conn.get_bucket('<bucket_name>', validate=False)
            bucket.get_all_keys(maxkeys=0)

        :type bucket_name: string
        :param bucket_name: The name of the bucket

        :type headers: dict
        :param headers: Additional headers to pass along with the request to AWS.

        :type validate: boolean
        :param validate: If ``True``, it will try to verify the bucket exists
            on the service-side. (Default: ``True``)
        """
        ...

    def head_bucket(self, bucket_name, headers=None):
        """
        Determines if a bucket exists by name.
        If the bucket does not exist, an ``S3ResponseError`` will be raised.

        :type bucket_name: string
        :param bucket_name: The name of the bucket

        :type headers: dict
        :param headers: Additional headers to pass along with the request to AWS.

        :returns: A <Bucket> object
        """
        ...

    def lookup(self, bucket_name, validate=True, headers=None):
        """
        Attempts to get a bucket from S3.

        Works identically to ``S3Connection.get_bucket``, save for that it
        will return ``None`` if the bucket does not exist instead of throwing
        an exception.

        :type bucket_name: string
        :param bucket_name: The name of the bucket

        :type headers: dict
        :param headers: Additional headers to pass along with the request to AWS.

        :type validate: boolean
        :param validate: If ``True``, it will try to fetch all keys within the
            given bucket. (Default: ``True``)
        """
        ...

    def create_bucket(self, bucket_name, headers=None,
                      location=Location.DEFAULT, policy=None):
        """
        Creates a new located bucket. By default it's in the USA. You can pass
        Location.EU to create a European bucket (S3) or European Union bucket
        (GCS).

        :type bucket_name: string
        :param bucket_name: The name of the new bucket

        :type headers: dict
        :param headers: Additional headers to pass along with the request to AWS.

        :type location: str
        :param location: The location of the new bucket. You can use one of the
            constants in :class:`boto.s3.connection.Location` (e.g. Location.EU,
            Location.USWest, etc.).

        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the new key
            in S3.
        """
        ...

    def delete_bucket(self, bucket, headers=None):
        """
        Removes an S3 bucket.

        In order to remove the bucket, it must first be empty. If the bucket is
        not empty, an ``S3ResponseError`` will be raised.

        :type bucket_name: string
        :param bucket_name: The name of the bucket

        :type headers: dict
        :param headers: Additional headers to pass along with the request to AWS.
        """
        ...

    def make_request(self, method, bucket='', key='', headers=None, data='',
    ...
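The ``validate`` flag discussed in the ``get_bucket``/``lookup`` docstrings above is the main behavioral switch. A minimal usage sketch (the credentials and bucket names are hypothetical placeholders, not taken from the excerpts):

from boto.s3.connection import S3Connection

conn = S3Connection('ACCESS_KEY', 'SECRET_KEY')  # hypothetical credentials

# Default validate=True issues a HEAD request (as of Boto v2.25.0) and
# raises S3ResponseError if the bucket does not exist.
bucket = conn.get_bucket('example-bucket')

# validate=False skips the round trip entirely; only safe when the
# bucket is known to exist.
bucket = conn.get_bucket('example-bucket', validate=False)

# lookup() returns None for a missing bucket instead of raising.
if conn.lookup('maybe-missing-bucket') is None:
    print('no such bucket')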
# FILE boto/boto/cloudfront/origin.py
class CustomOrigin(object):
    """
    Origin information to associate with the distribution.
    If your distribution will use a non-Amazon S3 origin,
    then you use the CustomOrigin element.
    """

    def __init__(self, dns_name=None, http_port=80, https_port=443,
                 origin_protocol_policy=None):
        """
        :param dns_name: The DNS name of your Amazon S3 bucket to
                         associate with the distribution.
                         For example: mybucket.s3.amazonaws.com.
        :type dns_name: str

        :param http_port: The HTTP port the custom origin listens on.
        :type http_port: int

        :param https_port: The HTTPS port the custom origin listens on.
        :type https_port: int

        :param origin_protocol_policy: The origin protocol policy to
                                       apply to your origin. If you
                                       specify http-only, CloudFront
                                       will use HTTP only to access the origin.
                                       If you specify match-viewer, CloudFront
                                       will fetch from your origin using HTTP
                                       or HTTPS, based on the protocol of the
                                       viewer request.
        :type origin_protocol_policy: str
        """
        self.dns_name = dns_name
        self.http_port = http_port
        self.https_port = https_port
        self.origin_protocol_policy = origin_protocol_policy

    def __repr__(self):
        ...

    def startElement(self, name, attrs, connection):
        ...

    def endElement(self, name, value, connection):
        if name == 'DNSName':
            self.dns_name = value
        elif name == 'HTTPPort':
            try:
                self.http_port = int(value)
            except ValueError:
                self.http_port = value
        elif name == 'HTTPSPort':
            try:
                self.https_port = int(value)
            except ValueError:
                self.https_port = value
        elif name == 'OriginProtocolPolicy':
            self.origin_protocol_policy = value
        else:
            setattr(self, name, value)

    def to_xml(self):
        ...

# FILE boto/boto/cloudfront/distribution.py
class DistributionConfig(object):
    def __init__(self, connection=None, origin=None, enabled=False,
                 caller_reference='', cnames=None, comment='',
                 trusted_signers=None, default_root_object=None,
                 logging=None):
        """
        :param origin: Origin information to associate with the
                       distribution. If your distribution will use
                       an Amazon S3 origin, then this should be an
                       S3Origin object. If your distribution will use
                       a custom origin (non Amazon S3), then this
                       should be a CustomOrigin object.
        :type origin: :class:`boto.cloudfront.origin.S3Origin` or
                      :class:`boto.cloudfront.origin.CustomOrigin`

        :param enabled: Whether the distribution is enabled to accept
                        end user requests for content.
        :type enabled: bool

        :param caller_reference: A unique number that ensures the
                                 request can't be replayed. If no
                                 caller_reference is provided, boto
                                 will generate a type 4 UUID for use
                                 as the caller reference.
        :type caller_reference: str

        :param cnames: A CNAME alias you want to associate with this
                       distribution. You can have up to 10 CNAME aliases
                       per distribution.
        :type cnames: array of str

        :param comment: Any comments you want to include about the
                        distribution.
        :type comment: str

        :param trusted_signers: Specifies any AWS accounts you want to
                                permit to create signed URLs for private
                                content. If you want the distribution
                                to use signed URLs, this should contain
                                a TrustedSigners object; if you want the
                                distribution to use basic URLs, leave
                                this None.
        :type trusted_signers: :class:`boto.cloudfront.signers.TrustedSigners`

        :param default_root_object: Designates a default root object.
                                    Only include a DefaultRootObject value
                                    if you are going to assign a default
                                    root object for the distribution.
        :type default_root_object: str

        :param logging: Controls whether access logs are written for the
                        distribution. If you want to turn on access logs,
                        this should contain a LoggingInfo object; otherwise
                        it should contain None.
        :type logging: :class:`boto.cloudfront.logging.LoggingInfo`
        """
        self.connection = connection
        self.origin = origin
        self.enabled = enabled
        if caller_reference:
            self.caller_reference = caller_reference
        else:
            self.caller_reference = str(uuid.uuid4())
        self.cnames = []
        if cnames:
            self.cnames = cnames
        self.comment = comment
        self.trusted_signers = trusted_signers
        self.logging = logging
        self.default_root_object = default_root_object

    def __repr__(self):
        ...

    def to_xml(self):
        ...

    def startElement(self, name, attrs, connection):
        if name == 'TrustedSigners':
            self.trusted_signers = TrustedSigners()
            return self.trusted_signers
        elif name == 'Logging':
            self.logging = LoggingInfo()
            return self.logging
        elif name == 'S3Origin':
            self.origin = S3Origin()
            return self.origin
        elif name == 'CustomOrigin':
            self.origin = CustomOrigin()
            return self.origin
        else:
            return None

    def endElement(self, name, value, connection):
        ...
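Putting the two origin types together with ``DistributionConfig`` looks roughly like this (a sketch; the DNS names and CNAME are made-up values):

from boto.cloudfront.distribution import DistributionConfig
from boto.cloudfront.origin import S3Origin, CustomOrigin

# S3-backed distribution (hypothetical bucket name).
s3_config = DistributionConfig(
    origin=S3Origin(dns_name='mybucket.s3.amazonaws.com'),
    enabled=True,
    cnames=['assets.example.com'],
    comment='static assets')

# Custom-origin distribution; origin_protocol_policy values are the
# ones documented in the CustomOrigin docstring above.
custom_config = DistributionConfig(
    origin=CustomOrigin(dns_name='www.example.com',
                        origin_protocol_policy='match-viewer'),
    enabled=False)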
# FILE boto/boto/s3/acl.py class ACL(object): def __init__(self, policy=None): self.policy = policy self.grants = [] def add_grant(self, grant): self.grants.append(grant) def add_email_grant(self, permission, email_address): grant = Grant(permission=permission, type='AmazonCustomerByEmail', email_address=email_address) self.grants.append(grant) def add_user_grant(self, permission, user_id, display_name=None): grant = Grant(permission=permission, type='CanonicalUser', id=user_id, display_name=display_name) self.grants.append(grant) def startElement(self, name, attrs, connection): if name == 'Grant': self.grants.append(Grant(self)) return self.grants[-1] else: return None def endElement(self, name, value, connection): if name == 'Grant': pass else: setattr(self, name, value) def to_xml(self): s = '<AccessControlList>' for grant in self.grants: s += grant.to_xml() s += '</AccessControlList>' return s # FILE boto/boto/s3/connection.py class S3Connection(AWSAuthConnection): def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, host=NoHostProvided, debug=0, https_connection_factory=None, calling_format=DefaultCallingFormat, path='/', provider='aws', bucket_class=Bucket, security_token=None, suppress_consec_slashes=True, anon=False, validate_certs=None, profile_name=None): no_host_provided = False # Try falling back to the boto config file's value, if present. if host is NoHostProvided: host = boto.config.get('s3', 'host') if host is None: host = self.DefaultHost no_host_provided = True if isinstance(calling_format, six.string_types): calling_format=boto.utils.find_class(calling_format)() self.calling_format = calling_format self.bucket_class = bucket_class self.anon = anon super(S3Connection, self).__init__(host, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, debug=debug, https_connection_factory=https_connection_factory, path=path, provider=provider, security_token=security_token, suppress_consec_slashes=suppress_consec_slashes, validate_certs=validate_certs, profile_name=profile_name) # We need to delay until after the call to ``super`` before checking # to see if SigV4 is in use. if no_host_provided: if 'hmac-v4-s3' in self._required_auth_capability(): raise HostRequiredError( "When using SigV4, you must specify a 'host' parameter." ) def _required_auth_capability(self): ... def __iter__(self): ... def __contains__(self, bucket_name): ... def set_bucket_class(self, bucket_class): """ Set the Bucket class associated with this bucket. By default, this would be the boto.s3.key.Bucket class but if you want to subclass that for some reason this allows you to associate your new class. :type bucket_class: class :param bucket_class: A subclass of Bucket that can be more specific """ ... def build_post_policy(self, expiration_time, conditions): """ Taken from the AWS book Python examples and modified for use with boto """ ... def build_post_form_args(self, bucket_name, key, expires_in=6000, """ Taken from the AWS book Python examples and modified for use with boto This only returns the arguments required for the post form, not the actual form. 
This does not return the file input field which also needs to be added :type bucket_name: string :param bucket_name: Bucket to submit to :type key: string :param key: Key name, optionally add ${filename} to the end to attach the submitted filename :type expires_in: integer :param expires_in: Time (in seconds) before this expires, defaults to 6000 :type acl: string :param acl: A canned ACL. One of: * private * public-read * public-read-write * authenticated-read * bucket-owner-read * bucket-owner-full-control :type success_action_redirect: string :param success_action_redirect: URL to redirect to on success :type max_content_length: integer :param max_content_length: Maximum size for this file :type http_method: string :param http_method: HTTP Method to use, "http" or "https" :type storage_class: string :param storage_class: Storage class to use for storing the object. Valid values: STANDARD | REDUCED_REDUNDANCY :type server_side_encryption: string :param server_side_encryption: Specifies server-side encryption algorithm to use when Amazon S3 creates an object. Valid values: None | AES256 :rtype: dict :return: A dictionary containing field names/values as well as a url to POST to .. code-block:: python """ ... def generate_url_sigv4(self, expires_in, method, bucket='', key='', ... def generate_url(self, expires_in, method, bucket='', key='', headers=None, query_auth=True, force_http=False, response_headers=None, expires_in_absolute=False, version_id=None): if self._auth_handler.capability[0] == 'hmac-v4-s3' and query_auth: # Handle the special sigv4 case return self.generate_url_sigv4(expires_in, method, bucket=bucket, key=key, headers=headers, force_http=force_http, response_headers=response_headers, version_id=version_id) headers = headers or {} if expires_in_absolute: expires = int(expires_in) else: expires = int(time.time() + expires_in) auth_path = self.calling_format.build_auth_path(bucket, key) auth_path = self.get_path(auth_path) # optional version_id and response_headers need to be added to # the query param list. extra_qp = [] if version_id is not None: extra_qp.append("versionId=%s" % version_id) if response_headers: for k, v in response_headers.items(): extra_qp.append("%s=%s" % (k, urllib.parse.quote(v))) if self.provider.security_token: headers['x-amz-security-token'] = self.provider.security_token if extra_qp: delimiter = '?' if '?' not in auth_path else '&' auth_path += delimiter + '&'.join(extra_qp) self.calling_format.build_path_base(bucket, key) if query_auth and not self.anon: c_string = boto.utils.canonical_string(method, auth_path, headers, expires, self.provider) b64_hmac = self._auth_handler.sign_string(c_string) encoded_canonical = urllib.parse.quote(b64_hmac, safe='') query_part = '?' + self.QueryString % (encoded_canonical, expires, self.aws_access_key_id) else: query_part = '' if headers: hdr_prefix = self.provider.header_prefix for k, v in headers.items(): if k.startswith(hdr_prefix): # headers used for sig generation must be # included in the url also. extra_qp.append("%s=%s" % (k, urllib.parse.quote(v))) if extra_qp: delimiter = '?' if not query_part else '&' query_part += delimiter + '&'.join(extra_qp) if force_http: protocol = 'http' port = 80 else: protocol = self.protocol port = self.port return self.calling_format.build_url_base(self, protocol, self.server_name(port), bucket, key) + query_part def get_all_buckets(self, headers=None): ... 
def get_canonical_user_id(self, headers=None):
        ...

    def get_bucket(self, bucket_name, validate=True, headers=None):
        ...

    def head_bucket(self, bucket_name, headers=None):
        ...

    def lookup(self, bucket_name, validate=True, headers=None):
        ...

    def create_bucket(self, bucket_name, headers=None,
                      location=Location.DEFAULT, policy=None):
        ...

    def delete_bucket(self, bucket, headers=None):
        """
        Removes an S3 bucket.
In order to remove the bucket, it must first be empty. If the bucket is not empty, an ``S3ResponseError`` will be raised. :type bucket_name: string :param bucket_name: The name of the bucket :type headers: dict :param headers: Additional headers to pass along with the request to AWS. """ ... def make_request(self, method, bucket='', key='', headers=None, data='', ... # FILE boto/boto/cloudfront/signers.py class ActiveTrustedSigners(list): def startElement(self, name, attrs, connection): if name == 'Signer': s = Signer() self.append(s) return s def endElement(self, name, value, connection): pass # FILE boto/boto/cloudfront/object.py class Object(Key): def __init__(self, bucket, name=None): super(Object, self).__init__(bucket, name=name) self.distribution = bucket.distribution def __repr__(self): return '<Object: %s/%s>' % (self.distribution.config.origin, self.name) def url(self, scheme='http'): url = '%s://' % scheme url += self.distribution.domain_name if scheme.lower().startswith('rtmp'): url += '/cfx/st/' else: url += '/' url += self.name return url # FILE boto/boto/cloudfront/distribution.py class StreamingDistribution(Distribution): def __init__(self, connection=None, config=None, domain_name='', id='', last_modified_time=None, status=''): super(StreamingDistribution, self).__init__(connection, config, domain_name, id, last_modified_time, status) self._object_class = StreamingObject def startElement(self, name, attrs, connection): ... def update(self, enabled=None, cnames=None, comment=None): """ Update the configuration of the StreamingDistribution. The only values of the StreamingDistributionConfig that can be directly updated are: * CNAMES * Comment * Whether the Distribution is enabled or not Any changes to the ``trusted_signers`` or ``origin`` properties of this distribution's current config object will also be included in the update. Therefore, to set the origin access identity for this distribution, set ``StreamingDistribution.config.origin.origin_access_identity`` before calling this update method. :type enabled: bool :param enabled: Whether the StreamingDistribution is active or not. :type cnames: list of str :param cnames: The DNS CNAME's associated with this Distribution. Maximum of 10 values. :type comment: str or unicode :param comment: The comment associated with the Distribution. """ ... def delete(self): ... # FILE boto/boto/cloudfront/origin.py class CustomOrigin(object): """ Origin information to associate with the distribution. If your distribution will use a non-Amazon S3 origin, then you use the CustomOrigin element. """ def __init__(self, dns_name=None, http_port=80, https_port=443, origin_protocol_policy=None): """ :param dns_name: The DNS name of your Amazon S3 bucket to associate with the distribution. For example: mybucket.s3.amazonaws.com. :type dns_name: str :param http_port: The HTTP port the custom origin listens on. :type http_port: int :param https_port: The HTTPS port the custom origin listens on. :type http_port: int :param origin_protocol_policy: The origin protocol policy to apply to your origin. If you specify http-only, CloudFront will use HTTP only to access the origin. If you specify match-viewer, CloudFront will fetch from your origin using HTTP or HTTPS, based on the protocol of the viewer request. 
:type origin_protocol_policy: str """ self.dns_name = dns_name self.http_port = http_port self.https_port = https_port self.origin_protocol_policy = origin_protocol_policy def __repr__(self): return '<CustomOrigin: %s>' % self.dns_name def startElement(self, name, attrs, connection): return None def endElement(self, name, value, connection): if name == 'DNSName': self.dns_name = value elif name == 'HTTPPort': try: self.http_port = int(value) except ValueError: self.http_port = value elif name == 'HTTPSPort': try: self.https_port = int(value) except ValueError: self.https_port = value elif name == 'OriginProtocolPolicy': self.origin_protocol_policy = value else: setattr(self, name, value) def to_xml(self): s = ' <CustomOrigin>\n' s += ' <DNSName>%s</DNSName>\n' % self.dns_name s += ' <HTTPPort>%d</HTTPPort>\n' % self.http_port s += ' <HTTPSPort>%d</HTTPSPort>\n' % self.https_port s += ' <OriginProtocolPolicy>%s</OriginProtocolPolicy>\n' % self.origin_protocol_policy s += ' </CustomOrigin>\n' return s # FILE boto/boto/cloudfront/distribution.py class DistributionSummary(object): def __init__(self, connection=None, domain_name='', id='', last_modified_time=None, status='', origin=None, cname='', comment='', enabled=False): self.connection = connection self.domain_name = domain_name self.id = id self.last_modified_time = last_modified_time self.status = status self.origin = origin self.enabled = enabled self.cnames = [] if cname: self.cnames.append(cname) self.comment = comment self.trusted_signers = None self.etag = None self.streaming = False def __repr__(self): ... def startElement(self, name, attrs, connection): if name == 'TrustedSigners': self.trusted_signers = TrustedSigners() return self.trusted_signers elif name == 'S3Origin': self.origin = S3Origin() return self.origin elif name == 'CustomOrigin': self.origin = CustomOrigin() return self.origin return None def endElement(self, name, value, connection): ... def get_distribution(self): ... # FILE boto/boto/cloudfront/origin.py class S3Origin(object): """ Origin information to associate with the distribution. If your distribution will use an Amazon S3 origin, then you use the S3Origin element. """ def __init__(self, dns_name=None, origin_access_identity=None): """ :param dns_name: The DNS name of your Amazon S3 bucket to associate with the distribution. For example: mybucket.s3.amazonaws.com. :type dns_name: str :param origin_access_identity: The CloudFront origin access identity to associate with the distribution. If you want the distribution to serve private content, include this element; if you want the distribution to serve public content, remove this element. 
:type origin_access_identity: str
        """
        self.dns_name = dns_name
        self.origin_access_identity = origin_access_identity

    def __repr__(self):
        return '<S3Origin: %s>' % self.dns_name

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'DNSName':
            self.dns_name = value
        elif name == 'OriginAccessIdentity':
            self.origin_access_identity = value
        else:
            setattr(self, name, value)

    def to_xml(self):
        s = '  <S3Origin>\n'
        s += '    <DNSName>%s</DNSName>\n' % self.dns_name
        if self.origin_access_identity:
            val = get_oai_value(self.origin_access_identity)
            s += '    <OriginAccessIdentity>%s</OriginAccessIdentity>\n' % val
        s += '  </S3Origin>\n'
        return s

# FILE boto/boto/cloudfront/identity.py
class OriginAccessIdentity(object):
    def __init__(self, connection=None, config=None, id='',
                 s3_user_id='', comment=''):
        self.connection = connection
        self.config = config
        self.id = id
        self.s3_user_id = s3_user_id
        self.comment = comment
        self.etag = None

    def startElement(self, name, attrs, connection):
        if name == 'CloudFrontOriginAccessIdentityConfig':
            self.config = OriginAccessIdentityConfig()
            return self.config
        else:
            return None

    def endElement(self, name, value, connection):
        if name == 'Id':
            self.id = value
        elif name == 'S3CanonicalUserId':
            self.s3_user_id = value
        elif name == 'Comment':
            self.comment = value
        else:
            setattr(self, name, value)

    def update(self, comment=None):
        new_config = OriginAccessIdentityConfig(self.connection,
                                                self.config.caller_reference,
                                                self.config.comment)
        if comment is not None:
            new_config.comment = comment
        self.etag = self.connection.set_origin_identity_config(self.id, self.etag, new_config)
        self.config = new_config

    def delete(self):
        return self.connection.delete_origin_access_identity(self.id, self.etag)

    def uri(self):
        return 'origin-access-identity/cloudfront/%s' % self.id

# FILE boto/boto/cloudfront/distribution.py
class StreamingDistributionConfig(DistributionConfig):
    def __init__(self, connection=None, origin='', enabled=False,
                 caller_reference='', cnames=None, comment='',
                 trusted_signers=None, logging=None):
        super(StreamingDistributionConfig, self).__init__(connection=connection,
                                                          origin=origin,
                                                          enabled=enabled,
                                                          caller_reference=caller_reference,
                                                          cnames=cnames,
                                                          comment=comment,
                                                          trusted_signers=trusted_signers,
                                                          logging=logging)

    def to_xml(self):
        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
        s += '<StreamingDistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
        if self.origin:
            s += self.origin.to_xml()
        s += '  <CallerReference>%s</CallerReference>\n' % self.caller_reference
        for cname in self.cnames:
            s += '  <CNAME>%s</CNAME>\n' % cname
        if self.comment:
            s += '  <Comment>%s</Comment>\n' % self.comment
        s += '  <Enabled>'
        if self.enabled:
            s += 'true'
        else:
            s += 'false'
        s += '</Enabled>\n'
        if self.trusted_signers:
            s += '<TrustedSigners>\n'
            for signer in self.trusted_signers:
                if signer == 'Self':
                    s += '  <Self/>\n'
                else:
                    s += '  <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
            s += '</TrustedSigners>\n'
        if self.logging:
            s += '<Logging>\n'
            s += '  <Bucket>%s</Bucket>\n' % self.logging.bucket
            s += '  <Prefix>%s</Prefix>\n' % self.logging.prefix
            s += '</Logging>\n'
        s += '</StreamingDistributionConfig>\n'
        return s
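Given the ``to_xml`` body above, serialization is straightforward. Roughly (the bucket name is hypothetical, and the caller reference is a generated UUID in practice):

from boto.cloudfront.distribution import StreamingDistributionConfig
from boto.cloudfront.origin import S3Origin

config = StreamingDistributionConfig(
    origin=S3Origin('mybucket.s3.amazonaws.com'),
    enabled=True,
    comment='video delivery')
print(config.to_xml())
# Emits approximately:
# <?xml version="1.0" encoding="UTF-8"?>
# <StreamingDistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">
#   <S3Origin>
#     <DNSName>mybucket.s3.amazonaws.com</DNSName>
#   </S3Origin>
#   <CallerReference>...uuid4...</CallerReference>
#   <Comment>video delivery</Comment>
#   <Enabled>true</Enabled>
# </StreamingDistributionConfig>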
# FILE boto/boto/cloudfront/distribution.py
class DistributionConfig(object):
    def __init__(self, connection=None, origin=None, enabled=False,
                 caller_reference='', cnames=None, comment='',
                 trusted_signers=None, default_root_object=None,
                 logging=None):
        """
        :param origin: Origin information to associate with the
                       distribution. If your distribution will use
                       an Amazon S3 origin, then this should be an
                       S3Origin object. If your distribution will use
                       a custom origin (non Amazon S3), then this
                       should be a CustomOrigin object.
        :type origin: :class:`boto.cloudfront.origin.S3Origin` or
                      :class:`boto.cloudfront.origin.CustomOrigin`

        :param enabled: Whether the distribution is enabled to accept
                        end user requests for content.
        :type enabled: bool

        :param caller_reference: A unique number that ensures the
                                 request can't be replayed. If no
                                 caller_reference is provided, boto
                                 will generate a type 4 UUID for use
                                 as the caller reference.
        :type caller_reference: str

        :param cnames: A CNAME alias you want to associate with this
                       distribution. You can have up to 10 CNAME aliases
                       per distribution.
        :type cnames: array of str

        :param comment: Any comments you want to include about the
                        distribution.
        :type comment: str

        :param trusted_signers: Specifies any AWS accounts you want to
                                permit to create signed URLs for private
                                content. If you want the distribution
                                to use signed URLs, this should contain
                                a TrustedSigners object; if you want the
                                distribution to use basic URLs, leave
                                this None.
        :type trusted_signers: :class:`boto.cloudfront.signers.TrustedSigners`

        :param default_root_object: Designates a default root object.
                                    Only include a DefaultRootObject
                                    value if you are going to assign a
                                    default root object for the
                                    distribution.
        :type default_root_object: str

        :param logging: Controls whether access logs are written for the
                        distribution. If you want to turn on access logs,
                        this should contain a LoggingInfo object; otherwise
                        it should contain None.
        :type logging: :class:`boto.cloudfront.logging.LoggingInfo`
        """
        self.connection = connection
        self.origin = origin
        self.enabled = enabled
        if caller_reference:
            self.caller_reference = caller_reference
        else:
            self.caller_reference = str(uuid.uuid4())
        self.cnames = []
        if cnames:
            self.cnames = cnames
        self.comment = comment
        self.trusted_signers = trusted_signers
        self.logging = logging
        self.default_root_object = default_root_object

    def __repr__(self):
        return "DistributionConfig:%s" % self.origin

    def to_xml(self):
        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
        s += '<DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
        if self.origin:
            s += self.origin.to_xml()
        s += '  <CallerReference>%s</CallerReference>\n' % self.caller_reference
        for cname in self.cnames:
            s += '  <CNAME>%s</CNAME>\n' % cname
        if self.comment:
            s += '  <Comment>%s</Comment>\n' % self.comment
        s += '  <Enabled>'
        if self.enabled:
            s += 'true'
        else:
            s += 'false'
        s += '</Enabled>\n'
        if self.trusted_signers:
            s += '<TrustedSigners>\n'
            for signer in self.trusted_signers:
                if signer == 'Self':
                    s += '  <Self></Self>\n'
                else:
                    s += '  <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
            s += '</TrustedSigners>\n'
        if self.logging:
            s += '<Logging>\n'
            s += '  <Bucket>%s</Bucket>\n' % self.logging.bucket
            s += '  <Prefix>%s</Prefix>\n' % self.logging.prefix
            s += '</Logging>\n'
        if self.default_root_object:
            dro = self.default_root_object
            s += '<DefaultRootObject>%s</DefaultRootObject>\n' % dro
        s += '</DistributionConfig>\n'
        return s

    def startElement(self, name, attrs, connection):
        if name == 'TrustedSigners':
            self.trusted_signers = TrustedSigners()
            return self.trusted_signers
        elif name == 'Logging':
            self.logging = LoggingInfo()
            return self.logging
        elif name == 'S3Origin':
            self.origin = S3Origin()
            return self.origin
        elif name == 'CustomOrigin':
            self.origin = CustomOrigin()
            return self.origin
        else:
            return None

    def endElement(self, name, value, connection):
        if name == 'CNAME':
            self.cnames.append(value)
        elif name == 'Comment':
            self.comment = value
        elif name == 'Enabled':
            if value.lower() == 'true':
                self.enabled = True
            else:
                self.enabled = False
        elif name == 'CallerReference':
            self.caller_reference = value
        elif name == 'DefaultRootObject':
            self.default_root_object = value
        else:
            setattr(self, name, value)
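The paired ``startElement``/``endElement`` methods above are SAX callbacks; boto drives them by parsing an API response body. A sketch of that flow, assuming boto's ``boto.handler.XmlHandler`` adapter (the payload here is trimmed and hypothetical):

import xml.sax
import boto.handler
from boto.cloudfront.distribution import DistributionConfig

body = b"""<DistributionConfig>
  <CallerReference>ref-123</CallerReference>
  <CNAME>cdn.example.com</CNAME>
  <Enabled>true</Enabled>
</DistributionConfig>"""

# XmlHandler routes each element to startElement/endElement on the
# root node, which mutates its own attributes as the elements close.
config = DistributionConfig()
handler = boto.handler.XmlHandler(config, None)
xml.sax.parseString(body, handler)
assert config.enabled is True
assert config.cnames == ['cdn.example.com']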
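Both ``Distribution.update`` and the ``StreamingDistribution.update`` excerpted below follow the same copy-modify-set pattern: clone the current config, override only the mutable fields (enabled, cnames, comment), and push it back under the distribution's current ETag. Typical usage (a sketch; the credentials and distribution id are hypothetical placeholders):

from boto.cloudfront import CloudFrontConnection

cf = CloudFrontConnection('ACCESS_KEY', 'SECRET_KEY')  # hypothetical credentials
dist = cf.get_streaming_distribution_info('EDFDVBD6EXAMPLE')  # hypothetical id

# origin and trusted_signers are carried over from dist.config;
# only the fields passed here change.
dist.update(enabled=False, comment='paused for maintenance')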
# FILE boto/boto/cloudfront/distribution.py class StreamingDistribution(Distribution): def __init__(self, connection=None, config=None, domain_name='', id='', last_modified_time=None, status=''): super(StreamingDistribution, self).__init__(connection, config, domain_name, id, last_modified_time, status) self._object_class = StreamingObject def startElement(self, name, attrs, connection): if name == 'StreamingDistributionConfig': self.config = StreamingDistributionConfig() return self.config else: return super(StreamingDistribution, self).startElement(name, attrs, connection) def update(self, enabled=None, cnames=None, comment=None): """ Update the configuration of the StreamingDistribution. The only values of the StreamingDistributionConfig that can be directly updated are: * CNAMES * Comment * Whether the Distribution is enabled or not Any changes to the ``trusted_signers`` or ``origin`` properties of this distribution's current config object will also be included in the update. Therefore, to set the origin access identity for this distribution, set ``StreamingDistribution.config.origin.origin_access_identity`` before calling this update method. :type enabled: bool :param enabled: Whether the StreamingDistribution is active or not. :type cnames: list of str :param cnames: The DNS CNAME's associated with this Distribution. Maximum of 10 values. :type comment: str or unicode :param comment: The comment associated with the Distribution. """ new_config = StreamingDistributionConfig(self.connection, self.config.origin, self.config.enabled, self.config.caller_reference, self.config.cnames, self.config.comment, self.config.trusted_signers) if enabled is not None: new_config.enabled = enabled if cnames is not None: new_config.cnames = cnames if comment is not None: new_config.comment = comment self.etag = self.connection.set_streaming_distribution_config(self.id, self.etag, new_config) self.config = new_config self._object_class = StreamingObject def delete(self): self.connection.delete_streaming_distribution(self.id, self.etag) # FILE boto/boto/cloudfront/distribution.py class DistributionSummary(object): def __init__(self, connection=None, domain_name='', id='', last_modified_time=None, status='', origin=None, cname='', comment='', enabled=False): self.connection = connection self.domain_name = domain_name self.id = id self.last_modified_time = last_modified_time self.status = status self.origin = origin self.enabled = enabled self.cnames = [] if cname: self.cnames.append(cname) self.comment = comment self.trusted_signers = None self.etag = None self.streaming = False def __repr__(self): return "DistributionSummary:%s" % self.domain_name def startElement(self, name, attrs, connection): if name == 'TrustedSigners': self.trusted_signers = TrustedSigners() return self.trusted_signers elif name == 'S3Origin': self.origin = S3Origin() return self.origin elif name == 'CustomOrigin': self.origin = CustomOrigin() return self.origin return None def endElement(self, name, value, connection): if name == 'Id': self.id = value elif name == 'Status': self.status = value elif name == 'LastModifiedTime': self.last_modified_time = value elif name == 'DomainName': self.domain_name = value elif name == 'Origin': self.origin = value elif name == 'CNAME': self.cnames.append(value) elif name == 'Comment': self.comment = value elif name == 'Enabled': if value.lower() == 'true': self.enabled = True else: self.enabled = False elif name == 'StreamingDistributionSummary': self.streaming = True else: setattr(self, name, 
value) def get_distribution(self): return self.connection.get_distribution_info(self.id) # FILE boto/boto/cloudfront/object.py class StreamingObject(Object): def url(self, scheme='rtmp'): return super(StreamingObject, self).url(scheme) # FILE boto/boto/cloudfront/object.py class Object(Key): def __init__(self, bucket, name=None): super(Object, self).__init__(bucket, name=name) self.distribution = bucket.distribution def __repr__(self): ... def url(self, scheme='http'): url = '%s://' % scheme url += self.distribution.domain_name if scheme.lower().startswith('rtmp'): url += '/cfx/st/' else: url += '/' url += self.name return url Based on the information above, please complete the function: #CURRENT_FILE: boto/boto/cloudfront/distribution.py import uuid import base64 import time from boto.compat import six, json from boto.cloudfront.identity import OriginAccessIdentity from boto.cloudfront.object import Object, StreamingObject from boto.cloudfront.signers import ActiveTrustedSigners, TrustedSigners from boto.cloudfront.logging import LoggingInfo from boto.cloudfront.origin import S3Origin, CustomOrigin from boto.s3.acl import ACL from boto.s3.connection import S3Connection import rsa class Distribution(object): def __init__(self, connection=None, config=None, domain_name='', id='', last_modified_time=None, status=''): self.connection = connection self.config = config self.domain_name = domain_name self.id = id self.last_modified_time = last_modified_time self.status = status self.in_progress_invalidation_batches = 0 self.active_signers = None self.etag = None self._bucket = None self._object_class = Object def __repr__(self): return "Distribution:%s" % self.domain_name def startElement(self, name, attrs, connection): if name == 'DistributionConfig': self.config = DistributionConfig() return self.config elif name == 'ActiveTrustedSigners': self.active_signers = ActiveTrustedSigners() return self.active_signers else: return None def endElement(self, name, value, connection): if name == 'Id': self.id = value elif name == 'LastModifiedTime': self.last_modified_time = value elif name == 'Status': self.status = value elif name == 'InProgressInvalidationBatches': self.in_progress_invalidation_batches = int(value) elif name == 'DomainName': self.domain_name = value else: setattr(self, name, value) def update(self, enabled=None, cnames=None, comment=None): """ Update the configuration of the Distribution. The only values of the DistributionConfig that can be directly updated are: * CNAMES * Comment * Whether the Distribution is enabled or not Any changes to the ``trusted_signers`` or ``origin`` properties of this distribution's current config object will also be included in the update. Therefore, to set the origin access identity for this distribution, set ``Distribution.config.origin.origin_access_identity`` before calling this update method. :type enabled: bool :param enabled: Whether the Distribution is active or not. :type cnames: list of str :param cnames: The DNS CNAME's associated with this Distribution. Maximum of 10 values. :type comment: str or unicode :param comment: The comment associated with the Distribution. 
""" new_config = DistributionConfig(self.connection, self.config.origin, self.config.enabled, self.config.caller_reference, self.config.cnames, self.config.comment, self.config.trusted_signers, self.config.default_root_object) if enabled is not None: new_config.enabled = enabled if cnames is not None: new_config.cnames = cnames if comment is not None: new_config.comment = comment self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config) self.config = new_config self._object_class = Object def enable(self): """ Activate the Distribution. A convenience wrapper around the update method. """ self.update(enabled=True) def disable(self): """ Deactivate the Distribution. A convenience wrapper around the update method. """ self.update(enabled=False) def delete(self): """ Delete this CloudFront Distribution. The content associated with the Distribution is not deleted from the underlying Origin bucket in S3. """ self.connection.delete_distribution(self.id, self.etag) def _get_bucket(self): if isinstance(self.config.origin, S3Origin): if not self._bucket: bucket_dns_name = self.config.origin.dns_name bucket_name = bucket_dns_name.replace('.s3.amazonaws.com', '') from boto.s3.connection import S3Connection s3 = S3Connection(self.connection.aws_access_key_id, self.connection.aws_secret_access_key, proxy=self.connection.proxy, proxy_port=self.connection.proxy_port, proxy_user=self.connection.proxy_user, proxy_pass=self.connection.proxy_pass) self._bucket = s3.get_bucket(bucket_name) self._bucket.distribution = self self._bucket.set_key_class(self._object_class) return self._bucket else: raise NotImplementedError('Unable to get_objects on CustomOrigin') def get_objects(self): """ Return a list of all content objects in this distribution. :rtype: list of :class:`boto.cloudfront.object.Object` :return: The content objects """ bucket = self._get_bucket() objs = [] for key in bucket: objs.append(key) return objs def set_permissions(self, object, replace=False): """ Sets the S3 ACL grants for the given object to the appropriate value based on the type of Distribution. If the Distribution is serving private content the ACL will be set to include the Origin Access Identity associated with the Distribution. If the Distribution is serving public content the content will be set up with "public-read". :type object: :class:`boto.cloudfront.object.Object` :param enabled: The Object whose ACL is being set :type replace: bool :param replace: If False, the Origin Access Identity will be appended to the existing ACL for the object. If True, the ACL for the object will be completely replaced with one that grants READ permission to the Origin Access Identity. """ if isinstance(self.config.origin, S3Origin): if self.config.origin.origin_access_identity: id = self.config.origin.origin_access_identity.split('/')[-1] oai = self.connection.get_origin_access_identity_info(id) policy = object.get_acl() if replace: policy.acl = ACL() policy.acl.add_user_grant('READ', oai.s3_user_id) object.set_acl(policy) else: object.set_canned_acl('public-read') def set_permissions_all(self, replace=False): """ Sets the S3 ACL grants for all objects in the Distribution to the appropriate value based on the type of Distribution. :type replace: bool :param replace: If False, the Origin Access Identity will be appended to the existing ACL for the object. If True, the ACL for the object will be completely replaced with one that grants READ permission to the Origin Access Identity. 
""" bucket = self._get_bucket() for key in bucket: self.set_permissions(key, replace) def add_object(self, name, content, headers=None, replace=True): """ Adds a new content object to the Distribution. The content for the object will be copied to a new Key in the S3 Bucket and the permissions will be set appropriately for the type of Distribution. :type name: str or unicode :param name: The name or key of the new object. :type content: file-like object :param content: A file-like object that contains the content for the new object. :type headers: dict :param headers: A dictionary containing additional headers you would like associated with the new object in S3. :rtype: :class:`boto.cloudfront.object.Object` :return: The newly created object. """ if self.config.origin.origin_access_identity: policy = 'private' else: policy = 'public-read' bucket = self._get_bucket() object = bucket.new_key(name) object.set_contents_from_file(content, headers=headers, policy=policy) if self.config.origin.origin_access_identity: self.set_permissions(object, replace) return object def create_signed_url(self, url, keypair_id, expire_time=None, valid_after_time=None, ip_address=None, policy_url=None, private_key_file=None, private_key_string=None): """ Creates a signed CloudFront URL that is only valid within the specified parameters. :type url: str :param url: The URL of the protected object. :type keypair_id: str :param keypair_id: The keypair ID of the Amazon KeyPair used to sign theURL. This ID MUST correspond to the private key specified with private_key_file or private_key_string. :type expire_time: int :param expire_time: The expiry time of the URL. If provided, the URL will expire after the time has passed. If not provided the URL will never expire. Format is a unix epoch. Use int(time.time() + duration_in_sec). :type valid_after_time: int :param valid_after_time: If provided, the URL will not be valid until after valid_after_time. Format is a unix epoch. Use int(time.time() + secs_until_valid). :type ip_address: str :param ip_address: If provided, only allows access from the specified IP address. Use '192.168.0.10' for a single IP or use '192.168.0.0/24' CIDR notation for a subnet. :type policy_url: str :param policy_url: If provided, allows the signature to contain wildcard globs in the URL. For example, you could provide: 'http://example.com/media/\*' and the policy and signature would allow access to all contents of the media subdirectory. If not specified, only allow access to the exact url provided in 'url'. :type private_key_file: str or file object. :param private_key_file: If provided, contains the filename of the private key file used for signing or an open file object containing the private key contents. Only one of private_key_file or private_key_string can be provided. :type private_key_string: str :param private_key_string: If provided, contains the private key string used for signing. Only one of private_key_file or private_key_string can be provided. :rtype: str :return: The signed URL. """ # Get the required parameters params = self._create_signing_params( url=url, keypair_id=keypair_id, expire_time=expire_time, valid_after_time=valid_after_time, ip_address=ip_address, policy_url=policy_url, private_key_file=private_key_file, private_key_string=private_key_string) #combine these into a full url if "?" in url: sep = "&" else: sep = "?" 
signed_url_params = [] for key in ["Expires", "Policy", "Signature", "Key-Pair-Id"]: if key in params: param = "%s=%s" % (key, params[key]) signed_url_params.append(param) signed_url = url + sep + "&".join(signed_url_params) return signed_url def _create_signing_params(self, url, keypair_id, expire_time=None, valid_after_time=None, ip_address=None, policy_url=None, private_key_file=None, private_key_string=None): """ Creates the required URL parameters for a signed URL. """ params = {} # Check if we can use a canned policy if expire_time and not valid_after_time and not ip_address and not policy_url: # we manually construct this policy string to ensure formatting # matches signature policy = self._canned_policy(url, expire_time) params["Expires"] = str(expire_time) else: # If no policy_url is specified, default to the full url. if policy_url is None: policy_url = url # Can't use canned policy policy = self._custom_policy(policy_url, expires=expire_time, valid_after=valid_after_time, ip_address=ip_address) encoded_policy = self._url_base64_encode(policy) params["Policy"] = encoded_policy #sign the policy signature = self._sign_string(policy, private_key_file, private_key_string) #now base64 encode the signature (URL safe as well) encoded_signature = self._url_base64_encode(signature) params["Signature"] = encoded_signature params["Key-Pair-Id"] = keypair_id return params @staticmethod def _canned_policy(resource, expires): """ Creates a canned policy string. """
boto/boto/cloudfront/distribution.py
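One plausible completion for _canned_policy, offered as a sketch rather than the verbatim boto source: the policy JSON is assembled by hand instead of with json.dumps so the byte layout stays stable, since _create_signing_params signs and base64-encodes this exact string. The field layout follows the documented CloudFront canned-policy format (a Resource plus a DateLessThan epoch-time condition).

def _canned_policy(resource, expires):
    """Creates a canned policy string."""
    # Built by hand so the formatting matches what gets signed;
    # in the class this sits under a @staticmethod decorator.
    policy = ('{"Statement":[{"Resource":"%(resource)s",'
              '"Condition":{"DateLessThan":{"AWS:EpochTime":'
              '%(expires)s}}}]}' % locals())
    return policy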
boto.cloudfront.invalidation.InvalidationBatch.escape
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE boto/boto/resultset.py class ResultSet(list): """ The ResultSet is used to pass results back from the Amazon services to the client. It is light wrapper around Python's :py:class:`list` class, with some additional methods for parsing XML results from AWS. Because I don't really want any dependencies on external libraries, I'm using the standard SAX parser that comes with Python. The good news is that it's quite fast and efficient but it makes some things rather difficult. You can pass in, as the marker_elem parameter, a list of tuples. Each tuple contains a string as the first element which represents the XML element that the resultset needs to be on the lookout for and a Python class as the second element of the tuple. Each time the specified element is found in the XML, a new instance of the class will be created and popped onto the stack. :ivar str next_token: A hash used to assist in paging through very long result sets. In most cases, passing this value to certain methods will give you another 'page' of results. """ def __init__(self, marker_elem=None): list.__init__(self) if isinstance(marker_elem, list): self.markers = marker_elem else: self.markers = [] self.marker = None self.key_marker = None self.next_marker = None # avail when delimiter used self.next_key_marker = None self.next_upload_id_marker = None self.next_version_id_marker = None self.next_generation_marker = None self.version_id_marker = None self.is_truncated = False self.next_token = None self.status = True def startElement(self, name, attrs, connection): ... def to_boolean(self, value, true_value='true'): ... def endElement(self, name, value, connection): ... # FILE boto/boto/cloudfront/invalidation.py class InvalidationSummary(object): """ Represents InvalidationSummary complex type in CloudFront API that lists the id and status of a given invalidation request. """ def __init__(self, connection=None, distribution_id=None, id='', status=''): self.connection = connection self.distribution_id = distribution_id self.id = id self.status = status def __repr__(self): ... def startElement(self, name, attrs, connection): ... def endElement(self, name, value, connection): ... def get_distribution(self): """ Returns a Distribution object representing the parent CloudFront distribution of the invalidation request listed in the InvalidationSummary. :rtype: :class:`boto.cloudfront.distribution.Distribution` :returns: A Distribution object representing the parent CloudFront distribution of the invalidation request listed in the InvalidationSummary """ return self.connection.get_distribution_info(self.distribution_id) def get_invalidation_request(self): """ Returns an InvalidationBatch object representing the invalidation request referred to in the InvalidationSummary. :rtype: :class:`boto.cloudfront.invalidation.InvalidationBatch` :returns: An InvalidationBatch object representing the invalidation request referred to by the InvalidationSummary """ ... # FILE boto/boto/resultset.py class ResultSet(list): """ The ResultSet is used to pass results back from the Amazon services to the client. It is light wrapper around Python's :py:class:`list` class, with some additional methods for parsing XML results from AWS. Because I don't really want any dependencies on external libraries, I'm using the standard SAX parser that comes with Python. The good news is that it's quite fast and efficient but it makes some things rather difficult. 
You can pass in, as the marker_elem parameter, a list of tuples. Each tuple contains a string as the first element which represents the XML element that the resultset needs to be on the lookout for and a Python class as the second element of the tuple. Each time the specified element is found in the XML, a new instance of the class will be created and popped onto the stack. :ivar str next_token: A hash used to assist in paging through very long result sets. In most cases, passing this value to certain methods will give you another 'page' of results. """ def __init__(self, marker_elem=None): list.__init__(self) if isinstance(marker_elem, list): self.markers = marker_elem else: self.markers = [] self.marker = None self.key_marker = None self.next_marker = None # avail when delimiter used self.next_key_marker = None self.next_upload_id_marker = None self.next_version_id_marker = None self.next_generation_marker = None self.version_id_marker = None self.is_truncated = False self.next_token = None self.status = True def startElement(self, name, attrs, connection): ... def to_boolean(self, value, true_value='true'): ... def endElement(self, name, value, connection): if name == 'IsTruncated': self.is_truncated = self.to_boolean(value) elif name == 'Marker': self.marker = value elif name == 'KeyMarker': self.key_marker = value elif name == 'NextMarker': self.next_marker = value elif name == 'NextKeyMarker': self.next_key_marker = value elif name == 'VersionIdMarker': self.version_id_marker = value elif name == 'NextVersionIdMarker': self.next_version_id_marker = value elif name == 'NextGenerationMarker': self.next_generation_marker = value elif name == 'UploadIdMarker': self.upload_id_marker = value elif name == 'NextUploadIdMarker': self.next_upload_id_marker = value elif name == 'Bucket': self.bucket = value elif name == 'MaxUploads': self.max_uploads = int(value) elif name == 'MaxItems': self.max_items = int(value) elif name == 'Prefix': self.prefix = value elif name == 'return': self.status = self.to_boolean(value) elif name == 'StatusCode': self.status = self.to_boolean(value, 'Success') elif name == 'ItemName': self.append(value) elif name == 'NextToken': self.next_token = value elif name == 'nextToken': self.next_token = value # Code exists which expects nextToken to be available, so we # set it here to remain backwards-compatibile. self.nextToken = value elif name == 'BoxUsage': try: connection.box_usage += float(value) except: pass elif name == 'IsValid': self.status = self.to_boolean(value, 'True') else: setattr(self, name, value) # FILE boto/boto/cloudfront/invalidation.py class InvalidationListResultSet(object): """ A resultset for listing invalidations on a given CloudFront distribution. Implements the iterator interface and transparently handles paging results from CF so even if you have many thousands of invalidations on the distribution you can iterate over all invalidations in a reasonably efficient manner. """ def __init__(self, markers=None, connection=None, distribution_id=None, invalidations=None, marker='', next_marker=None, max_items=None, is_truncated=False): self.markers = markers or [] self.connection = connection self.distribution_id = distribution_id self.marker = marker self.next_marker = next_marker self.max_items = max_items self.auto_paginate = max_items is None self.is_truncated = is_truncated self._inval_cache = invalidations or [] def __iter__(self): """ A generator function for listing invalidation requests for a given CloudFront distribution. 
""" conn = self.connection distribution_id = self.distribution_id result_set = self for inval in result_set._inval_cache: yield inval if not self.auto_paginate: return while result_set.is_truncated: result_set = conn.get_invalidation_requests(distribution_id, marker=result_set.next_marker, max_items=result_set.max_items) for i in result_set._inval_cache: yield i def startElement(self, name, attrs, connection): for root_elem, handler in self.markers: if name == root_elem: obj = handler(connection, distribution_id=self.distribution_id) self._inval_cache.append(obj) return obj def endElement(self, name, value, connection): if name == 'IsTruncated': self.is_truncated = self.to_boolean(value) elif name == 'Marker': self.marker = value elif name == 'NextMarker': self.next_marker = value elif name == 'MaxItems': self.max_items = int(value) def to_boolean(self, value, true_value='true'): if value == true_value: return True else: return False # FILE boto/boto/resultset.py class ResultSet(list): """ The ResultSet is used to pass results back from the Amazon services to the client. It is light wrapper around Python's :py:class:`list` class, with some additional methods for parsing XML results from AWS. Because I don't really want any dependencies on external libraries, I'm using the standard SAX parser that comes with Python. The good news is that it's quite fast and efficient but it makes some things rather difficult. You can pass in, as the marker_elem parameter, a list of tuples. Each tuple contains a string as the first element which represents the XML element that the resultset needs to be on the lookout for and a Python class as the second element of the tuple. Each time the specified element is found in the XML, a new instance of the class will be created and popped onto the stack. :ivar str next_token: A hash used to assist in paging through very long result sets. In most cases, passing this value to certain methods will give you another 'page' of results. """ def __init__(self, marker_elem=None): list.__init__(self) if isinstance(marker_elem, list): self.markers = marker_elem else: self.markers = [] self.marker = None self.key_marker = None self.next_marker = None # avail when delimiter used self.next_key_marker = None self.next_upload_id_marker = None self.next_version_id_marker = None self.next_generation_marker = None self.version_id_marker = None self.is_truncated = False self.next_token = None self.status = True def startElement(self, name, attrs, connection): for t in self.markers: if name == t[0]: obj = t[1](connection) self.append(obj) return obj if name == 'Owner': # Makes owner available for get_service and # perhaps other lists where not handled by # another element. 
self.owner = User() return self.owner return None def to_boolean(self, value, true_value='true'): if value == true_value: return True else: return False def endElement(self, name, value, connection): if name == 'IsTruncated': self.is_truncated = self.to_boolean(value) elif name == 'Marker': self.marker = value elif name == 'KeyMarker': self.key_marker = value elif name == 'NextMarker': self.next_marker = value elif name == 'NextKeyMarker': self.next_key_marker = value elif name == 'VersionIdMarker': self.version_id_marker = value elif name == 'NextVersionIdMarker': self.next_version_id_marker = value elif name == 'NextGenerationMarker': self.next_generation_marker = value elif name == 'UploadIdMarker': self.upload_id_marker = value elif name == 'NextUploadIdMarker': self.next_upload_id_marker = value elif name == 'Bucket': self.bucket = value elif name == 'MaxUploads': self.max_uploads = int(value) elif name == 'MaxItems': self.max_items = int(value) elif name == 'Prefix': self.prefix = value elif name == 'return': self.status = self.to_boolean(value) elif name == 'StatusCode': self.status = self.to_boolean(value, 'Success') elif name == 'ItemName': self.append(value) elif name == 'NextToken': self.next_token = value elif name == 'nextToken': self.next_token = value # Code exists which expects nextToken to be available, so we # set it here to remain backwards-compatibile. self.nextToken = value elif name == 'BoxUsage': try: connection.box_usage += float(value) except: pass elif name == 'IsValid': self.status = self.to_boolean(value, 'True') else: setattr(self, name, value) # FILE boto/boto/cloudfront/invalidation.py class InvalidationSummary(object): """ Represents InvalidationSummary complex type in CloudFront API that lists the id and status of a given invalidation request. """ def __init__(self, connection=None, distribution_id=None, id='', status=''): self.connection = connection self.distribution_id = distribution_id self.id = id self.status = status def __repr__(self): ... def startElement(self, name, attrs, connection): ... def endElement(self, name, value, connection): ... def get_distribution(self): """ Returns a Distribution object representing the parent CloudFront distribution of the invalidation request listed in the InvalidationSummary. :rtype: :class:`boto.cloudfront.distribution.Distribution` :returns: A Distribution object representing the parent CloudFront distribution of the invalidation request listed in the InvalidationSummary """ ... def get_invalidation_request(self): """ Returns an InvalidationBatch object representing the invalidation request referred to in the InvalidationSummary. :rtype: :class:`boto.cloudfront.invalidation.InvalidationBatch` :returns: An InvalidationBatch object representing the invalidation request referred to by the InvalidationSummary """ ... # FILE boto/boto/cloudfront/invalidation.py class InvalidationSummary(object): """ Represents InvalidationSummary complex type in CloudFront API that lists the id and status of a given invalidation request. """ def __init__(self, connection=None, distribution_id=None, id='', status=''): self.connection = connection self.distribution_id = distribution_id self.id = id self.status = status def __repr__(self): ... def startElement(self, name, attrs, connection): ... def endElement(self, name, value, connection): ... def get_distribution(self): """ Returns a Distribution object representing the parent CloudFront distribution of the invalidation request listed in the InvalidationSummary. 
:rtype: :class:`boto.cloudfront.distribution.Distribution` :returns: A Distribution object representing the parent CloudFront distribution of the invalidation request listed in the InvalidationSummary """ ... def get_invalidation_request(self): """ Returns an InvalidationBatch object representing the invalidation request referred to in the InvalidationSummary. :rtype: :class:`boto.cloudfront.invalidation.InvalidationBatch` :returns: An InvalidationBatch object representing the invalidation request referred to by the InvalidationSummary """ return self.connection.invalidation_request_status( self.distribution_id, self.id) # FILE boto/boto/cloudfront/invalidation.py class InvalidationListResultSet(object): """ A resultset for listing invalidations on a given CloudFront distribution. Implements the iterator interface and transparently handles paging results from CF so even if you have many thousands of invalidations on the distribution you can iterate over all invalidations in a reasonably efficient manner. """ def __init__(self, markers=None, connection=None, distribution_id=None, invalidations=None, marker='', next_marker=None, max_items=None, is_truncated=False): self.markers = markers or [] self.connection = connection self.distribution_id = distribution_id self.marker = marker self.next_marker = next_marker self.max_items = max_items self.auto_paginate = max_items is None self.is_truncated = is_truncated self._inval_cache = invalidations or [] def __iter__(self): """ A generator function for listing invalidation requests for a given CloudFront distribution. """ ... def startElement(self, name, attrs, connection): ... def endElement(self, name, value, connection): ... def to_boolean(self, value, true_value='true'): ... # FILE boto/boto/cloudfront/invalidation.py class InvalidationSummary(object): """ Represents InvalidationSummary complex type in CloudFront API that lists the id and status of a given invalidation request. """ def __init__(self, connection=None, distribution_id=None, id='', status=''): self.connection = connection self.distribution_id = distribution_id self.id = id self.status = status def __repr__(self): return '<InvalidationSummary: %s>' % self.id def startElement(self, name, attrs, connection): pass def endElement(self, name, value, connection): if name == 'Id': self.id = value elif name == 'Status': self.status = value def get_distribution(self): """ Returns a Distribution object representing the parent CloudFront distribution of the invalidation request listed in the InvalidationSummary. :rtype: :class:`boto.cloudfront.distribution.Distribution` :returns: A Distribution object representing the parent CloudFront distribution of the invalidation request listed in the InvalidationSummary """ return self.connection.get_distribution_info(self.distribution_id) def get_invalidation_request(self): """ Returns an InvalidationBatch object representing the invalidation request referred to in the InvalidationSummary. :rtype: :class:`boto.cloudfront.invalidation.InvalidationBatch` :returns: An InvalidationBatch object representing the invalidation request referred to by the InvalidationSummary """ return self.connection.invalidation_request_status( self.distribution_id, self.id) # FILE boto/boto/cloudfront/invalidation.py class InvalidationListResultSet(object): """ A resultset for listing invalidations on a given CloudFront distribution. 
Implements the iterator interface and transparently handles paging results from CF so even if you have many thousands of invalidations on the distribution you can iterate over all invalidations in a reasonably efficient manner. """ def __init__(self, markers=None, connection=None, distribution_id=None, invalidations=None, marker='', next_marker=None, max_items=None, is_truncated=False): self.markers = markers or [] self.connection = connection self.distribution_id = distribution_id self.marker = marker self.next_marker = next_marker self.max_items = max_items self.auto_paginate = max_items is None self.is_truncated = is_truncated self._inval_cache = invalidations or [] def __iter__(self): """ A generator function for listing invalidation requests for a given CloudFront distribution. """ conn = self.connection distribution_id = self.distribution_id result_set = self for inval in result_set._inval_cache: yield inval if not self.auto_paginate: return while result_set.is_truncated: result_set = conn.get_invalidation_requests(distribution_id, marker=result_set.next_marker, max_items=result_set.max_items) for i in result_set._inval_cache: yield i def startElement(self, name, attrs, connection): ... def endElement(self, name, value, connection): ... def to_boolean(self, value, true_value='true'): ... # FILE boto/boto/cloudfront/invalidation.py class InvalidationSummary(object): """ Represents InvalidationSummary complex type in CloudFront API that lists the id and status of a given invalidation request. """ def __init__(self, connection=None, distribution_id=None, id='', status=''): self.connection = connection self.distribution_id = distribution_id self.id = id self.status = status def __repr__(self): return '<InvalidationSummary: %s>' % self.id def startElement(self, name, attrs, connection): ... def endElement(self, name, value, connection): ... def get_distribution(self): """ Returns a Distribution object representing the parent CloudFront distribution of the invalidation request listed in the InvalidationSummary. :rtype: :class:`boto.cloudfront.distribution.Distribution` :returns: A Distribution object representing the parent CloudFront distribution of the invalidation request listed in the InvalidationSummary """ ... def get_invalidation_request(self): """ Returns an InvalidationBatch object representing the invalidation request referred to in the InvalidationSummary. :rtype: :class:`boto.cloudfront.invalidation.InvalidationBatch` :returns: An InvalidationBatch object representing the invalidation request referred to by the InvalidationSummary """ ... Based on the information above, please complete the function: #CURRENT_FILE: boto/boto/cloudfront/invalidation.py import uuid from boto.compat import urllib from boto.resultset import ResultSet class InvalidationBatch(object): """A simple invalidation request. 
:see: http://docs.amazonwebservices.com/AmazonCloudFront/2010-08-01/APIReference/index.html?InvalidationBatchDatatype.html """ def __init__(self, paths=None, connection=None, distribution=None, caller_reference=''): """Create a new invalidation request: :paths: An array of paths to invalidate """ self.paths = paths or [] self.distribution = distribution self.caller_reference = caller_reference if not self.caller_reference: self.caller_reference = str(uuid.uuid4()) # If we passed in a distribution, # then we use that as the connection object if distribution: self.connection = distribution else: self.connection = connection def __repr__(self): return '<InvalidationBatch: %s>' % self.id def add(self, path): """Add another path to this invalidation request""" return self.paths.append(path) def remove(self, path): """Remove a path from this invalidation request""" return self.paths.remove(path) def __iter__(self): return iter(self.paths) def __getitem__(self, i): return self.paths[i] def __setitem__(self, k, v): self.paths[k] = v def escape(self, p): """Escape a path, make sure it begins with a slash and contains no invalid characters. Retain literal wildcard characters."""
boto/boto/cloudfront/invalidation.py
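A minimal sketch of InvalidationBatch.escape, written as a standalone function (self dropped) so it runs on its own. The safe='/*' argument, which keeps path separators and literal wildcards unescaped, is an assumption read off the docstring, not confirmed boto behaviour; the source file would reach quote through boto.compat's urllib namespace.

from urllib.parse import quote

def escape(p):
    """Escape a path, make sure it begins with a slash and contains
    no invalid characters. Retain literal wildcard characters."""
    if not p.startswith('/'):
        p = '/%s' % p
    # safe='/*' leaves '/' and the literal '*' wildcard untouched
    return quote(p, safe='/*')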
proxybroker.utils.get_status_code
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE proxybroker/proxybroker/utils.py def parse_headers(headers): headers = headers.decode('utf-8', 'ignore').split('\r\n') _headers = {} _headers.update(parse_status_line(headers.pop(0))) for h in headers: if not h: break name, val = h.split(':', 1) _headers[name.strip().title()] = val.strip() if ':' in _headers.get('Host', ''): host, port = _headers['Host'].split(':') _headers['Host'], _headers['Port'] = host, int(port) return _headers Based on the information above, please complete the function: #CURRENT_FILE: proxybroker/proxybroker/utils.py import logging import os import os.path import random import re import shutil import tarfile import tempfile import urllib.request from . import __version__ as version from .errors import BadStatusLine def get_status_code(resp, start=9, stop=12):
proxybroker/proxybroker/utils.py
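The start=9/stop=12 defaults suggest slicing the three-digit code out of a raw status line such as 'HTTP/1.1 200 OK'. A sketch along those lines follows; the 400 fallback for an unparsable slice is an assumption, as is raising TypeError for unsupported input types.

def get_status_code(resp, start=9, stop=12):
    try:
        if not isinstance(resp, (bytes, str)):
            raise TypeError('resp must be bytes or str, got %s'
                            % type(resp).__name__)
        # 'HTTP/1.1 200 OK'[9:12] -> '200'; int() accepts bytes too
        code = int(resp[start:stop])
    except ValueError:
        return 400  # assumed fallback when the slice is not a number
    else:
        return code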
authlib.oauth2.rfc6749.util.scope_to_list
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE Authlib/authlib/oauth2/rfc6749/util.py def list_to_scope(scope): """Convert a list of scopes to a space separated string.""" if isinstance(scope, (set, tuple, list)): return " ".join([to_unicode(s) for s in scope]) if scope is None: return scope return to_unicode(scope) # FILE Authlib/authlib/common/encoding.py def to_unicode(x, charset='utf-8', errors='strict'): if x is None or isinstance(x, str): return x if isinstance(x, bytes): return x.decode(charset, errors) return str(x) Based on the information above, please complete the function: #CURRENT_FILE: Authlib/authlib/oauth2/rfc6749/util.py import base64 import binascii from authlib.common.encoding import to_unicode def scope_to_list(scope): """Convert a space separated string to a list of scopes."""
Authlib/authlib/oauth2/rfc6749/util.py
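Since scope_to_list is the inverse of the list_to_scope helper quoted in the context, the completion nearly writes itself: pass collections through (normalised to text), preserve None, and split a string on whitespace. A sketch:

def scope_to_list(scope):
    """Convert a space separated string to a list of scopes."""
    if isinstance(scope, (set, tuple, list)):
        # already a collection; just normalise each entry to text
        return [to_unicode(s) for s in scope]
    if scope is None:
        return None
    return scope.strip().split()

For example, scope_to_list('openid profile') returns ['openid', 'profile'], and list_to_scope round-trips it back to 'openid profile'.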
authlib.common.encoding.to_unicode
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE Authlib/authlib/common/encoding.py def base64_to_int(s): data = urlsafe_b64decode(to_bytes(s, charset='ascii')) buf = struct.unpack('%sB' % len(data), data) return int(''.join(["%02x" % byte for byte in buf]), 16) # FILE Authlib/authlib/common/encoding.py def to_bytes(x, charset='utf-8', errors='strict'): if x is None: return None if isinstance(x, bytes): return x if isinstance(x, str): return x.encode(charset, errors) if isinstance(x, (int, float)): return str(x).encode(charset, errors) return bytes(x) Based on the information above, please complete the function: #CURRENT_FILE: Authlib/authlib/common/encoding.py import json import base64 import struct def to_unicode(x, charset='utf-8', errors='strict'):
Authlib/authlib/common/encoding.py
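This entry needs no guesswork: the context blocks of the neighbouring entries (scope_to_list above, to_bytes below) already quote to_unicode's body verbatim, so the completion can be read straight off the document.

def to_unicode(x, charset='utf-8', errors='strict'):
    # None and str pass through unchanged; bytes are decoded;
    # anything else falls back to its str() representation.
    if x is None or isinstance(x, str):
        return x
    if isinstance(x, bytes):
        return x.decode(charset, errors)
    return str(x)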
authlib.common.encoding.to_bytes
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE Authlib/authlib/common/encoding.py def int_to_base64(num): if num < 0: raise ValueError('Must be a positive integer') s = num.to_bytes((num.bit_length() + 7) // 8, 'big', signed=False) return to_unicode(urlsafe_b64encode(s)) # FILE Authlib/authlib/common/encoding.py def to_unicode(x, charset='utf-8', errors='strict'): if x is None or isinstance(x, str): return x if isinstance(x, bytes): return x.decode(charset, errors) return str(x) Based on the information above, please complete the function: #CURRENT_FILE: Authlib/authlib/common/encoding.py import json import base64 import struct def to_bytes(x, charset='utf-8', errors='strict'):
Authlib/authlib/common/encoding.py
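As with to_unicode, the body of to_bytes appears verbatim in the context of the urlsafe_b64decode entry below, so the completion is simply that implementation restated.

def to_bytes(x, charset='utf-8', errors='strict'):
    if x is None:
        return None
    if isinstance(x, bytes):
        return x
    if isinstance(x, str):
        return x.encode(charset, errors)
    if isinstance(x, (int, float)):
        # numbers are rendered as decimal text, then encoded
        return str(x).encode(charset, errors)
    return bytes(x)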
authlib.common.encoding.urlsafe_b64decode
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE Authlib/authlib/common/encoding.py def int_to_base64(num): if num < 0: raise ValueError('Must be a positive integer') s = num.to_bytes((num.bit_length() + 7) // 8, 'big', signed=False) return to_unicode(urlsafe_b64encode(s)) # FILE Authlib/authlib/common/encoding.py def base64_to_int(s): data = urlsafe_b64decode(to_bytes(s, charset='ascii')) buf = struct.unpack('%sB' % len(data), data) return int(''.join(["%02x" % byte for byte in buf]), 16) # FILE Authlib/authlib/common/encoding.py def urlsafe_b64encode(s): return base64.urlsafe_b64encode(s).rstrip(b'=') # FILE Authlib/authlib/common/encoding.py def to_bytes(x, charset='utf-8', errors='strict'): if x is None: return None if isinstance(x, bytes): return x if isinstance(x, str): return x.encode(charset, errors) if isinstance(x, (int, float)): return str(x).encode(charset, errors) return bytes(x) # FILE Authlib/authlib/common/encoding.py def to_unicode(x, charset='utf-8', errors='strict'): if x is None or isinstance(x, str): return x if isinstance(x, bytes): return x.decode(charset, errors) return str(x) Based on the information above, please complete the function: #CURRENT_FILE: Authlib/authlib/common/encoding.py import json import base64 import struct def urlsafe_b64decode(s):
Authlib/authlib/common/encoding.py
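The companion urlsafe_b64encode shown in the context strips the trailing '=' padding, so the natural completion restores that padding before handing off to the standard library. A sketch, assuming bytes input (which is how base64_to_int calls it, via to_bytes):

import base64

def urlsafe_b64decode(s):
    # re-pad to a multiple of 4; urlsafe_b64encode stripped the '='s
    s += b'=' * (-len(s) % 4)
    return base64.urlsafe_b64decode(s)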
csvs_to_sqlite.utils.table_exists
You are a Python programmer. Here is all the context you may find useful to complete the function: # LIB six.py def b(s): return s # LIB six.py def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") Based on the information above, please complete the function: #CURRENT_FILE: csvs-to-sqlite/csvs_to_sqlite/utils.py import dateparser import os import fnmatch import hashlib import lru import pandas as pd import numpy as np import re import six import sqlite3 from six.moves.urllib.parse import urlparse from six.moves.urllib.parse import uses_relative, uses_netloc, uses_params import click def table_exists(conn, table):
csvs-to-sqlite/csvs_to_sqlite/utils.py
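A plausible completion: ask sqlite_master for the existing table names and test membership. This is a sketch under the assumption that conn is a plain sqlite3.Connection, not necessarily the csvs-to-sqlite original; a parameterised count(*) query against sqlite_master would work equally well.

def table_exists(conn, table):
    # Connection.execute creates a cursor implicitly, so no
    # explicit cursor handling is needed here
    return table in [
        r[0]
        for r in conn.execute(
            "select name from sqlite_master where type = 'table'"
        ).fetchall()
    ]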
sqlitedict.SqliteDict.get_tablenames
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE sqlitedict/sqlitedict.py def reraise(tp, value, tb=None): if value is None: value = tp() if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value # FILE sqlitedict/sqlitedict.py class SqliteMultithread(threading.Thread): """ Wrap sqlite connection in a way that allows concurrent requests from multiple threads. This is done by internally queueing the requests and processing them sequentially in a separate thread (in the same order they arrived). """ def __init__(self, filename, autocommit, journal_mode, outer_stack=True): super(SqliteMultithread, self).__init__() self.filename = filename self.autocommit = autocommit self.journal_mode = journal_mode # use request queue of unlimited size self.reqs = Queue() self.daemon = True self._outer_stack = outer_stack self.log = logging.getLogger('sqlitedict.SqliteMultithread') # # Parts of this object's state get accessed from different threads, so # we use synchronization to avoid race conditions. For example, # .exception gets set inside the new daemon thread that we spawned, but # gets read from the main thread. This is particularly important # during initialization: the Thread needs some time to actually start # working, and until this happens, any calls to e.g. # check_raise_error() will prematurely return None, meaning all is # well. If the that connection happens to fail, we'll never know about # it, and instead wait for a result that never arrives (effectively, # deadlocking). Locking solves this problem by eliminating the race # condition. # self._lock = threading.Lock() self._lock.acquire() self.exception = None self.start() def _connect(self): """Connect to the underlying database. Raises an exception on failure. Returns the connection and cursor on success. """ try: if self.autocommit: conn = sqlite3.connect(self.filename, isolation_level=None, check_same_thread=False) else: conn = sqlite3.connect(self.filename, check_same_thread=False) except Exception: self.log.exception("Failed to initialize connection for filename: %s" % self.filename) self.exception = sys.exc_info() raise try: conn.execute('PRAGMA journal_mode = %s' % self.journal_mode) conn.text_factory = str cursor = conn.cursor() conn.commit() cursor.execute('PRAGMA synchronous=OFF') except Exception: self.log.exception("Failed to execute PRAGMA statements.") self.exception = sys.exc_info() raise return conn, cursor def run(self): ... def check_raise_error(self): """ Check for and raise exception for any previous sqlite query. For the `execute*` family of method calls, such calls are non-blocking and any exception raised in the thread cannot be handled by the calling Thread (usually MainThread). This method is called on `close`, and prior to any subsequent calls to the `execute*` methods to check for and raise an exception in a previous call to the MainThread. """ ... def execute(self, req, arg=None, res=None): """ `execute` calls are non-blocking: just queue up the request and return immediately. :param req: The request (an SQL command) :param arg: Arguments to the SQL command :param res: A queue in which to place responses as they become available """ ... def executemany(self, req, items): ... def select(self, req, arg=None): """ Unlike sqlite's native select, this select doesn't handle iteration efficiently. 
The result of `select` starts filling up with values as soon as the request is dequeued, and although you can iterate over the result normally (`for res in self.select(): ...`), the entire result will be in memory. """ ... def select_one(self, req, arg=None): """Return only the first row of the SELECT, or None if there are no matching rows.""" ... def commit(self, blocking=True): ... def close(self, force=False): ... # FILE sqlitedict/sqlitedict.py class SqliteMultithread(threading.Thread): """ Wrap sqlite connection in a way that allows concurrent requests from multiple threads. This is done by internally queueing the requests and processing them sequentially in a separate thread (in the same order they arrived). """ def __init__(self, filename, autocommit, journal_mode, outer_stack=True): super(SqliteMultithread, self).__init__() self.filename = filename self.autocommit = autocommit self.journal_mode = journal_mode # use request queue of unlimited size self.reqs = Queue() self.daemon = True self._outer_stack = outer_stack self.log = logging.getLogger('sqlitedict.SqliteMultithread') # # Parts of this object's state get accessed from different threads, so # we use synchronization to avoid race conditions. For example, # .exception gets set inside the new daemon thread that we spawned, but # gets read from the main thread. This is particularly important # during initialization: the Thread needs some time to actually start # working, and until this happens, any calls to e.g. # check_raise_error() will prematurely return None, meaning all is # well. If the that connection happens to fail, we'll never know about # it, and instead wait for a result that never arrives (effectively, # deadlocking). Locking solves this problem by eliminating the race # condition. # self._lock = threading.Lock() self._lock.acquire() self.exception = None self.start() def _connect(self): """Connect to the underlying database. Raises an exception on failure. Returns the connection and cursor on success. """ ... def run(self): ... def check_raise_error(self): """ Check for and raise exception for any previous sqlite query. For the `execute*` family of method calls, such calls are non-blocking and any exception raised in the thread cannot be handled by the calling Thread (usually MainThread). This method is called on `close`, and prior to any subsequent calls to the `execute*` methods to check for and raise an exception in a previous call to the MainThread. """ ... def execute(self, req, arg=None, res=None): """ `execute` calls are non-blocking: just queue up the request and return immediately. :param req: The request (an SQL command) :param arg: Arguments to the SQL command :param res: A queue in which to place responses as they become available """ self.check_raise_error() stack = None if self._outer_stack: # NOTE: This might be a lot of information to pump into an input # queue, affecting performance. I've also seen earlier versions of # jython take a severe performance impact for throwing exceptions # so often. stack = traceback.extract_stack()[:-1] # # We pass a weak reference to the response queue instead of a regular # reference, because we want the queues to be garbage-collected # more aggressively. # res_ref = None if res: res_ref = weakref.ref(res) self.reqs.put((req, arg or tuple(), res_ref, stack)) def executemany(self, req, items): ... def select(self, req, arg=None): """ Unlike sqlite's native select, this select doesn't handle iteration efficiently. 
The result of `select` starts filling up with values as soon as the request is dequeued, and although you can iterate over the result normally (`for res in self.select(): ...`), the entire result will be in memory. """ ... def select_one(self, req, arg=None): """Return only the first row of the SELECT, or None if there are no matching rows.""" ... def commit(self, blocking=True): ... def close(self, force=False): ... # FILE sqlitedict/sqlitedict.py class SqliteMultithread(threading.Thread): """ Wrap sqlite connection in a way that allows concurrent requests from multiple threads. This is done by internally queueing the requests and processing them sequentially in a separate thread (in the same order they arrived). """ def __init__(self, filename, autocommit, journal_mode, outer_stack=True): super(SqliteMultithread, self).__init__() self.filename = filename self.autocommit = autocommit self.journal_mode = journal_mode # use request queue of unlimited size self.reqs = Queue() self.daemon = True self._outer_stack = outer_stack self.log = logging.getLogger('sqlitedict.SqliteMultithread') # # Parts of this object's state get accessed from different threads, so # we use synchronization to avoid race conditions. For example, # .exception gets set inside the new daemon thread that we spawned, but # gets read from the main thread. This is particularly important # during initialization: the Thread needs some time to actually start # working, and until this happens, any calls to e.g. # check_raise_error() will prematurely return None, meaning all is # well. If the that connection happens to fail, we'll never know about # it, and instead wait for a result that never arrives (effectively, # deadlocking). Locking solves this problem by eliminating the race # condition. # self._lock = threading.Lock() self._lock.acquire() self.exception = None self.start() def _connect(self): """Connect to the underlying database. Raises an exception on failure. Returns the connection and cursor on success. """ ... def run(self): # # Nb. this is what actually runs inside the new daemon thread. # self._lock is locked at this stage - see the initializer function. # try: conn, cursor = self._connect() finally: self._lock.release() res_ref = None while True: # # req: an SQL command or one of the --magic-- commands we use internally # arg: arguments for the command # res_ref: a weak reference to the queue into which responses must be placed # outer_stack: the outer stack, for producing more informative traces in case of error # req, arg, res_ref, outer_stack = self.reqs.get() if req == _REQUEST_CLOSE: assert res_ref, ('--close-- without return queue', res_ref) break elif req == _REQUEST_COMMIT: conn.commit() _put(res_ref, _RESPONSE_NO_MORE) else: try: cursor.execute(req, arg) except Exception: with self._lock: self.exception = (e_type, e_value, e_tb) = sys.exc_info() inner_stack = traceback.extract_stack() # An exception occurred in our thread, but we may not # immediately able to throw it in our calling thread, if it has # no return `res` queue: log as level ERROR both the inner and # outer exception immediately. # # Any iteration of res.get() or any next call will detect the # inner exception and re-raise it in the calling Thread; though # it may be confusing to see an exception for an unrelated # statement, an ERROR log statement from the 'sqlitedict.*' # namespace contains the original outer stack location. 
self.log.error('Inner exception:') for item in traceback.format_list(inner_stack): self.log.error(item) self.log.error('') # deliniate traceback & exception w/blank line for item in traceback.format_exception_only(e_type, e_value): self.log.error(item) self.log.error('') # exception & outer stack w/blank line if self._outer_stack: self.log.error('Outer stack:') for item in traceback.format_list(outer_stack): self.log.error(item) self.log.error('Exception will be re-raised at next call.') else: self.log.error( 'Unable to show the outer stack. Pass ' 'outer_stack=True when initializing the ' 'SqliteDict instance to show the outer stack.' ) if res_ref: for rec in cursor: if _put(res_ref, rec) == _PUT_REFERENT_DESTROYED: # # The queue we are sending responses to got garbage # collected. Nobody is listening anymore, so we # stop sending responses. # break _put(res_ref, _RESPONSE_NO_MORE) if self.autocommit: conn.commit() self.log.debug('received: %s, send: --no more--', req) conn.close() _put(res_ref, _RESPONSE_NO_MORE) def check_raise_error(self): """ Check for and raise exception for any previous sqlite query. For the `execute*` family of method calls, such calls are non-blocking and any exception raised in the thread cannot be handled by the calling Thread (usually MainThread). This method is called on `close`, and prior to any subsequent calls to the `execute*` methods to check for and raise an exception in a previous call to the MainThread. """ ... def execute(self, req, arg=None, res=None): """ `execute` calls are non-blocking: just queue up the request and return immediately. :param req: The request (an SQL command) :param arg: Arguments to the SQL command :param res: A queue in which to place responses as they become available """ ... def executemany(self, req, items): ... def select(self, req, arg=None): """ Unlike sqlite's native select, this select doesn't handle iteration efficiently. The result of `select` starts filling up with values as soon as the request is dequeued, and although you can iterate over the result normally (`for res in self.select(): ...`), the entire result will be in memory. """ ... def select_one(self, req, arg=None): """Return only the first row of the SELECT, or None if there are no matching rows.""" ... def commit(self, blocking=True): ... def close(self, force=False): ... # FILE sqlitedict/sqlitedict.py class SqliteMultithread(threading.Thread): """ Wrap sqlite connection in a way that allows concurrent requests from multiple threads. This is done by internally queueing the requests and processing them sequentially in a separate thread (in the same order they arrived). """ def __init__(self, filename, autocommit, journal_mode, outer_stack=True): super(SqliteMultithread, self).__init__() self.filename = filename self.autocommit = autocommit self.journal_mode = journal_mode # use request queue of unlimited size self.reqs = Queue() self.daemon = True self._outer_stack = outer_stack self.log = logging.getLogger('sqlitedict.SqliteMultithread') # # Parts of this object's state get accessed from different threads, so # we use synchronization to avoid race conditions. For example, # .exception gets set inside the new daemon thread that we spawned, but # gets read from the main thread. This is particularly important # during initialization: the Thread needs some time to actually start # working, and until this happens, any calls to e.g. # check_raise_error() will prematurely return None, meaning all is # well. 
If the that connection happens to fail, we'll never know about # it, and instead wait for a result that never arrives (effectively, # deadlocking). Locking solves this problem by eliminating the race # condition. # self._lock = threading.Lock() self._lock.acquire() self.exception = None self.start() def _connect(self): """Connect to the underlying database. Raises an exception on failure. Returns the connection and cursor on success. """ ... def run(self): ... def check_raise_error(self): """ Check for and raise exception for any previous sqlite query. For the `execute*` family of method calls, such calls are non-blocking and any exception raised in the thread cannot be handled by the calling Thread (usually MainThread). This method is called on `close`, and prior to any subsequent calls to the `execute*` methods to check for and raise an exception in a previous call to the MainThread. """ with self._lock: if self.exception: e_type, e_value, e_tb = self.exception # clear self.exception, if the caller decides to handle such # exception, we should not repeatedly re-raise it. self.exception = None self.log.error('An exception occurred from a previous statement, view ' 'the logging namespace "sqlitedict" for outer stack.') # The third argument to raise is the traceback object, and it is # substituted instead of the current location as the place where # the exception occurred, this is so that when using debuggers such # as `pdb', or simply evaluating the naturally raised traceback, we # retain the original (inner) location of where the exception # occurred. reraise(e_type, e_value, e_tb) def execute(self, req, arg=None, res=None): """ `execute` calls are non-blocking: just queue up the request and return immediately. :param req: The request (an SQL command) :param arg: Arguments to the SQL command :param res: A queue in which to place responses as they become available """ ... def executemany(self, req, items): ... def select(self, req, arg=None): """ Unlike sqlite's native select, this select doesn't handle iteration efficiently. The result of `select` starts filling up with values as soon as the request is dequeued, and although you can iterate over the result normally (`for res in self.select(): ...`), the entire result will be in memory. """ ... def select_one(self, req, arg=None): """Return only the first row of the SELECT, or None if there are no matching rows.""" ... def commit(self, blocking=True): ... def close(self, force=False): ... # FILE sqlitedict/sqlitedict.py class SqliteMultithread(threading.Thread): """ Wrap sqlite connection in a way that allows concurrent requests from multiple threads. This is done by internally queueing the requests and processing them sequentially in a separate thread (in the same order they arrived). """ def __init__(self, filename, autocommit, journal_mode, outer_stack=True): super(SqliteMultithread, self).__init__() self.filename = filename self.autocommit = autocommit self.journal_mode = journal_mode # use request queue of unlimited size self.reqs = Queue() self.daemon = True self._outer_stack = outer_stack self.log = logging.getLogger('sqlitedict.SqliteMultithread') # # Parts of this object's state get accessed from different threads, so # we use synchronization to avoid race conditions. For example, # .exception gets set inside the new daemon thread that we spawned, but # gets read from the main thread. 
This is particularly important # during initialization: the Thread needs some time to actually start # working, and until this happens, any calls to e.g. # check_raise_error() will prematurely return None, meaning all is # well. If the that connection happens to fail, we'll never know about # it, and instead wait for a result that never arrives (effectively, # deadlocking). Locking solves this problem by eliminating the race # condition. # self._lock = threading.Lock() self._lock.acquire() self.exception = None self.start() def _connect(self): """Connect to the underlying database. Raises an exception on failure. Returns the connection and cursor on success. """ try: if self.autocommit: conn = sqlite3.connect(self.filename, isolation_level=None, check_same_thread=False) else: conn = sqlite3.connect(self.filename, check_same_thread=False) except Exception: self.log.exception("Failed to initialize connection for filename: %s" % self.filename) self.exception = sys.exc_info() raise try: conn.execute('PRAGMA journal_mode = %s' % self.journal_mode) conn.text_factory = str cursor = conn.cursor() conn.commit() cursor.execute('PRAGMA synchronous=OFF') except Exception: self.log.exception("Failed to execute PRAGMA statements.") self.exception = sys.exc_info() raise return conn, cursor def run(self): # # Nb. this is what actually runs inside the new daemon thread. # self._lock is locked at this stage - see the initializer function. # try: conn, cursor = self._connect() finally: self._lock.release() res_ref = None while True: # # req: an SQL command or one of the --magic-- commands we use internally # arg: arguments for the command # res_ref: a weak reference to the queue into which responses must be placed # outer_stack: the outer stack, for producing more informative traces in case of error # req, arg, res_ref, outer_stack = self.reqs.get() if req == _REQUEST_CLOSE: assert res_ref, ('--close-- without return queue', res_ref) break elif req == _REQUEST_COMMIT: conn.commit() _put(res_ref, _RESPONSE_NO_MORE) else: try: cursor.execute(req, arg) except Exception: with self._lock: self.exception = (e_type, e_value, e_tb) = sys.exc_info() inner_stack = traceback.extract_stack() # An exception occurred in our thread, but we may not # immediately able to throw it in our calling thread, if it has # no return `res` queue: log as level ERROR both the inner and # outer exception immediately. # # Any iteration of res.get() or any next call will detect the # inner exception and re-raise it in the calling Thread; though # it may be confusing to see an exception for an unrelated # statement, an ERROR log statement from the 'sqlitedict.*' # namespace contains the original outer stack location. self.log.error('Inner exception:') for item in traceback.format_list(inner_stack): self.log.error(item) self.log.error('') # deliniate traceback & exception w/blank line for item in traceback.format_exception_only(e_type, e_value): self.log.error(item) self.log.error('') # exception & outer stack w/blank line if self._outer_stack: self.log.error('Outer stack:') for item in traceback.format_list(outer_stack): self.log.error(item) self.log.error('Exception will be re-raised at next call.') else: self.log.error( 'Unable to show the outer stack. Pass ' 'outer_stack=True when initializing the ' 'SqliteDict instance to show the outer stack.' ) if res_ref: for rec in cursor: if _put(res_ref, rec) == _PUT_REFERENT_DESTROYED: # # The queue we are sending responses to got garbage # collected. 
Nobody is listening anymore, so we # stop sending responses. # break _put(res_ref, _RESPONSE_NO_MORE) if self.autocommit: conn.commit() self.log.debug('received: %s, send: --no more--', req) conn.close() _put(res_ref, _RESPONSE_NO_MORE) def check_raise_error(self): """ Check for and raise exception for any previous sqlite query. For the `execute*` family of method calls, such calls are non-blocking and any exception raised in the thread cannot be handled by the calling Thread (usually MainThread). This method is called on `close`, and prior to any subsequent calls to the `execute*` methods to check for and raise an exception in a previous call to the MainThread. """ with self._lock: if self.exception: e_type, e_value, e_tb = self.exception # clear self.exception, if the caller decides to handle such # exception, we should not repeatedly re-raise it. self.exception = None self.log.error('An exception occurred from a previous statement, view ' 'the logging namespace "sqlitedict" for outer stack.') # The third argument to raise is the traceback object, and it is # substituted instead of the current location as the place where # the exception occurred, this is so that when using debuggers such # as `pdb', or simply evaluating the naturally raised traceback, we # retain the original (inner) location of where the exception # occurred. reraise(e_type, e_value, e_tb) def execute(self, req, arg=None, res=None): """ `execute` calls are non-blocking: just queue up the request and return immediately. :param req: The request (an SQL command) :param arg: Arguments to the SQL command :param res: A queue in which to place responses as they become available """ self.check_raise_error() stack = None if self._outer_stack: # NOTE: This might be a lot of information to pump into an input # queue, affecting performance. I've also seen earlier versions of # jython take a severe performance impact for throwing exceptions # so often. stack = traceback.extract_stack()[:-1] # # We pass a weak reference to the response queue instead of a regular # reference, because we want the queues to be garbage-collected # more aggressively. # res_ref = None if res: res_ref = weakref.ref(res) self.reqs.put((req, arg or tuple(), res_ref, stack)) def executemany(self, req, items): for item in items: self.execute(req, item) self.check_raise_error() def select(self, req, arg=None): """ Unlike sqlite's native select, this select doesn't handle iteration efficiently. The result of `select` starts filling up with values as soon as the request is dequeued, and although you can iterate over the result normally (`for res in self.select(): ...`), the entire result will be in memory. """ res = Queue() # results of the select will appear as items in this queue self.execute(req, arg, res) while True: rec = res.get() self.check_raise_error() if rec == _RESPONSE_NO_MORE: break yield rec def select_one(self, req, arg=None): """Return only the first row of the SELECT, or None if there are no matching rows.""" try: return next(iter(self.select(req, arg))) except StopIteration: return None def commit(self, blocking=True): if blocking: # by default, we await completion of commit() unless # blocking=False. This ensures any available exceptions for any # previous statement are thrown before returning, and that the # data has actually persisted to disk! self.select_one(_REQUEST_COMMIT) else: # otherwise, we fire and forget as usual. 
self.execute(_REQUEST_COMMIT) def close(self, force=False): if force: # If a SqliteDict is being killed or garbage-collected, then select_one() # could hang forever because run() might already have exited and therefore # can't process the request. Instead, push the close command to the requests # queue directly. If run() is still alive, it will exit gracefully. If not, # then there's nothing we can do anyway. self.reqs.put((_REQUEST_CLOSE, None, weakref.ref(Queue()), None)) else: # we abuse 'select' to "iter" over a "--close--" statement so that we # can confirm the completion of close before joining the thread and # returning (by semaphore '--no more--' self.select_one(_REQUEST_CLOSE) self.join() # FILE sqlitedict/sqlitedict.py class SqliteMultithread(threading.Thread): """ Wrap sqlite connection in a way that allows concurrent requests from multiple threads. This is done by internally queueing the requests and processing them sequentially in a separate thread (in the same order they arrived). """ def __init__(self, filename, autocommit, journal_mode, outer_stack=True): super(SqliteMultithread, self).__init__() self.filename = filename self.autocommit = autocommit self.journal_mode = journal_mode # use request queue of unlimited size self.reqs = Queue() self.daemon = True self._outer_stack = outer_stack self.log = logging.getLogger('sqlitedict.SqliteMultithread') # # Parts of this object's state get accessed from different threads, so # we use synchronization to avoid race conditions. For example, # .exception gets set inside the new daemon thread that we spawned, but # gets read from the main thread. This is particularly important # during initialization: the Thread needs some time to actually start # working, and until this happens, any calls to e.g. # check_raise_error() will prematurely return None, meaning all is # well. If the that connection happens to fail, we'll never know about # it, and instead wait for a result that never arrives (effectively, # deadlocking). Locking solves this problem by eliminating the race # condition. # self._lock = threading.Lock() self._lock.acquire() self.exception = None self.start() def _connect(self): """Connect to the underlying database. Raises an exception on failure. Returns the connection and cursor on success. """ ... def run(self): ... def check_raise_error(self): """ Check for and raise exception for any previous sqlite query. For the `execute*` family of method calls, such calls are non-blocking and any exception raised in the thread cannot be handled by the calling Thread (usually MainThread). This method is called on `close`, and prior to any subsequent calls to the `execute*` methods to check for and raise an exception in a previous call to the MainThread. """ ... def execute(self, req, arg=None, res=None): """ `execute` calls are non-blocking: just queue up the request and return immediately. :param req: The request (an SQL command) :param arg: Arguments to the SQL command :param res: A queue in which to place responses as they become available """ ... def executemany(self, req, items): ... def select(self, req, arg=None): """ Unlike sqlite's native select, this select doesn't handle iteration efficiently. The result of `select` starts filling up with values as soon as the request is dequeued, and although you can iterate over the result normally (`for res in self.select(): ...`), the entire result will be in memory. """ ... 
def select_one(self, req, arg=None): """Return only the first row of the SELECT, or None if there are no matching rows.""" ... def commit(self, blocking=True): ... def close(self, force=False): ... # FILE sqlitedict/sqlitedict.py def encode_key(key): """Serialize a key using pickle + base64 encoding to text accepted by SQLite.""" return b64encode(dumps(key, protocol=PICKLE_PROTOCOL)).decode("ascii") # FILE sqlitedict/sqlitedict.py def open(*args, **kwargs): """See documentation of the SqliteDict class.""" return SqliteDict(*args, **kwargs) # FILE sqlitedict/sqlitedict.py def encode(obj): """Serialize an object using pickle to a binary format accepted by SQLite.""" return sqlite3.Binary(dumps(obj, protocol=PICKLE_PROTOCOL)) # FILE sqlitedict/sqlitedict.py def decode(obj): """Deserialize objects retrieved from SQLite.""" return loads(bytes(obj)) # FILE sqlitedict/sqlitedict.py def decode_key(key): """Deserialize a key retrieved from SQLite.""" return loads(b64decode(key.encode("ascii"))) # FILE sqlitedict/sqlitedict.py class SqliteMultithread(threading.Thread): """ Wrap sqlite connection in a way that allows concurrent requests from multiple threads. This is done by internally queueing the requests and processing them sequentially in a separate thread (in the same order they arrived). """ def __init__(self, filename, autocommit, journal_mode, outer_stack=True): super(SqliteMultithread, self).__init__() self.filename = filename self.autocommit = autocommit self.journal_mode = journal_mode # use request queue of unlimited size self.reqs = Queue() self.daemon = True self._outer_stack = outer_stack self.log = logging.getLogger('sqlitedict.SqliteMultithread') # # Parts of this object's state get accessed from different threads, so # we use synchronization to avoid race conditions. For example, # .exception gets set inside the new daemon thread that we spawned, but # gets read from the main thread. This is particularly important # during initialization: the Thread needs some time to actually start # working, and until this happens, any calls to e.g. # check_raise_error() will prematurely return None, meaning all is # well. If the that connection happens to fail, we'll never know about # it, and instead wait for a result that never arrives (effectively, # deadlocking). Locking solves this problem by eliminating the race # condition. # self._lock = threading.Lock() self._lock.acquire() self.exception = None self.start() def _connect(self): """Connect to the underlying database. Raises an exception on failure. Returns the connection and cursor on success. """ ... def run(self): ... def check_raise_error(self): """ Check for and raise exception for any previous sqlite query. For the `execute*` family of method calls, such calls are non-blocking and any exception raised in the thread cannot be handled by the calling Thread (usually MainThread). This method is called on `close`, and prior to any subsequent calls to the `execute*` methods to check for and raise an exception in a previous call to the MainThread. """ ... def execute(self, req, arg=None, res=None): """ `execute` calls are non-blocking: just queue up the request and return immediately. :param req: The request (an SQL command) :param arg: Arguments to the SQL command :param res: A queue in which to place responses as they become available """ ... def executemany(self, req, items): ... def select(self, req, arg=None): """ Unlike sqlite's native select, this select doesn't handle iteration efficiently. 
The result of `select` starts filling up with values as soon as the request is dequeued, and although you can iterate over the result normally (`for res in self.select(): ...`), the entire result will be in memory. """ ... def select_one(self, req, arg=None): """Return only the first row of the SELECT, or None if there are no matching rows.""" ... def commit(self, blocking=True): if blocking: # by default, we await completion of commit() unless # blocking=False. This ensures any available exceptions for any # previous statement are thrown before returning, and that the # data has actually persisted to disk! self.select_one(_REQUEST_COMMIT) else: # otherwise, we fire and forget as usual. self.execute(_REQUEST_COMMIT) def close(self, force=False): ... Based on the information above, please complete the function: #CURRENT_FILE: sqlitedict/sqlitedict.py import sqlite3 import os import sys import tempfile import threading import logging import traceback from base64 import b64decode, b64encode import weakref from cPickle import dumps, loads, HIGHEST_PROTOCOL as PICKLE_PROTOCOL from pickle import dumps, loads, HIGHEST_PROTOCOL as PICKLE_PROTOCOL from collections import UserDict as DictClass from UserDict import DictMixin as DictClass from queue import Queue from Queue import Queue class SqliteDict(DictClass): VALID_FLAGS = ['c', 'r', 'w', 'n'] def __init__(self, filename=None, tablename='unnamed', flag='c', autocommit=False, journal_mode="DELETE", encode=encode, decode=decode, encode_key=identity, decode_key=identity, timeout=5, outer_stack=True): """ Initialize a thread-safe sqlite-backed dictionary. The dictionary will be a table `tablename` in database file `filename`. A single file (=database) may contain multiple tables. If no `filename` is given, a random file in temp will be used (and deleted from temp once the dict is closed/deleted). If you enable `autocommit`, changes will be committed after each operation (more inefficient but safer). Otherwise, changes are committed on `self.commit()`, `self.clear()` and `self.close()`. Set `journal_mode` to 'OFF' if you're experiencing sqlite I/O problems or if you need performance and don't care about crash-consistency. Set `outer_stack` to False to disable the output of the outer exception to the error logs. This may improve the efficiency of sqlitedict operation at the expense of a detailed exception trace. The `flag` parameter. Exactly one of: 'c': default mode, open for read/write, creating the db/table if necessary. 'w': open for r/w, but drop `tablename` contents first (start with empty table) 'r': open as read-only 'n': create a new database (erasing any existing tables, not just `tablename`!). The `encode` and `decode` parameters are used to customize how the values are serialized and deserialized. The `encode` parameter must be a function that takes a single Python object and returns a serialized representation. The `decode` function must be a function that takes the serialized representation produced by `encode` and returns a deserialized Python object. The default is to use pickle. The `timeout` defines the maximum time (in seconds) to wait for initial Thread startup. 
""" self.in_temp = filename is None if self.in_temp: fd, filename = tempfile.mkstemp(prefix='sqldict') os.close(fd) if flag not in SqliteDict.VALID_FLAGS: raise RuntimeError("Unrecognized flag: %s" % flag) self.flag = flag if flag == 'n': if os.path.exists(filename): os.remove(filename) dirname = os.path.dirname(filename) if dirname: if not os.path.exists(dirname): raise RuntimeError('Error! The directory does not exist, %s' % dirname) self.filename = filename # Use standard SQL escaping of double quote characters in identifiers, by doubling them. # See https://github.com/RaRe-Technologies/sqlitedict/pull/113 self.tablename = tablename.replace('"', '""') self.autocommit = autocommit self.journal_mode = journal_mode self.encode = encode self.decode = decode self.encode_key = encode_key self.decode_key = decode_key self._outer_stack = outer_stack logger.debug("opening Sqlite table %r in %r" % (tablename, filename)) self.conn = self._new_conn() if self.flag == 'r': if self.tablename not in SqliteDict.get_tablenames(self.filename): msg = 'Refusing to create a new table "%s" in read-only DB mode' % tablename raise RuntimeError(msg) else: MAKE_TABLE = 'CREATE TABLE IF NOT EXISTS "%s" (key TEXT PRIMARY KEY, value BLOB)' % self.tablename self.conn.execute(MAKE_TABLE) self.conn.commit() if flag == 'w': self.clear() def _new_conn(self): return SqliteMultithread( self.filename, autocommit=self.autocommit, journal_mode=self.journal_mode, outer_stack=self._outer_stack, ) def __enter__(self): if not hasattr(self, 'conn') or self.conn is None: self.conn = self._new_conn() return self def __exit__(self, *exc_info): self.close() def __str__(self): return "SqliteDict(%s)" % (self.filename) def __repr__(self): return str(self) # no need of something complex def __len__(self): # `select count (*)` is super slow in sqlite (does a linear scan!!) # As a result, len() is very slow too once the table size grows beyond trivial. # We could keep the total count of rows ourselves, by means of triggers, # but that seems too complicated and would slow down normal operation # (insert/delete etc). GET_LEN = 'SELECT COUNT(*) FROM "%s"' % self.tablename rows = self.conn.select_one(GET_LEN)[0] return rows if rows is not None else 0 def __bool__(self): # No elements is False, otherwise True GET_MAX = 'SELECT MAX(ROWID) FROM "%s"' % self.tablename m = self.conn.select_one(GET_MAX)[0] # Explicit better than implicit and bla bla return True if m is not None else False def iterkeys(self): GET_KEYS = 'SELECT key FROM "%s" ORDER BY rowid' % self.tablename for key in self.conn.select(GET_KEYS): yield self.decode_key(key[0]) def itervalues(self): GET_VALUES = 'SELECT value FROM "%s" ORDER BY rowid' % self.tablename for value in self.conn.select(GET_VALUES): yield self.decode(value[0]) def iteritems(self): GET_ITEMS = 'SELECT key, value FROM "%s" ORDER BY rowid' % self.tablename for key, value in self.conn.select(GET_ITEMS): yield self.decode_key(key), self.decode(value) def keys(self): return self.iterkeys() def values(self): return self.itervalues() def items(self): return self.iteritems() def __contains__(self, key): HAS_ITEM = 'SELECT 1 FROM "%s" WHERE key = ?' % self.tablename return self.conn.select_one(HAS_ITEM, (self.encode_key(key),)) is not None def __getitem__(self, key): GET_ITEM = 'SELECT value FROM "%s" WHERE key = ?' 
% self.tablename item = self.conn.select_one(GET_ITEM, (self.encode_key(key),)) if item is None: raise KeyError(key) return self.decode(item[0]) def __setitem__(self, key, value): if self.flag == 'r': raise RuntimeError('Refusing to write to read-only SqliteDict') ADD_ITEM = 'REPLACE INTO "%s" (key, value) VALUES (?,?)' % self.tablename self.conn.execute(ADD_ITEM, (self.encode_key(key), self.encode(value))) if self.autocommit: self.commit() def __delitem__(self, key): if self.flag == 'r': raise RuntimeError('Refusing to delete from read-only SqliteDict') if key not in self: raise KeyError(key) DEL_ITEM = 'DELETE FROM "%s" WHERE key = ?' % self.tablename self.conn.execute(DEL_ITEM, (self.encode_key(key),)) if self.autocommit: self.commit() def update(self, items=(), **kwds): if self.flag == 'r': raise RuntimeError('Refusing to update read-only SqliteDict') try: items = items.items() except AttributeError: pass items = [(self.encode_key(k), self.encode(v)) for k, v in items] UPDATE_ITEMS = 'REPLACE INTO "%s" (key, value) VALUES (?, ?)' % self.tablename self.conn.executemany(UPDATE_ITEMS, items) if kwds: self.update(kwds) if self.autocommit: self.commit() def __iter__(self): return self.iterkeys() def clear(self): if self.flag == 'r': raise RuntimeError('Refusing to clear read-only SqliteDict') # avoid VACUUM, as it gives "OperationalError: database schema has changed" CLEAR_ALL = 'DELETE FROM "%s";' % self.tablename self.conn.commit() self.conn.execute(CLEAR_ALL) self.conn.commit() @staticmethod def get_tablenames(filename): """get the names of the tables in an sqlite db as a list"""
sqlitedict/sqlitedict.py
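A plausible completion sketch for the get_tablenames prompt above (a sketch, not necessarily the canonical sqlitedict implementation: the IOError on a missing file and the throwaway connection are assumptions; os and sqlite3 are already imported at module level in the quoted file):

@staticmethod
def get_tablenames(filename):
    """get the names of the tables in an sqlite db as a list"""
    if not os.path.isfile(filename):
        # Assumed contract: a missing database file is an error,
        # not an empty list of tables.
        raise IOError('file %s does not exist' % (filename))
    GET_TABLENAMES = 'SELECT name FROM sqlite_master WHERE type="table"'
    conn = sqlite3.connect(filename)
    try:
        cursor = conn.execute(GET_TABLENAMES)
        res = cursor.fetchall()
    finally:
        conn.close()
    return [name[0] for name in res]

Because the method is a @staticmethod, it cannot reuse an instance's SqliteMultithread connection, which is why this sketch opens (and closes) its own short-lived connection.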
litecli.packages.parseutils.query_starts_with
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE litecli/litecli/packages/parseutils.py
def is_destructive(queries):
    """Return True if any of the queries in *queries* is destructive."""
    keywords = ("drop", "shutdown", "delete", "truncate", "alter")
    return queries_start_with(queries, keywords)

# FILE litecli/litecli/packages/parseutils.py
def queries_start_with(queries, prefixes):
    """Check if any queries start with any item from *prefixes*."""
    for query in sqlparse.split(queries):
        if query and query_starts_with(query, prefixes) is True:
            return True
    return False

Based on the information above, please complete the function:
#CURRENT_FILE: litecli/litecli/packages/parseutils.py
from __future__ import print_function

import re

import sqlparse
from sqlparse.sql import IdentifierList, Identifier, Function
from sqlparse.tokens import Keyword, DML, Punctuation


def query_starts_with(query, prefixes):
    """Check if the query starts with any item from *prefixes*."""
litecli/litecli/packages/parseutils.py
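One reasonable completion sketch for query_starts_with, consistent with how queries_start_with feeds it single statements from sqlparse.split. The call to sqlparse.format(..., strip_comments=True) is an assumption (only sqlparse.split appears in the quoted context); it makes the prefix check robust to leading SQL comments:

def query_starts_with(query, prefixes):
    """Check if the query starts with any item from *prefixes*."""
    prefixes = [prefix.lower() for prefix in prefixes]
    # Strip comments so "/* note */ DROP TABLE t" still counts as
    # starting with "drop"; the comparison is case-insensitive.
    formatted = sqlparse.format(query.lower(), strip_comments=True).strip()
    return bool(formatted) and formatted.split()[0] in prefixes

With this sketch, is_destructive('-- cleanup\nDROP TABLE foo;') returns True even though the raw string does not start with a keyword.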
rest_framework.negotiation.DefaultContentNegotiation.filter_renderers
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE djangorestframework/rest_framework/utils/mediatypes.py class _MediaType: def __init__(self, media_type_str): self.orig = '' if (media_type_str is None) else media_type_str self.full_type, self.params = parse_header_parameters(self.orig) self.main_type, sep, self.sub_type = self.full_type.partition('/') def match(self, other): """Return true if this MediaType satisfies the given MediaType.""" ... def precedence(self): """ Return a precedence level from 0-3 for the media type given how specific it is. """ ... def __str__(self): ... # FILE djangorestframework/rest_framework/negotiation.py class BaseContentNegotiation: def select_parser(self, request, parsers): ... def select_renderer(self, request, renderers, format_suffix=None): raise NotImplementedError('.select_renderer() must be implemented') # FILE djangorestframework/rest_framework/utils/mediatypes.py def order_by_precedence(media_type_lst): """ Returns a list of sets of media type strings, ordered by precedence. Precedence is determined by how specific a media type is: 3. 'type/subtype; param=val' 2. 'type/subtype' 1. 'type/*' 0. '*/*' """ ret = [set(), set(), set(), set()] for media_type in media_type_lst: precedence = _MediaType(media_type).precedence ret[3 - precedence].add(media_type) return [media_types for media_types in ret if media_types] # FILE djangorestframework/rest_framework/utils/mediatypes.py def media_type_matches(lhs, rhs): """ Returns ``True`` if the media type in the first argument <= the media type in the second argument. The media types are strings as described by the HTTP spec. Valid media type strings include: 'application/json; indent=4' 'application/json' 'text/*' '*/*' """ lhs = _MediaType(lhs) rhs = _MediaType(rhs) return lhs.match(rhs) # FILE djangorestframework/rest_framework/negotiation.py class BaseContentNegotiation: def select_parser(self, request, parsers): raise NotImplementedError('.select_parser() must be implemented') def select_renderer(self, request, renderers, format_suffix=None): raise NotImplementedError('.select_renderer() must be implemented') # FILE djangorestframework/rest_framework/utils/mediatypes.py class _MediaType: def __init__(self, media_type_str): self.orig = '' if (media_type_str is None) else media_type_str self.full_type, self.params = parse_header_parameters(self.orig) self.main_type, sep, self.sub_type = self.full_type.partition('/') def match(self, other): """Return true if this MediaType satisfies the given MediaType.""" for key in self.params: if key != 'q' and other.params.get(key, None) != self.params.get(key, None): return False if self.sub_type != '*' and other.sub_type != '*' and other.sub_type != self.sub_type: return False if self.main_type != '*' and other.main_type != '*' and other.main_type != self.main_type: return False return True @property def precedence(self): """ Return a precedence level from 0-3 for the media type given how specific it is. 
""" if self.main_type == '*': return 0 elif self.sub_type == '*': return 1 elif not self.params or list(self.params) == ['q']: return 2 return 3 def __str__(self): ret = "%s/%s" % (self.main_type, self.sub_type) for key, val in self.params.items(): ret += "; %s=%s" % (key, val) return ret # FILE djangorestframework/rest_framework/utils/mediatypes.py class _MediaType: def __init__(self, media_type_str): self.orig = '' if (media_type_str is None) else media_type_str self.full_type, self.params = parse_header_parameters(self.orig) self.main_type, sep, self.sub_type = self.full_type.partition('/') def match(self, other): """Return true if this MediaType satisfies the given MediaType.""" for key in self.params: if key != 'q' and other.params.get(key, None) != self.params.get(key, None): return False if self.sub_type != '*' and other.sub_type != '*' and other.sub_type != self.sub_type: return False if self.main_type != '*' and other.main_type != '*' and other.main_type != self.main_type: return False return True def precedence(self): """ Return a precedence level from 0-3 for the media type given how specific it is. """ ... def __str__(self): ... Based on the information above, please complete the function: #CURRENT_FILE: djangorestframework/rest_framework/negotiation.py from django.http import Http404 from rest_framework import exceptions from rest_framework.settings import api_settings from rest_framework.utils.mediatypes import _MediaType, media_type_matches, order_by_precedence class DefaultContentNegotiation(BaseContentNegotiation): settings = api_settings def select_parser(self, request, parsers): """ Given a list of parsers and a media type, return the appropriate parser to handle the incoming request. """ for parser in parsers: if media_type_matches(parser.media_type, request.content_type): return parser return None def select_renderer(self, request, renderers, format_suffix=None): """ Given a request and a list of renderers, return a two-tuple of: (renderer, media type). """ # Allow URL style format override. eg. "?format=json format_query_param = self.settings.URL_FORMAT_OVERRIDE format = format_suffix or request.query_params.get(format_query_param) if format: renderers = self.filter_renderers(renderers, format) accepts = self.get_accept_list(request) # Check the acceptable media types against each renderer, # attempting more specific media types first # NB. The inner loop here isn't as bad as it first looks :) # Worst case is we're looping over len(accept_list) * len(self.renderers) for media_type_set in order_by_precedence(accepts): for renderer in renderers: for media_type in media_type_set: if media_type_matches(renderer.media_type, media_type): # Return the most specific media type as accepted. media_type_wrapper = _MediaType(media_type) if ( _MediaType(renderer.media_type).precedence > media_type_wrapper.precedence ): # Eg client requests '*/*' # Accepted media type is 'application/json' full_media_type = ';'.join( (renderer.media_type,) + tuple( '{}={}'.format(key, value) for key, value in media_type_wrapper.params.items() ) ) return renderer, full_media_type else: # Eg client requests 'application/json; indent=8' # Accepted media type is 'application/json; indent=8' return renderer, media_type raise exceptions.NotAcceptable(available_renderers=renderers) def filter_renderers(self, renderers, format): """ If there is a '.json' style format suffix, filter the renderers so that we only negotiation against those that accept that format. """
djangorestframework/rest_framework/negotiation.py
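A minimal sketch of filter_renderers: keep only the renderers whose format attribute matches the requested suffix, and fail with Http404 when none do. The 404 behaviour is suggested (though not proven) by the "from django.http import Http404" import in the current file; treating renderer.format as the comparison key is an assumption about the renderer API:

def filter_renderers(self, renderers, format):
    """
    If there is a '.json' style format suffix, filter the renderers
    so that we only negotiate against those that accept that format.
    """
    renderers = [renderer for renderer in renderers
                 if renderer.format == format]
    if not renderers:
        # No renderer can produce the requested format suffix.
        raise Http404
    return renderers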
rest_framework.templatetags.rest_framework.as_string
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE djangorestframework/rest_framework/templatetags/rest_framework.py def format_value(value): if getattr(value, 'is_hyperlink', False): name = str(value.obj) return mark_safe('<a href=%s>%s</a>' % (value, escape(name))) if value is None or isinstance(value, bool): return mark_safe('<code>%s</code>' % {True: 'true', False: 'false', None: 'null'}[value]) elif isinstance(value, list): if any(isinstance(item, (list, dict)) for item in value): template = loader.get_template('rest_framework/admin/list_value.html') else: template = loader.get_template('rest_framework/admin/simple_list_value.html') context = {'value': value} return template.render(context) elif isinstance(value, dict): template = loader.get_template('rest_framework/admin/dict_value.html') context = {'value': value} return template.render(context) elif isinstance(value, str): if ( (value.startswith('http:') or value.startswith('https:') or value.startswith('/')) and not re.search(r'\s', value) ): return mark_safe('<a href="{value}">{value}</a>'.format(value=escape(value))) elif '@' in value and not re.search(r'\s', value): return mark_safe('<a href="mailto:{value}">{value}</a>'.format(value=escape(value))) elif '\n' in value: return mark_safe('<pre>%s</pre>' % escape(value)) return str(value) # FILE djangorestframework/rest_framework/templatetags/rest_framework.py def as_list_of_strings(value): return [ '' if (item is None) else ('%s' % item) for item in value ] Based on the information above, please complete the function: #CURRENT_FILE: djangorestframework/rest_framework/templatetags/rest_framework.py import re from collections import OrderedDict from django import template from django.template import loader from django.urls import NoReverseMatch, reverse from django.utils.encoding import iri_to_uri from django.utils.html import escape, format_html, smart_urlquote from django.utils.safestring import mark_safe from rest_framework.compat import apply_markdown, pygments_highlight from rest_framework.renderers import HTMLFormRenderer import coreschema from rest_framework.utils.urls import replace_query_param def as_string(value):
djangorestframework/rest_framework/templatetags/rest_framework.py
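A minimal sketch for as_string, mirroring the None-handling convention of as_list_of_strings quoted in the same prompt:

def as_string(value):
    # None renders as an empty string; everything else goes through
    # str-style interpolation, as in as_list_of_strings above.
    if value is None:
        return ''
    return '%s' % value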
rest_framework.templatetags.rest_framework.add_nested_class
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE djangorestframework/rest_framework/templatetags/rest_framework.py def items(value): """ Simple filter to return the items of the dict. Useful when the dict may have a key 'items' which is resolved first in Django template dot-notation lookup. See issue #4931 Also see: https://stackoverflow.com/questions/15416662/django-template-loop-over-dictionary-items-with-items-as-key """ if value is None: # `{% for k, v in value.items %}` doesn't raise when value is None or # not in the context, so neither should `{% for k, v in value|items %}` return [] return value.items() Based on the information above, please complete the function: #CURRENT_FILE: djangorestframework/rest_framework/templatetags/rest_framework.py import re from collections import OrderedDict from django import template from django.template import loader from django.urls import NoReverseMatch, reverse from django.utils.encoding import iri_to_uri from django.utils.html import escape, format_html, smart_urlquote from django.utils.safestring import mark_safe from rest_framework.compat import apply_markdown, pygments_highlight from rest_framework.renderers import HTMLFormRenderer import coreschema from rest_framework.utils.urls import replace_query_param def add_nested_class(value):
djangorestframework/rest_framework/templatetags/rest_framework.py
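A sketch for add_nested_class, assuming the filter tags composite values so templates can style them; the exact return string 'class=nested' is an assumption, chosen to read as an HTML attribute. The nestedness test mirrors the one format_value (quoted two records up, same module) uses to choose the list template:

def add_nested_class(value):
    # Dicts, and lists containing composite items, count as nested;
    # plain scalar values get no extra class.
    if isinstance(value, dict):
        return 'class=nested'
    if isinstance(value, list) and any(isinstance(item, (list, dict)) for item in value):
        return 'class=nested'
    return ''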
pyramid.session.PickleSerializer.loads
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE pyramid/src/pyramid/interfaces.py class ISession(IDict): """An interface representing a session (a web session object, usually accessed via ``request.session``. Keys and values of a session must be JSON-serializable. .. warning:: In :app:`Pyramid` 2.0 the session was changed to only be required to support types that can be serialized using JSON. It's recommended to switch any session implementations to support only JSON and to only store primitive types in sessions. See :ref:`upgrading_session_20` for more information about why this change was made. .. versionchanged:: 1.9 Sessions are no longer required to implement ``get_csrf_token`` and ``new_csrf_token``. CSRF token support was moved to the pluggable :class:`pyramid.interfaces.ICSRFStoragePolicy` configuration hook. .. versionchanged:: 2.0 Sessions now need to be JSON-serializable. This is more strict than the previous requirement of pickleable objects. """ def invalidate(): """Invalidate the session. The action caused by ``invalidate`` is implementation-dependent, but it should have the effect of completely dissociating any data stored in the session with the current request. It might set response values (such as one which clears a cookie), or it might not. An invalidated session may be used after the call to ``invalidate`` with the effect that a new session is created to store the data. This enables workflows requiring an entirely new session, such as in the case of changing privilege levels or preventing fixation attacks. """ def changed(): """Mark the session as changed. A user of a session should call this method after he or she mutates a mutable object that is *a value of the session* (it should not be required after mutating the session itself). For example, if the user has stored a dictionary in the session under the key ``foo``, and he or she does ``session['foo'] = {}``, ``changed()`` needn't be called. However, if subsequently he or she does ``session['foo']['a'] = 1``, ``changed()`` must be called for the sessioning machinery to notice the mutation of the internal dictionary.""" ... def flash(msg, queue='', allow_duplicate=True): """Push a flash message onto the end of the flash queue represented by ``queue``. An alternate flash message queue can used by passing an optional ``queue``, which must be a string. If ``allow_duplicate`` is false, if the ``msg`` already exists in the queue, it will not be re-added.""" ... def pop_flash(queue=''): """Pop a queue from the flash storage. The queue is removed from flash storage after this message is called. The queue is returned; it is a list of flash messages added by :meth:`pyramid.interfaces.ISession.flash`""" ... def peek_flash(queue=''): """Peek at a queue in the flash storage. The queue remains in flash storage after this message is called. The queue is returned; it is a list of flash messages added by :meth:`pyramid.interfaces.ISession.flash` """ ... 
# FILE pyramid/src/pyramid/session.py def manage_accessed(wrapped): """Decorator which causes a cookie to be renewed when an accessor method is called.""" def accessed(session, *arg, **kw): session.accessed = now = int(time.time()) if session._reissue_time is not None: if now - session.renewed > session._reissue_time: session.changed() return wrapped(session, *arg, **kw) accessed.__doc__ = wrapped.__doc__ return accessed # LIB zope def deprecated(specifier, message, cls=DeprecationWarning): """Deprecate the given names.""" # A string specifier (or list of strings) means we're called # top-level in a module and are to deprecate things inside this # module if isinstance(specifier, str_and_sequence_types): globals = sys._getframe(1).f_globals modname = globals['__name__'] if not isinstance(sys.modules[modname], DeprecationProxy): sys.modules[modname] = DeprecationProxy(sys.modules[modname]) sys.modules[modname].deprecate(specifier, message, cls) # Anything else can mean the specifier is a function/method, # module, or just an attribute of a class elif isinstance(specifier, types.FunctionType): return DeprecatedMethod(specifier, message, cls) elif isinstance(specifier, types.ModuleType): return DeprecatedModule(specifier, message, cls) else: prop = specifier if hasattr(prop, '__get__') and hasattr(prop, '__set__') and \ hasattr(prop, '__delete__'): return DeprecatedGetSetDeleteProperty(prop, message, cls) elif hasattr(prop, '__get__') and hasattr(prop, '__set__'): return DeprecatedGetSetProperty(prop, message, cls) elif hasattr(prop, '__get__'): return DeprecatedGetProperty(prop, message, cls) # LIB webob class SignedSerializer(object): """ A helper to cryptographically sign arbitrary content using HMAC. The serializer accepts arbitrary functions for performing the actual serialization and deserialization. ``secret`` A string which is used to sign the cookie. The secret should be at least as long as the block size of the selected hash algorithm. For ``sha512`` this would mean a 512 bit (64 character) secret. ``salt`` A namespace to avoid collisions between different uses of a shared secret. ``hashalg`` The HMAC digest algorithm to use for signing. The algorithm must be supported by the :mod:`hashlib` library. Default: ``'sha512'``. ``serializer`` An object with two methods: `loads`` and ``dumps``. The ``loads`` method should accept bytes and return a Python object. The ``dumps`` method should accept a Python object and return bytes. A ``ValueError`` should be raised for malformed inputs. Default: ``None`, which will use a derivation of :func:`json.dumps` and ``json.loads``. """ def __init__(self, secret, salt, hashalg='sha512', serializer=None, ): self.salt = salt self.secret = secret self.hashalg = hashalg try: # bwcompat with webob <= 1.3.1, leave latin-1 as the default self.salted_secret = bytes_(salt or '') + bytes_(secret) except UnicodeEncodeError: self.salted_secret = ( bytes_(salt or '', 'utf-8') + bytes_(secret, 'utf-8')) self.digestmod = lambda string=b'': hashlib.new(self.hashalg, string) self.digest_size = self.digestmod().digest_size if serializer is None: serializer = JSONSerializer() self.serializer = serializer def dumps(self, appstruct): """ Given an ``appstruct``, serialize and sign the data. Returns a bytestring. 
""" cstruct = self.serializer.dumps(appstruct) # will be bytes sig = hmac.new(self.salted_secret, cstruct, self.digestmod).digest() return base64.urlsafe_b64encode(sig + cstruct).rstrip(b'=') def loads(self, bstruct): """ Given a ``bstruct`` (a bytestring), verify the signature and then deserialize and return the deserialized value. A ``ValueError`` will be raised if the signature fails to validate. """ try: b64padding = b'=' * (-len(bstruct) % 4) fstruct = base64.urlsafe_b64decode(bytes_(bstruct) + b64padding) except (binascii.Error, TypeError) as e: raise ValueError('Badly formed base64 data: %s' % e) cstruct = fstruct[self.digest_size:] expected_sig = fstruct[:self.digest_size] sig = hmac.new( self.salted_secret, bytes_(cstruct), self.digestmod).digest() if strings_differ(sig, expected_sig): raise ValueError('Invalid signature') return self.serializer.loads(cstruct) # FILE pyramid/build/lib/pyramid/csrf.py def check_csrf_token( request, token='csrf_token', header='X-CSRF-Token', raises=True ): """Check the CSRF token returned by the :class:`pyramid.interfaces.ICSRFStoragePolicy` implementation against the value in ``request.POST.get(token)`` (if a POST request) or ``request.headers.get(header)``. If a ``token`` keyword is not supplied to this function, the string ``csrf_token`` will be used to look up the token in ``request.POST``. If a ``header`` keyword is not supplied to this function, the string ``X-CSRF-Token`` will be used to look up the token in ``request.headers``. If the value supplied by post or by header cannot be verified by the :class:`pyramid.interfaces.ICSRFStoragePolicy`, and ``raises`` is ``True``, this function will raise an :exc:`pyramid.exceptions.BadCSRFToken` exception. If the values differ and ``raises`` is ``False``, this function will return ``False``. If the CSRF check is successful, this function will return ``True`` unconditionally. See :ref:`auto_csrf_checking` for information about how to secure your application automatically against CSRF attacks. .. versionadded:: 1.4a2 .. versionchanged:: 1.7a1 A CSRF token passed in the query string of the request is no longer considered valid. It must be passed in either the request body or a header. .. versionchanged:: 1.9 Moved from :mod:`pyramid.session` to :mod:`pyramid.csrf` and updated to use the configured :class:`pyramid.interfaces.ICSRFStoragePolicy` to verify the CSRF token. """ supplied_token = "" # We first check the headers for a csrf token, as that is significantly # cheaper than checking the POST body if header is not None: supplied_token = request.headers.get(header, "") # If this is a POST/PUT/etc request, then we'll check the body to see if it # has a token. We explicitly use request.POST here because CSRF tokens # should never appear in an URL as doing so is a security issue. We also # explicitly check for request.POST here as we do not support sending form # encoded data over anything but a request.POST. if supplied_token == "" and token is not None: supplied_token = request.POST.get(token, "") policy = request.registry.getUtility(ICSRFStoragePolicy) if not policy.check_csrf_token(request, text_(supplied_token)): if raises: raise BadCSRFToken('check_csrf_token(): Invalid token') return False return True # LIB zope class implementer: """ Declare the interfaces implemented by instances of a class. This function is called as a class decorator. The arguments are one or more interfaces or interface specifications (`~zope.interface.interfaces.IDeclaration` objects). 
The interfaces given (including the interfaces in the specifications) are added to any interfaces previously declared, unless the interface is already implemented. Previous declarations include declarations for base classes unless implementsOnly was used. This function is provided for convenience. It provides a more convenient way to call `classImplements`. For example:: @implementer(I1) class C(object): pass is equivalent to calling:: classImplements(C, I1) after the class has been created. .. seealso:: `classImplements` The change history provided there applies to this function too. """ __slots__ = ('interfaces',) def __init__(self, *interfaces): self.interfaces = interfaces def __call__(self, ob): if isinstance(ob, type): # This is the common branch for classes. classImplements(ob, *self.interfaces) return ob spec_name = _implements_name(ob) spec = Implements.named(spec_name, *self.interfaces) try: ob.__implemented__ = spec except AttributeError: raise TypeError("Can't declare implements", ob) return ob # LIB webob class JSONSerializer(object): """ A serializer which uses `json.dumps`` and ``json.loads``""" def dumps(self, appstruct): return bytes_(json.dumps(appstruct), encoding='utf-8') def loads(self, bstruct): # NB: json.loads raises ValueError if no json object can be decoded # so we don't have to do it explicitly here. return json.loads(text_(bstruct, encoding='utf-8')) # FILE pyramid/build/lib/pyramid/csrf.py def check_csrf_origin( request, *, trusted_origins=None, allow_no_origin=False, raises=True ): """ Check the ``Origin`` of the request to see if it is a cross site request or not. If the value supplied by the ``Origin`` or ``Referer`` header isn't one of the trusted origins and ``raises`` is ``True``, this function will raise a :exc:`pyramid.exceptions.BadCSRFOrigin` exception, but if ``raises`` is ``False``, this function will return ``False`` instead. If the CSRF origin checks are successful this function will return ``True`` unconditionally. Additional trusted origins may be added by passing a list of domain (and ports if non-standard like ``['example.com', 'dev.example.com:8080']``) in with the ``trusted_origins`` parameter. If ``trusted_origins`` is ``None`` (the default) this list of additional domains will be pulled from the ``pyramid.csrf_trusted_origins`` setting. ``allow_no_origin`` determines whether to return ``True`` when the origin cannot be determined via either the ``Referer`` or ``Origin`` header. The default is ``False`` which will reject the check. Note that this function will do nothing if ``request.scheme`` is not ``https``. .. versionadded:: 1.7 .. versionchanged:: 1.9 Moved from :mod:`pyramid.session` to :mod:`pyramid.csrf` .. versionchanged:: 2.0 Added the ``allow_no_origin`` option. """ def _fail(reason): if raises: raise BadCSRFOrigin("Origin checking failed - " + reason) else: return False # Origin checks are only trustworthy / useful on HTTPS requests. if request.scheme != "https": return True # Suppose user visits http://example.com/ # An active network attacker (man-in-the-middle, MITM) sends a # POST form that targets https://example.com/detonate-bomb/ and # submits it via JavaScript. # # The attacker will need to provide a CSRF cookie and token, but # that's no problem for a MITM when we cannot make any assumptions # about what kind of session storage is being used. So the MITM can # circumvent the CSRF protection. This is true for any HTTP connection, # but anyone using HTTPS expects better! 
For this reason, for # https://example.com/ we need additional protection that treats # http://example.com/ as completely untrusted. Under HTTPS, # Barth et al. found that the Referer header is missing for # same-domain requests in only about 0.2% of cases or less, so # we can use strict Referer checking. # Determine the origin of this request origin = request.headers.get("Origin") origin_is_referrer = False if origin is None: origin = request.referrer origin_is_referrer = True else: # use the last origin in the list under the assumption that the # server generally appends values and we want the origin closest # to us origin = origin.split(' ')[-1] # If we can't find an origin, fail or pass immediately depending on # ``allow_no_origin`` if not origin: if allow_no_origin: return True else: return _fail("missing Origin or Referer.") # Determine which origins we trust, which by default will include the # current origin. if trusted_origins is None: trusted_origins = aslist( request.registry.settings.get("pyramid.csrf_trusted_origins", []) ) if request.host_port not in {"80", "443"}: trusted_origins.append("{0.domain}:{0.host_port}".format(request)) else: trusted_origins.append(request.domain) # Check "Origin: null" against trusted_origins if not origin_is_referrer and origin == 'null': if origin in trusted_origins: return True else: return _fail("null does not match any trusted origins.") # Parse our origin so we we can extract the required information from # it. originp = urlparse(origin) # Ensure that our Referer is also secure. if originp.scheme != "https": return _fail("Origin is insecure while host is secure.") # Actually check to see if the request's origin matches any of our # trusted origins. if not any( is_same_domain(originp.netloc, host) for host in trusted_origins ): return _fail("{} does not match any trusted origins.".format(origin)) return True # FILE pyramid/src/pyramid/interfaces.py class ISession(IDict): """An interface representing a session (a web session object, usually accessed via ``request.session``. Keys and values of a session must be JSON-serializable. .. warning:: In :app:`Pyramid` 2.0 the session was changed to only be required to support types that can be serialized using JSON. It's recommended to switch any session implementations to support only JSON and to only store primitive types in sessions. See :ref:`upgrading_session_20` for more information about why this change was made. .. versionchanged:: 1.9 Sessions are no longer required to implement ``get_csrf_token`` and ``new_csrf_token``. CSRF token support was moved to the pluggable :class:`pyramid.interfaces.ICSRFStoragePolicy` configuration hook. .. versionchanged:: 2.0 Sessions now need to be JSON-serializable. This is more strict than the previous requirement of pickleable objects. """ # attributes created = Attribute('Integer representing Epoch time when created.') new = Attribute('Boolean attribute. If ``True``, the session is new.') # special methods def invalidate(): """Invalidate the session. The action caused by ``invalidate`` is implementation-dependent, but it should have the effect of completely dissociating any data stored in the session with the current request. It might set response values (such as one which clears a cookie), or it might not. An invalidated session may be used after the call to ``invalidate`` with the effect that a new session is created to store the data. 
This enables workflows requiring an entirely new session, such as in the case of changing privilege levels or preventing fixation attacks. """ def changed(): """Mark the session as changed. A user of a session should call this method after he or she mutates a mutable object that is *a value of the session* (it should not be required after mutating the session itself). For example, if the user has stored a dictionary in the session under the key ``foo``, and he or she does ``session['foo'] = {}``, ``changed()`` needn't be called. However, if subsequently he or she does ``session['foo']['a'] = 1``, ``changed()`` must be called for the sessioning machinery to notice the mutation of the internal dictionary.""" def flash(msg, queue='', allow_duplicate=True): """Push a flash message onto the end of the flash queue represented by ``queue``. An alternate flash message queue can used by passing an optional ``queue``, which must be a string. If ``allow_duplicate`` is false, if the ``msg`` already exists in the queue, it will not be re-added.""" def pop_flash(queue=''): """Pop a queue from the flash storage. The queue is removed from flash storage after this message is called. The queue is returned; it is a list of flash messages added by :meth:`pyramid.interfaces.ISession.flash`""" def peek_flash(queue=''): """Peek at a queue in the flash storage. The queue remains in flash storage after this message is called. The queue is returned; it is a list of flash messages added by :meth:`pyramid.interfaces.ISession.flash` """ # FILE pyramid/src/pyramid/session.py def SignedCookieSessionFactory( secret, cookie_name='session', max_age=None, path='/', domain=None, secure=False, httponly=False, samesite='Lax', set_on_exception=True, timeout=1200, reissue_time=0, hashalg='sha512', salt='pyramid.session.', serializer=None, ): """ Configure a :term:`session factory` which will provide signed cookie-based sessions. The return value of this function is a :term:`session factory`, which may be provided as the ``session_factory`` argument of a :class:`pyramid.config.Configurator` constructor, or used as the ``session_factory`` argument of the :meth:`pyramid.config.Configurator.set_session_factory` method. The session factory returned by this function will create sessions which are limited to storing fewer than 4000 bytes of data (as the payload must fit into a single cookie). Parameters: ``secret`` A string which is used to sign the cookie. The secret should be at least as long as the block size of the selected hash algorithm. For ``sha512`` this would mean a 512 bit (64 character) secret. It should be unique within the set of secret values provided to Pyramid for its various subsystems (see :ref:`admonishment_against_secret_sharing`). ``hashalg`` The HMAC digest algorithm to use for signing. The algorithm must be supported by the :mod:`hashlib` library. Default: ``'sha512'``. ``salt`` A namespace to avoid collisions between different uses of a shared secret. Reusing a secret for different parts of an application is strongly discouraged (see :ref:`admonishment_against_secret_sharing`). Default: ``'pyramid.session.'``. ``cookie_name`` The name of the cookie used for sessioning. Default: ``'session'``. ``max_age`` The maximum age of the cookie used for sessioning (in seconds). Default: ``None`` (browser scope). ``path`` The path used for the session cookie. Default: ``'/'``. ``domain`` The domain used for the session cookie. Default: ``None`` (no domain). ``secure`` The 'secure' flag of the session cookie. 
Default: ``False``. ``httponly`` Hide the cookie from Javascript by setting the 'HttpOnly' flag of the session cookie. Default: ``False``. ``samesite`` The 'samesite' option of the session cookie. Set the value to ``None`` to turn off the samesite option. Default: ``'Lax'``. ``timeout`` A number of seconds of inactivity before a session times out. If ``None`` then the cookie never expires. This lifetime only applies to the *value* within the cookie. Meaning that if the cookie expires due to a lower ``max_age``, then this setting has no effect. Default: ``1200``. ``reissue_time`` The number of seconds that must pass before the cookie is automatically reissued as the result of accessing the session. The duration is measured as the number of seconds since the last session cookie was issued and 'now'. If this value is ``0``, a new cookie will be reissued on every request accessing the session. If ``None`` then the cookie's lifetime will never be extended. A good rule of thumb: if you want auto-expired cookies based on inactivity: set the ``timeout`` value to 1200 (20 mins) and set the ``reissue_time`` value to perhaps a tenth of the ``timeout`` value (120 or 2 mins). It's nonsensical to set the ``timeout`` value lower than the ``reissue_time`` value, as the ticket will never be reissued. However, such a configuration is not explicitly prevented. Default: ``0``. ``set_on_exception`` If ``True``, set a session cookie even if an exception occurs while rendering a view. Default: ``True``. ``serializer`` An object with two methods: ``loads`` and ``dumps``. The ``loads`` method should accept bytes and return a Python object. The ``dumps`` method should accept a Python object and return bytes. A ``ValueError`` should be raised for malformed inputs. If a serializer is not passed, the :class:`pyramid.session.JSONSerializer` serializer will be used. .. warning:: In :app:`Pyramid` 2.0 the default ``serializer`` option changed to use :class:`pyramid.session.JSONSerializer`. See :ref:`upgrading_session_20` for more information about why this change was made. .. versionadded: 1.5a3 .. versionchanged: 1.10 Added the ``samesite`` option and made the default ``Lax``. .. versionchanged: 2.0 Changed the default ``serializer`` to be an instance of :class:`pyramid.session.JSONSerializer`. """ if serializer is None: serializer = JSONSerializer() signed_serializer = SignedSerializer( secret, salt, hashalg, serializer=serializer ) return BaseCookieSessionFactory( signed_serializer, cookie_name=cookie_name, max_age=max_age, path=path, domain=domain, secure=secure, httponly=httponly, samesite=samesite, timeout=timeout, reissue_time=reissue_time, set_on_exception=set_on_exception, ) # FILE pyramid/src/pyramid/session.py def BaseCookieSessionFactory( serializer, cookie_name='session', max_age=None, path='/', domain=None, secure=False, httponly=False, samesite='Lax', timeout=1200, reissue_time=0, set_on_exception=True, ): """ Configure a :term:`session factory` which will provide cookie-based sessions. The return value of this function is a :term:`session factory`, which may be provided as the ``session_factory`` argument of a :class:`pyramid.config.Configurator` constructor, or used as the ``session_factory`` argument of the :meth:`pyramid.config.Configurator.set_session_factory` method. The session factory returned by this function will create sessions which are limited to storing fewer than 4000 bytes of data (as the payload must fit into a single cookie). .. 
warning: This class provides no protection from tampering and is only intended to be used by framework authors to create their own cookie-based session factories. Parameters: ``serializer`` An object with two methods: ``loads`` and ``dumps``. The ``loads`` method should accept bytes and return a Python object. The ``dumps`` method should accept a Python object and return bytes. A ``ValueError`` should be raised for malformed inputs. ``cookie_name`` The name of the cookie used for sessioning. Default: ``'session'``. ``max_age`` The maximum age of the cookie used for sessioning (in seconds). Default: ``None`` (browser scope). ``path`` The path used for the session cookie. Default: ``'/'``. ``domain`` The domain used for the session cookie. Default: ``None`` (no domain). ``secure`` The 'secure' flag of the session cookie. Default: ``False``. ``httponly`` Hide the cookie from Javascript by setting the 'HttpOnly' flag of the session cookie. Default: ``False``. ``samesite`` The 'samesite' option of the session cookie. Set the value to ``None`` to turn off the samesite option. Default: ``'Lax'``. ``timeout`` A number of seconds of inactivity before a session times out. If ``None`` then the cookie never expires. This lifetime only applies to the *value* within the cookie. Meaning that if the cookie expires due to a lower ``max_age``, then this setting has no effect. Default: ``1200``. ``reissue_time`` The number of seconds that must pass before the cookie is automatically reissued as the result of a request which accesses the session. The duration is measured as the number of seconds since the last session cookie was issued and 'now'. If this value is ``0``, a new cookie will be reissued on every request accessing the session. If ``None`` then the cookie's lifetime will never be extended. A good rule of thumb: if you want auto-expired cookies based on inactivity: set the ``timeout`` value to 1200 (20 mins) and set the ``reissue_time`` value to perhaps a tenth of the ``timeout`` value (120 or 2 mins). It's nonsensical to set the ``timeout`` value lower than the ``reissue_time`` value, as the ticket will never be reissued. However, such a configuration is not explicitly prevented. Default: ``0``. ``set_on_exception`` If ``True``, set a session cookie even if an exception occurs while rendering a view. Default: ``True``. .. versionadded: 1.5a3 .. versionchanged: 1.10 Added the ``samesite`` option and made the default ``'Lax'``. 
""" @implementer(ISession) class CookieSession(dict): """Dictionary-like session object""" # configuration parameters _cookie_name = cookie_name _cookie_max_age = max_age if max_age is None else int(max_age) _cookie_path = path _cookie_domain = domain _cookie_secure = secure _cookie_httponly = httponly _cookie_samesite = samesite _cookie_on_exception = set_on_exception _timeout = timeout if timeout is None else int(timeout) _reissue_time = ( reissue_time if reissue_time is None else int(reissue_time) ) # dirty flag _dirty = False def __init__(self, request): self.request = request now = time.time() created = renewed = now new = True value = None state = {} cookieval = request.cookies.get(self._cookie_name) if cookieval is not None: try: value = serializer.loads(bytes_(cookieval)) except ValueError: # the cookie failed to deserialize, dropped value = None if value is not None: try: # since the value is not necessarily signed, we have # to unpack it a little carefully rval, cval, sval = value renewed = float(rval) created = float(cval) state = sval new = False except (TypeError, ValueError): # value failed to unpack properly or renewed was not # a numeric type so we'll fail deserialization here state = {} if self._timeout is not None: if now - renewed > self._timeout: # expire the session because it was not renewed # before the timeout threshold state = {} self.created = created self.accessed = renewed self.renewed = renewed self.new = new dict.__init__(self, state) # ISession methods def changed(self): if not self._dirty: self._dirty = True def set_cookie_callback(request, response): self._set_cookie(response) self.request = None # explicitly break cycle for gc self.request.add_response_callback(set_cookie_callback) def invalidate(self): self.clear() # XXX probably needs to unset cookie # non-modifying dictionary methods get = manage_accessed(dict.get) __getitem__ = manage_accessed(dict.__getitem__) items = manage_accessed(dict.items) values = manage_accessed(dict.values) keys = manage_accessed(dict.keys) __contains__ = manage_accessed(dict.__contains__) __len__ = manage_accessed(dict.__len__) __iter__ = manage_accessed(dict.__iter__) # modifying dictionary methods clear = manage_changed(dict.clear) update = manage_changed(dict.update) setdefault = manage_changed(dict.setdefault) pop = manage_changed(dict.pop) popitem = manage_changed(dict.popitem) __setitem__ = manage_changed(dict.__setitem__) __delitem__ = manage_changed(dict.__delitem__) # flash API methods @manage_changed def flash(self, msg, queue='', allow_duplicate=True): storage = self.setdefault('_f_' + queue, []) if allow_duplicate or (msg not in storage): storage.append(msg) @manage_changed def pop_flash(self, queue=''): storage = self.pop('_f_' + queue, []) return storage @manage_accessed def peek_flash(self, queue=''): storage = self.get('_f_' + queue, []) return storage # CSRF API methods @manage_changed def new_csrf_token(self): token = text_(binascii.hexlify(os.urandom(20))) self['_csrft_'] = token return token @manage_accessed def get_csrf_token(self): token = self.get('_csrft_', None) if token is None: token = self.new_csrf_token() return token # non-API methods def _set_cookie(self, response): if not self._cookie_on_exception: exception = getattr(self.request, 'exception', None) if ( exception is not None ): # dont set a cookie during exceptions return False cookieval = text_( serializer.dumps((self.accessed, self.created, dict(self))) ) if len(cookieval) > 4064: raise ValueError( 'Cookie value is too long to store (%s 
bytes)' % len(cookieval) ) response.set_cookie( self._cookie_name, value=cookieval, max_age=self._cookie_max_age, path=self._cookie_path, domain=self._cookie_domain, secure=self._cookie_secure, httponly=self._cookie_httponly, samesite=self._cookie_samesite, ) return True return CookieSession # LIB zope class implementer: """ Declare the interfaces implemented by instances of a class. This function is called as a class decorator. The arguments are one or more interfaces or interface specifications (`~zope.interface.interfaces.IDeclaration` objects). The interfaces given (including the interfaces in the specifications) are added to any interfaces previously declared, unless the interface is already implemented. Previous declarations include declarations for base classes unless implementsOnly was used. This function is provided for convenience. It provides a more convenient way to call `classImplements`. For example:: @implementer(I1) class C(object): pass is equivalent to calling:: classImplements(C, I1) after the class has been created. .. seealso:: `classImplements` The change history provided there applies to this function too. """ def __init__(self, *interfaces): self.interfaces = interfaces def __call__(self, ob): ... # LIB webob class JSONSerializer(object): """ A serializer which uses `json.dumps`` and ``json.loads``""" def dumps(self, appstruct): ... def loads(self, bstruct): # NB: json.loads raises ValueError if no json object can be decoded # so we don't have to do it explicitly here. return json.loads(text_(bstruct, encoding='utf-8')) # LIB webob class SignedSerializer(object): """ A helper to cryptographically sign arbitrary content using HMAC. The serializer accepts arbitrary functions for performing the actual serialization and deserialization. ``secret`` A string which is used to sign the cookie. The secret should be at least as long as the block size of the selected hash algorithm. For ``sha512`` this would mean a 512 bit (64 character) secret. ``salt`` A namespace to avoid collisions between different uses of a shared secret. ``hashalg`` The HMAC digest algorithm to use for signing. The algorithm must be supported by the :mod:`hashlib` library. Default: ``'sha512'``. ``serializer`` An object with two methods: `loads`` and ``dumps``. The ``loads`` method should accept bytes and return a Python object. The ``dumps`` method should accept a Python object and return bytes. A ``ValueError`` should be raised for malformed inputs. Default: ``None`, which will use a derivation of :func:`json.dumps` and ``json.loads``. """ def __init__(self, secret, salt, hashalg='sha512', serializer=None, ): self.salt = salt self.secret = secret self.hashalg = hashalg try: # bwcompat with webob <= 1.3.1, leave latin-1 as the default self.salted_secret = bytes_(salt or '') + bytes_(secret) except UnicodeEncodeError: self.salted_secret = ( bytes_(salt or '', 'utf-8') + bytes_(secret, 'utf-8')) self.digestmod = lambda string=b'': hashlib.new(self.hashalg, string) self.digest_size = self.digestmod().digest_size if serializer is None: serializer = JSONSerializer() self.serializer = serializer def dumps(self, appstruct): """ Given an ``appstruct``, serialize and sign the data. Returns a bytestring. """ ... def loads(self, bstruct): """ Given a ``bstruct`` (a bytestring), verify the signature and then deserialize and return the deserialized value. A ``ValueError`` will be raised if the signature fails to validate. """ ... 
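The parameter documentation above is easiest to absorb with a concrete
wiring example. The sketch below assumes the factory documented above is
importable as ``pyramid.session.BaseCookieSessionFactory`` (the enclosing
function's name is not visible in this excerpt, so treat that import as an
assumption); ``Configurator.set_session_factory`` is the hook referenced
in the Configurator documentation later in this context.

# Editor's sketch: wiring the cookie session factory into an application.
# Assumes the factory above is pyramid.session.BaseCookieSessionFactory
# (its name is not shown in this excerpt) -- and remember the warning:
# this factory provides no protection from tampering.
from pyramid.config import Configurator
from pyramid.session import BaseCookieSessionFactory
from webob.cookies import JSONSerializer

session_factory = BaseCookieSessionFactory(
    JSONSerializer(),   # loads/dumps contract described above
    cookie_name='session',
    timeout=1200,       # 20 minutes of inactivity before the value expires
    reissue_time=120,   # renew the cookie at most every 2 minutes
    httponly=True,      # hide the cookie from JavaScript
)

def main(global_config, **settings):
    config = Configurator(settings=settings)
    config.set_session_factory(session_factory)
    return config.make_wsgi_app()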
# LIB webob
class SignedSerializer(object):
    """A helper to cryptographically sign arbitrary content using HMAC.

    The serializer accepts arbitrary functions for performing the actual
    serialization and deserialization.

    ``secret``
      A string which is used to sign the cookie. The secret should be at
      least as long as the block size of the selected hash algorithm. For
      ``sha512`` this would mean a 512 bit (64 character) secret.

    ``salt``
      A namespace to avoid collisions between different uses of a shared
      secret.

    ``hashalg``
      The HMAC digest algorithm to use for signing. The algorithm must be
      supported by the :mod:`hashlib` library. Default: ``'sha512'``.

    ``serializer``
      An object with two methods: ``loads`` and ``dumps``. The ``loads``
      method should accept bytes and return a Python object. The ``dumps``
      method should accept a Python object and return bytes. A
      ``ValueError`` should be raised for malformed inputs. Default:
      ``None``, which will use a derivation of :func:`json.dumps` and
      ``json.loads``.
    """

    def __init__(self, secret, salt, hashalg='sha512', serializer=None):
        self.salt = salt
        self.secret = secret
        self.hashalg = hashalg

        try:
            # bwcompat with webob <= 1.3.1, leave latin-1 as the default
            self.salted_secret = bytes_(salt or '') + bytes_(secret)
        except UnicodeEncodeError:
            self.salted_secret = (
                bytes_(salt or '', 'utf-8') + bytes_(secret, 'utf-8'))

        self.digestmod = lambda string=b'': hashlib.new(self.hashalg, string)
        self.digest_size = self.digestmod().digest_size

        if serializer is None:
            serializer = JSONSerializer()
        self.serializer = serializer

    def dumps(self, appstruct):
        """Given an ``appstruct``, serialize and sign the data.

        Returns a bytestring.
        """
        ...

    def loads(self, bstruct):
        """Given a ``bstruct`` (a bytestring), verify the signature and
        then deserialize and return the deserialized value.

        A ``ValueError`` will be raised if the signature fails to
        validate.
        """
        try:
            b64padding = b'=' * (-len(bstruct) % 4)
            fstruct = base64.urlsafe_b64decode(bytes_(bstruct) + b64padding)
        except (binascii.Error, TypeError) as e:
            raise ValueError('Badly formed base64 data: %s' % e)

        cstruct = fstruct[self.digest_size:]
        expected_sig = fstruct[:self.digest_size]

        sig = hmac.new(
            self.salted_secret, bytes_(cstruct), self.digestmod).digest()

        if strings_differ(sig, expected_sig):
            raise ValueError('Invalid signature')

        return self.serializer.loads(cstruct)

Based on the information above, please complete the function:

#CURRENT_FILE: pyramid/src/pyramid/session.py
import binascii
import os
import pickle
import time

from webob.cookies import JSONSerializer, SignedSerializer
from zope.deprecation import deprecated
from zope.interface import implementer

from pyramid.csrf import check_csrf_origin, check_csrf_token
from pyramid.interfaces import ISession
from pyramid.util import bytes_, text_


class PickleSerializer:
    """
    .. deprecated:: 2.0

    .. warning::

       In :app:`Pyramid` 2.0 the default ``serializer`` option changed to
       use :class:`pyramid.session.JSONSerializer`, and
       ``PickleSerializer`` has been removed from active Pyramid code.
       Pyramid will require JSON-serializable objects in :app:`Pyramid`
       2.0. Please see :ref:`upgrading_session_20`.

    A serializer that uses the pickle protocol to dump Python data to
    bytes. This was the default serializer used by Pyramid, but has been
    deprecated.

    ``protocol`` may be specified to control the version of pickle used.
    Defaults to :attr:`pickle.HIGHEST_PROTOCOL`.
""" def __init__(self, protocol=pickle.HIGHEST_PROTOCOL): self.protocol = protocol def loads(self, bstruct): """Accept bytes and return a Python object."""
pyramid/src/pyramid/session.py
pyramid.testing.DummySession.flash
You are a Python programmer. Here is all the context you may find useful to complete the function:

# FILE pyramid/src/pyramid/interfaces.py
class ISession(IDict):
    """An interface representing a session (a web session object, usually
    accessed via ``request.session``). Keys and values of a session must
    be JSON-serializable.

    .. warning::

       In :app:`Pyramid` 2.0 the session was changed to only be required
       to support types that can be serialized using JSON. It's
       recommended to switch any session implementations to support only
       JSON and to only store primitive types in sessions. See
       :ref:`upgrading_session_20` for more information about why this
       change was made.

    .. versionchanged:: 1.9

       Sessions are no longer required to implement ``get_csrf_token``
       and ``new_csrf_token``. CSRF token support was moved to the
       pluggable :class:`pyramid.interfaces.ICSRFStoragePolicy`
       configuration hook.

    .. versionchanged:: 2.0

       Sessions now need to be JSON-serializable. This is more strict
       than the previous requirement of pickleable objects.
    """

    # attributes
    created = Attribute('Integer representing Epoch time when created.')
    new = Attribute('Boolean attribute. If ``True``, the session is new.')

    # special methods
    def invalidate():
        """Invalidate the session.

        The action caused by ``invalidate`` is implementation-dependent,
        but it should have the effect of completely dissociating any data
        stored in the session with the current request. It might set
        response values (such as one which clears a cookie), or it might
        not.

        An invalidated session may be used after the call to
        ``invalidate`` with the effect that a new session is created to
        store the data. This enables workflows requiring an entirely new
        session, such as in the case of changing privilege levels or
        preventing fixation attacks.
        """

    def changed():
        """Mark the session as changed.

        A user of a session should call this method after he or she
        mutates a mutable object that is *a value of the session* (it
        should not be required after mutating the session itself). For
        example, if the user has stored a dictionary in the session under
        the key ``foo``, and he or she does ``session['foo'] = {}``,
        ``changed()`` needn't be called. However, if subsequently he or
        she does ``session['foo']['a'] = 1``, ``changed()`` must be called
        for the sessioning machinery to notice the mutation of the
        internal dictionary."""

    def flash(msg, queue='', allow_duplicate=True):
        """Push a flash message onto the end of the flash queue
        represented by ``queue``. An alternate flash message queue can be
        used by passing an optional ``queue``, which must be a string. If
        ``allow_duplicate`` is false, if the ``msg`` already exists in the
        queue, it will not be re-added."""

    def pop_flash(queue=''):
        """Pop a queue from the flash storage. The queue is removed from
        flash storage after this message is called. The queue is returned;
        it is a list of flash messages added by
        :meth:`pyramid.interfaces.ISession.flash`"""

    def peek_flash(queue=''):
        """Peek at a queue in the flash storage. The queue remains in
        flash storage after this message is called. The queue is returned;
        it is a list of flash messages added by
        :meth:`pyramid.interfaces.ISession.flash`"""

# FILE pyramid/src/pyramid/testing.py
class DummyRendererFactory:
    """Registered by
    :meth:`pyramid.config.Configurator.testing_add_renderer` as a dummy
    renderer factory. The indecision about what to use as a key (a spec
    vs. a relative name) is caused by test suites in the wild believing
    they can register either. The ``factory`` argument passed to this
    constructor is usually the *real* template renderer factory, found
    when ``testing_add_renderer`` is called."""

    def __init__(self, name, factory):
        self.name = name
        self.factory = factory  # the "real" renderer factory reg'd previously
        self.renderers = {}

    def add(self, spec, renderer):
        self.renderers[spec] = renderer
        if ':' in spec:
            package, relative = spec.split(':', 1)
            self.renderers[relative] = renderer

    def __call__(self, info):
        ...

# FILE pyramid/build/lib/pyramid/config/__init__.py
class Configurator(
    """
    A Configurator is used to configure a :app:`Pyramid`
    :term:`application registry`.

    The Configurator lifecycle can be managed by using a context manager
    to automatically handle calling
    :meth:`pyramid.config.Configurator.begin` and
    :meth:`pyramid.config.Configurator.end` as well as
    :meth:`pyramid.config.Configurator.commit`.

    .. code-block:: python

        with Configurator(settings=settings) as config:
            config.add_route('home', '/')
            app = config.make_wsgi_app()

    If the ``registry`` argument is not ``None``, it must be an instance
    of the :class:`pyramid.registry.Registry` class representing the
    registry to configure. If ``registry`` is ``None``, the configurator
    will create a :class:`pyramid.registry.Registry` instance itself; it
    will also perform some default configuration that would not otherwise
    be done. After its construction, the configurator may be used to add
    further configuration to the registry.

    .. warning:: If ``registry`` is assigned the above-mentioned class
       instance, all other constructor arguments are ignored, with the
       exception of ``package``.

    If the ``package`` argument is passed, it must be a reference to a
    Python :term:`package` (e.g. ``sys.modules['thepackage']``) or a
    :term:`dotted Python name` to the same. This value is used as a basis
    to convert relative paths passed to various configuration methods,
    such as methods which accept a ``renderer`` argument, into absolute
    paths. If ``None`` is passed (the default), the package is assumed to
    be the Python package in which the *caller* of the ``Configurator``
    constructor lives.
If the ``root_package`` is passed, it will propagate through the configuration hierarchy as a way for included packages to locate resources relative to the package in which the main ``Configurator`` was created. If ``None`` is passed (the default), the ``root_package`` will be derived from the ``package`` argument. The ``package`` attribute is always pointing at the package being included when using :meth:`.include`, whereas the ``root_package`` does not change. If the ``settings`` argument is passed, it should be a Python dictionary representing the :term:`deployment settings` for this application. These are later retrievable using the :attr:`pyramid.registry.Registry.settings` attribute (aka ``request.registry.settings``). If the ``root_factory`` argument is passed, it should be an object representing the default :term:`root factory` for your application or a :term:`dotted Python name` to the same. If it is ``None``, a default root factory will be used. If ``security_policy`` is passed, it should be an instance of a :term:`security policy` or a :term:`dotted Python name` to the same. If ``authentication_policy`` is passed, it should be an instance of an :term:`authentication policy` or a :term:`dotted Python name` to the same. If ``authorization_policy`` is passed, it should be an instance of an :term:`authorization policy` or a :term:`dotted Python name` to the same. .. note:: A ``ConfigurationError`` will be raised when an authorization policy is supplied without also supplying an authentication policy (authorization requires authentication). If ``renderers`` is ``None`` (the default), a default set of :term:`renderer` factories is used. Else, it should be a list of tuples representing a set of renderer factories which should be configured into this application, and each tuple representing a set of positional values that should be passed to :meth:`pyramid.config.Configurator.add_renderer`. If ``debug_logger`` is not passed, a default debug logger that logs to a logger will be used (the logger name will be the package name of the *caller* of this configurator). If it is passed, it should be an instance of the :class:`logging.Logger` (PEP 282) standard library class or a Python logger name. The debug logger is used by :app:`Pyramid` itself to log warnings and authorization debugging information. If ``locale_negotiator`` is passed, it should be a :term:`locale negotiator` implementation or a :term:`dotted Python name` to same. See :ref:`custom_locale_negotiator`. If ``request_factory`` is passed, it should be a :term:`request factory` implementation or a :term:`dotted Python name` to the same. See :ref:`changing_the_request_factory`. By default it is ``None``, which means use the default request factory. If ``response_factory`` is passed, it should be a :term:`response factory` implementation or a :term:`dotted Python name` to the same. See :ref:`changing_the_response_factory`. By default it is ``None``, which means use the default response factory. If ``default_permission`` is passed, it should be a :term:`permission` string to be used as the default permission for all view configuration registrations performed against this Configurator. An example of a permission string:``'view'``. Adding a default permission makes it unnecessary to protect each view configuration with an explicit permission, unless your application policy requires some exception for a particular view. 
By default, ``default_permission`` is ``None``, meaning that view configurations which do not explicitly declare a permission will always be executable by entirely anonymous users (any authorization policy in effect is ignored). .. seealso:: See also :ref:`setting_a_default_permission`. If ``session_factory`` is passed, it should be an object which implements the :term:`session factory` interface. If a nondefault value is passed, the ``session_factory`` will be used to create a session object when ``request.session`` is accessed. Note that the same outcome can be achieved by calling :meth:`pyramid.config.Configurator.set_session_factory`. By default, this argument is ``None``, indicating that no session factory will be configured (and thus accessing ``request.session`` will throw an error) unless ``set_session_factory`` is called later during configuration. If ``autocommit`` is ``True``, every method called on the configurator will cause an immediate action, and no configuration conflict detection will be used. If ``autocommit`` is ``False``, most methods of the configurator will defer their action until :meth:`pyramid.config.Configurator.commit` is called. When :meth:`pyramid.config.Configurator.commit` is called, the actions implied by the called methods will be checked for configuration conflicts unless ``autocommit`` is ``True``. If a conflict is detected, a ``ConfigurationConflictError`` will be raised. Calling :meth:`pyramid.config.Configurator.make_wsgi_app` always implies a final commit. If ``default_view_mapper`` is passed, it will be used as the default :term:`view mapper` factory for view configurations that don't otherwise specify one (see :class:`pyramid.interfaces.IViewMapperFactory`). If ``default_view_mapper`` is not passed, a superdefault view mapper will be used. If ``exceptionresponse_view`` is passed, it must be a :term:`view callable` or ``None``. If it is a view callable, it will be used as an exception view callable when an :term:`exception response` is raised. If ``exceptionresponse_view`` is ``None``, no exception response view will be registered, and all raised exception responses will be bubbled up to Pyramid's caller. By default, the ``pyramid.httpexceptions.default_exceptionresponse_view`` function is used as the ``exceptionresponse_view``. If ``route_prefix`` is passed, all routes added with :meth:`pyramid.config.Configurator.add_route` will have the specified path prepended to their pattern. If ``introspection`` is passed, it must be a boolean value. If it's ``True``, introspection values during actions will be kept for use for tools like the debug toolbar. If it's ``False``, introspection values provided by registrations will be ignored. By default, it is ``True``. .. versionadded:: 1.1 The ``exceptionresponse_view`` argument. .. versionadded:: 1.2 The ``route_prefix`` argument. .. versionadded:: 1.3 The ``introspection`` argument. .. versionadded:: 1.6 The ``root_package`` argument. The ``response_factory`` argument. .. versionadded:: 1.9 The ability to use the configurator as a context manager with the ``with``-statement to make threadlocal configuration available for further configuration with an implicit commit. 
""" def __init__( self, registry=None, package=None, settings=None, root_factory=None, security_policy=None, authentication_policy=None, authorization_policy=None, renderers=None, debug_logger=None, locale_negotiator=None, request_factory=None, response_factory=None, default_permission=None, session_factory=None, default_view_mapper=None, autocommit=False, exceptionresponse_view=default_exceptionresponse_view, route_prefix=None, introspection=True, root_package=None, ): if package is None: package = caller_package() if root_package is None: root_package = package name_resolver = DottedNameResolver(package) self.name_resolver = name_resolver self.package_name = name_resolver.get_package_name() self.package = name_resolver.get_package() self.root_package = root_package self.registry = registry self.autocommit = autocommit self.route_prefix = route_prefix self.introspection = introspection if registry is None: registry = Registry(self.package_name) self.registry = registry self.setup_registry( settings=settings, root_factory=root_factory, authentication_policy=authentication_policy, authorization_policy=authorization_policy, security_policy=security_policy, renderers=renderers, debug_logger=debug_logger, locale_negotiator=locale_negotiator, request_factory=request_factory, response_factory=response_factory, default_permission=default_permission, session_factory=session_factory, default_view_mapper=default_view_mapper, exceptionresponse_view=exceptionresponse_view, ) def setup_registry( """When you pass a non-``None`` ``registry`` argument to the :term:`Configurator` constructor, no initial setup is performed against the registry. This is because the registry you pass in may have already been initialized for use under :app:`Pyramid` via a different configurator. However, in some circumstances (such as when you want to use a global registry instead of a registry created as a result of the Configurator constructor), or when you want to reset the initial setup of a registry, you *do* want to explicitly initialize the registry associated with a Configurator for use under :app:`Pyramid`. Use ``setup_registry`` to do this initialization. ``setup_registry`` configures settings, a root factory, security policies, renderers, a debug logger, a locale negotiator, and various other settings using the configurator's current registry, as per the descriptions in the Configurator constructor.""" ... def _make_spec(self, path_or_spec): ... def _fix_registry(self): """Fix up a ZCA component registry that is not a pyramid.registry.Registry by adding analogues of ``has_listeners``, ``notify``, ``queryAdapterOrSelf``, and ``registerSelfAdapter`` through monkey-patching.""" ... def _get_introspector(self): ... def _set_introspector(self, introspector): ... def _del_introspector(self): ... def include(self, callable, route_prefix=None): """Include a configuration callable, to support imperative application extensibility. .. warning:: In versions of :app:`Pyramid` prior to 1.2, this function accepted ``*callables``, but this has been changed to support only a single callable. A configuration callable should be a callable that accepts a single argument named ``config``, which will be an instance of a :term:`Configurator`. However, be warned that it will not be the same configurator instance on which you call this method. The code which runs as a result of calling the callable should invoke methods on the configurator passed to it which add configuration state. The return value of a callable will be ignored. 
Values allowed to be presented via the ``callable`` argument to this method: any callable Python object or any :term:`dotted Python name` which resolves to a callable Python object. It may also be a Python :term:`module`, in which case, the module will be searched for a callable named ``includeme``, which will be treated as the configuration callable. For example, if the ``includeme`` function below lives in a module named ``myapp.myconfig``: .. code-block:: python :linenos: # myapp.myconfig module def my_view(request): from pyramid.response import Response return Response('OK') def includeme(config): config.add_view(my_view) You might cause it to be included within your Pyramid application like so: .. code-block:: python :linenos: from pyramid.config import Configurator def main(global_config, **settings): config = Configurator() config.include('myapp.myconfig.includeme') Because the function is named ``includeme``, the function name can also be omitted from the dotted name reference: .. code-block:: python :linenos: from pyramid.config import Configurator def main(global_config, **settings): config = Configurator() config.include('myapp.myconfig') Included configuration statements will be overridden by local configuration statements if an included callable causes a configuration conflict by registering something with the same configuration parameters. If the ``route_prefix`` is supplied, it must be a string and will have a similar effect to using :meth:`pyramid.config.Configurator.route_prefix_context`. Any calls to :meth:`pyramid.config.Configurator.add_route` within the included callable will have their pattern prefixed with the value of ``route_prefix``. This can be used to help mount a set of routes at a different location than the included callable's author intended, while still maintaining the same route names. For example: .. code-block:: python :linenos: from pyramid.config import Configurator def included(config): config.add_route('show_users', '/show') def main(global_config, **settings): config = Configurator() config.include(included, route_prefix='/users') In the above configuration, the ``show_users`` route will have an effective route pattern of ``/users/show``, instead of ``/show`` because the ``route_prefix`` argument will be prepended to the pattern. .. versionadded:: 1.2 The ``route_prefix`` parameter. .. versionchanged:: 1.9 The included function is wrapped with a call to :meth:`pyramid.config.Configurator.begin` and :meth:`pyramid.config.Configurator.end` while it is executed. """ ... def add_directive(self, name, directive, action_wrap=True): """ Add a directive method to the configurator. .. warning:: This method is typically only used by :app:`Pyramid` framework extension authors, not by :app:`Pyramid` application developers. Framework extenders can add directive methods to a configurator by instructing their users to call ``config.add_directive('somename', 'some.callable')``. This will make ``some.callable`` accessible as ``config.somename``. ``some.callable`` should be a function which accepts ``config`` as a first argument, and arbitrary positional and keyword arguments following. It should use config.action as necessary to perform actions. Directive methods can then be invoked like 'built-in' directives such as ``add_view``, ``add_route``, etc. The ``action_wrap`` argument should be ``True`` for directives which perform ``config.action`` with potentially conflicting discriminators. 
``action_wrap`` will cause the directive to be wrapped in a decorator which provides more accurate conflict cause information. ``add_directive`` does not participate in conflict detection, and later calls to ``add_directive`` will override earlier calls. """ ... def __getattr__(self, name): ... def with_package(self, package): """Return a new Configurator instance with the same registry as this configurator. ``package`` may be an actual Python package object or a :term:`dotted Python name` representing a package.""" ... def maybe_dotted(self, dotted): """Resolve the :term:`dotted Python name` ``dotted`` to a global Python object. If ``dotted`` is not a string, return it without attempting to do any name resolution. If ``dotted`` is a relative dotted name (e.g. ``.foo.bar``, consider it relative to the ``package`` argument supplied to this Configurator's constructor.""" ... def absolute_asset_spec(self, relative_spec): """Resolve the potentially relative :term:`asset specification` string passed as ``relative_spec`` into an absolute asset specification string and return the string. Use the ``package`` of this configurator as the package to which the asset specification will be considered relative when generating an absolute asset specification. If the provided ``relative_spec`` argument is already absolute, or if the ``relative_spec`` is not a string, it is simply returned.""" ... def begin(self, request=_marker): """Indicate that application or test configuration has begun. This pushes a dictionary containing the :term:`application registry` implied by ``registry`` attribute of this configurator and the :term:`request` implied by the ``request`` argument onto the :term:`thread local` stack consulted by various :mod:`pyramid.threadlocal` API functions. If ``request`` is not specified and the registry owned by the configurator is already pushed as the current threadlocal registry then this method will keep the current threadlocal request unchanged. .. versionchanged:: 1.8 The current threadlocal request is propagated if the current threadlocal registry remains unchanged. """ ... def end(self): """Indicate that application or test configuration has ended. This pops the last value pushed onto the :term:`thread local` stack (usually by the ``begin`` method) and returns that value. """ return self.manager.pop() def __enter__(self): ... def __exit__(self, exc_type, exc_value, exc_traceback): ... def scan( """Scan a Python package and any of its subpackages for objects marked with :term:`configuration decoration` such as :class:`pyramid.view.view_config`. Any decorated object found will influence the current configuration state. The ``package`` argument should be a Python :term:`package` or module object (or a :term:`dotted Python name` which refers to such a package or module). If ``package`` is ``None``, the package of the *caller* is used. The ``categories`` argument, if provided, should be the :term:`Venusian` 'scan categories' to use during scanning. Providing this argument is not often necessary; specifying scan categories is an extremely advanced usage. By default, ``categories`` is ``['pyramid']`` which will execute only :app:`Pyramid`-related Venusian decorator callbacks such as from :class:`pyramid.view.view_config`. See the :term:`Venusian` documentation for more information about limiting a scan by using an explicit set of categories. Pass ``None`` to pick up *all* Venusian decorators. The ``onerror`` argument, if provided, should be a Venusian ``onerror`` callback function. 
    The onerror function is passed to :meth:`venusian.Scanner.scan` to
    influence error behavior when an exception is raised during the
    scanning process. See the :term:`Venusian` documentation for more
    information about ``onerror`` callbacks.

    The ``ignore`` argument, if provided, should be a Venusian ``ignore``
    value. Providing an ``ignore`` argument allows the scan to ignore
    particular modules, packages, or global objects during a scan.
    ``ignore`` can be a string or a callable, or a list containing strings
    or callables. The simplest usage of ``ignore`` is to provide a module
    or package by providing a full path to its dotted name. For example:
    ``config.scan(ignore='my.module.subpackage')`` would ignore the
    ``my.module.subpackage`` package during a scan, which would prevent
    the subpackage and any of its submodules from being imported and
    scanned. See the :term:`Venusian` documentation for more information
    about the ``ignore`` argument.

    To perform a ``scan``, Pyramid creates a Venusian ``Scanner`` object.
    The ``kw`` argument represents a set of keyword arguments to pass to
    the Venusian ``Scanner`` object's constructor. See the
    :term:`venusian` documentation (its ``Scanner`` class) for more
    information about the constructor. By default, the only keyword
    arguments passed to the Scanner constructor are ``{'config': self}``
    where ``self`` is this configurator object. This services the
    requirement of all built-in Pyramid decorators, but extension systems
    may require additional arguments. Providing this argument is not often
    necessary; it's an advanced usage.

    .. versionadded:: 1.1
       The ``**kw`` argument.

    .. versionadded:: 1.3
       The ``ignore`` argument.

    .. versionchanged:: 2.0
       The ``categories`` argument now defaults to ``['pyramid']`` instead
       of ``None`` to control which decorator callbacks are executed.
    """
    ...

    def make_wsgi_app(self):
        """Commits any pending configuration statements, sends a
        :class:`pyramid.events.ApplicationCreated` event to all listeners,
        adds this configuration's registry to
        :attr:`pyramid.config.global_registries`, and returns a
        :app:`Pyramid` WSGI application representing the committed
        configuration state."""
        ...

Based on the information above, please complete the function:

#CURRENT_FILE: pyramid/src/pyramid/testing.py
from contextlib import contextmanager
import copy
import os

from webob.acceptparse import create_accept_header
from zope.interface import alsoProvides, implementer

from pyramid.config import Configurator
from pyramid.decorator import reify
from pyramid.i18n import LocalizerRequestMixin
from pyramid.interfaces import IRequest, ISession
from pyramid.path import caller_package
from pyramid.registry import Registry
from pyramid.request import CallbackMethodsMixin
from pyramid.security import AuthenticationAPIMixin, SecurityAPIMixin
from pyramid.threadlocal import get_current_registry, manager
from pyramid.url import URLMethodsMixin
from pyramid.util import PYPY, InstancePropertyMixin
from pyramid.view import ViewMethodsMixin
from pyramid.response import _get_response_factory
from zope.component import getSiteManager


class DummySession(dict):
    created = None
    new = True

    def changed(self):
        pass

    def invalidate(self):
        self.clear()

    def flash(self, msg, queue='', allow_duplicate=True):
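        # Editor's sketch of a plausible completion, mirroring the
        # CookieSession.flash implementation shown in the session.py
        # record earlier in this document; not guaranteed to be Pyramid's
        # exact testing code. Flash messages live under the '_f_' + queue
        # key so tests observe the queue semantics described by ISession.
        storage = self.setdefault('_f_' + queue, [])
        if allow_duplicate or (msg not in storage):
            storage.append(msg)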
pyramid/src/pyramid/testing.py
pyramid.testing.DummySession.pop_flash
You are a Python programmer. Here is all the context you may find useful to complete the function:

# FILE pyramid/src/pyramid/interfaces.py
class ISession(IDict):
    """An interface representing a session (a web session object, usually
    accessed via ``request.session``). Keys and values of a session must
    be JSON-serializable.

    .. warning::

       In :app:`Pyramid` 2.0 the session was changed to only be required
       to support types that can be serialized using JSON. It's
       recommended to switch any session implementations to support only
       JSON and to only store primitive types in sessions. See
       :ref:`upgrading_session_20` for more information about why this
       change was made.

    .. versionchanged:: 1.9

       Sessions are no longer required to implement ``get_csrf_token``
       and ``new_csrf_token``. CSRF token support was moved to the
       pluggable :class:`pyramid.interfaces.ICSRFStoragePolicy`
       configuration hook.

    .. versionchanged:: 2.0

       Sessions now need to be JSON-serializable. This is more strict
       than the previous requirement of pickleable objects.
    """

    # attributes
    created = Attribute('Integer representing Epoch time when created.')
    new = Attribute('Boolean attribute. If ``True``, the session is new.')

    # special methods
    def invalidate():
        """Invalidate the session.

        The action caused by ``invalidate`` is implementation-dependent,
        but it should have the effect of completely dissociating any data
        stored in the session with the current request. It might set
        response values (such as one which clears a cookie), or it might
        not.

        An invalidated session may be used after the call to
        ``invalidate`` with the effect that a new session is created to
        store the data. This enables workflows requiring an entirely new
        session, such as in the case of changing privilege levels or
        preventing fixation attacks.
        """

    def changed():
        """Mark the session as changed.

        A user of a session should call this method after he or she
        mutates a mutable object that is *a value of the session* (it
        should not be required after mutating the session itself). For
        example, if the user has stored a dictionary in the session under
        the key ``foo``, and he or she does ``session['foo'] = {}``,
        ``changed()`` needn't be called. However, if subsequently he or
        she does ``session['foo']['a'] = 1``, ``changed()`` must be called
        for the sessioning machinery to notice the mutation of the
        internal dictionary."""

    def flash(msg, queue='', allow_duplicate=True):
        """Push a flash message onto the end of the flash queue
        represented by ``queue``. An alternate flash message queue can be
        used by passing an optional ``queue``, which must be a string. If
        ``allow_duplicate`` is false, if the ``msg`` already exists in the
        queue, it will not be re-added."""

    def pop_flash(queue=''):
        """Pop a queue from the flash storage. The queue is removed from
        flash storage after this message is called. The queue is returned;
        it is a list of flash messages added by
        :meth:`pyramid.interfaces.ISession.flash`"""

    def peek_flash(queue=''):
        """Peek at a queue in the flash storage. The queue remains in
        flash storage after this message is called. The queue is returned;
        it is a list of flash messages added by
        :meth:`pyramid.interfaces.ISession.flash`"""

# FILE pyramid/src/pyramid/testing.py
class DummyResource:
    """A dummy :app:`Pyramid` :term:`resource` object."""

    def __init__(
        self, __name__=None, __parent__=None, __provides__=None, **kw
    ):
        """The resource's ``__name__`` attribute will be set to the value
        of the ``__name__`` argument, and the resource's ``__parent__``
        attribute will be set to the value of the ``__parent__`` argument.
        If ``__provides__`` is specified, it should be an interface object
        or tuple of interface objects that will be attached to the
        resulting resource via :func:`zope.interface.alsoProvides`. Any
        extra keywords passed in the ``kw`` argument will be set as direct
        attributes of the resource object.

        .. note:: For backwards compatibility purposes, this class can
           also be imported as :class:`pyramid.testing.DummyModel`.
        """
        self.__name__ = __name__
        self.__parent__ = __parent__
        if __provides__ is not None:
            alsoProvides(self, __provides__)
        self.kw = kw
        self.__dict__.update(**kw)
        self.subs = {}

    def __setitem__(self, name, val):
        """When the ``__setitem__`` method is called, the object passed in
        as ``val`` will be decorated with a ``__parent__`` attribute
        pointing at the dummy resource and a ``__name__`` attribute that
        is the value of ``name``. The value will then be returned when the
        dummy resource's ``__getitem__`` is called with the name
        ``name``."""
        ...

    def __getitem__(self, name):
        """Return a named subobject (see ``__setitem__``)"""
        ...

    def __delitem__(self, name):
        ...

    def get(self, name, default=None):
        return self.subs.get(name, default)

    def values(self):
        """Return the values set by __setitem__"""
        ...

    def items(self):
        """Return the items set by __setitem__"""
        ...

    def keys(self):
        """Return the keys set by __setitem__"""
        ...

    def __bool__(self):
        ...

    def __len__(self):
        ...

    def __contains__(self, name):
        ...

    def clone(self, __name__=_marker, __parent__=_marker, **kw):
        """Create a clone of the resource object. If ``__name__`` or
        ``__parent__`` arguments are passed, use these values to override
        the existing ``__name__`` or ``__parent__`` of the resource. If
        any extra keyword args are passed in via the ``kw`` argument, use
        these keywords to add to or override existing resource keywords
        (attributes)."""
        ...

# FILE pyramid/src/pyramid/testing.py
class DummyRendererFactory:
    """Registered by
    :meth:`pyramid.config.Configurator.testing_add_renderer` as a dummy
    renderer factory. The indecision about what to use as a key (a spec
    vs. a relative name) is caused by test suites in the wild believing
    they can register either. The ``factory`` argument passed to this
    constructor is usually the *real* template renderer factory, found
    when ``testing_add_renderer`` is called."""

    def __init__(self, name, factory):
        self.name = name
        self.factory = factory  # the "real" renderer factory reg'd previously
        self.renderers = {}

    def add(self, spec, renderer):
        self.renderers[spec] = renderer
        if ':' in spec:
            package, relative = spec.split(':', 1)
            self.renderers[relative] = renderer

    def __call__(self, info):
        ...
# FILE pyramid/build/lib/pyramid/config/__init__.py class Configurator( """ A Configurator is used to configure a :app:`Pyramid` :term:`application registry`. The Configurator lifecycle can be managed by using a context manager to automatically handle calling :meth:`pyramid.config.Configurator.begin` and :meth:`pyramid.config.Configurator.end` as well as :meth:`pyramid.config.Configurator.commit`. .. code-block:: python with Configurator(settings=settings) as config: config.add_route('home', '/') app = config.make_wsgi_app() If the ``registry`` argument is not ``None``, it must be an instance of the :class:`pyramid.registry.Registry` class representing the registry to configure. If ``registry`` is ``None``, the configurator will create a :class:`pyramid.registry.Registry` instance itself; it will also perform some default configuration that would not otherwise be done. After its construction, the configurator may be used to add further configuration to the registry. .. warning:: If ``registry`` is assigned the above-mentioned class instance, all other constructor arguments are ignored, with the exception of ``package``. If the ``package`` argument is passed, it must be a reference to a Python :term:`package` (e.g. ``sys.modules['thepackage']``) or a :term:`dotted Python name` to the same. This value is used as a basis to convert relative paths passed to various configuration methods, such as methods which accept a ``renderer`` argument, into absolute paths. If ``None`` is passed (the default), the package is assumed to be the Python package in which the *caller* of the ``Configurator`` constructor lives. If the ``root_package`` is passed, it will propagate through the configuration hierarchy as a way for included packages to locate resources relative to the package in which the main ``Configurator`` was created. If ``None`` is passed (the default), the ``root_package`` will be derived from the ``package`` argument. The ``package`` attribute is always pointing at the package being included when using :meth:`.include`, whereas the ``root_package`` does not change. If the ``settings`` argument is passed, it should be a Python dictionary representing the :term:`deployment settings` for this application. These are later retrievable using the :attr:`pyramid.registry.Registry.settings` attribute (aka ``request.registry.settings``). If the ``root_factory`` argument is passed, it should be an object representing the default :term:`root factory` for your application or a :term:`dotted Python name` to the same. If it is ``None``, a default root factory will be used. If ``security_policy`` is passed, it should be an instance of a :term:`security policy` or a :term:`dotted Python name` to the same. If ``authentication_policy`` is passed, it should be an instance of an :term:`authentication policy` or a :term:`dotted Python name` to the same. If ``authorization_policy`` is passed, it should be an instance of an :term:`authorization policy` or a :term:`dotted Python name` to the same. .. note:: A ``ConfigurationError`` will be raised when an authorization policy is supplied without also supplying an authentication policy (authorization requires authentication). If ``renderers`` is ``None`` (the default), a default set of :term:`renderer` factories is used. 
Otherwise, it should be a list of tuples representing a set of renderer factories which should be configured into this application, with each tuple representing a set of positional values that should be passed to :meth:`pyramid.config.Configurator.add_renderer`.

If ``debug_logger`` is not passed, a default debug logger that logs to a logger will be used (the logger name will be the package name of the *caller* of this configurator). If it is passed, it should be an instance of the :class:`logging.Logger` (PEP 282) standard library class or a Python logger name. The debug logger is used by :app:`Pyramid` itself to log warnings and authorization debugging information.

If ``locale_negotiator`` is passed, it should be a :term:`locale negotiator` implementation or a :term:`dotted Python name` to the same. See :ref:`custom_locale_negotiator`.

If ``request_factory`` is passed, it should be a :term:`request factory` implementation or a :term:`dotted Python name` to the same. See :ref:`changing_the_request_factory`. By default it is ``None``, which means use the default request factory.

If ``response_factory`` is passed, it should be a :term:`response factory` implementation or a :term:`dotted Python name` to the same. See :ref:`changing_the_response_factory`. By default it is ``None``, which means use the default response factory.

If ``default_permission`` is passed, it should be a :term:`permission` string to be used as the default permission for all view configuration registrations performed against this Configurator. An example of a permission string: ``'view'``. Adding a default permission makes it unnecessary to protect each view configuration with an explicit permission, unless your application policy requires some exception for a particular view. By default, ``default_permission`` is ``None``, meaning that view configurations which do not explicitly declare a permission will always be executable by entirely anonymous users (any authorization policy in effect is ignored).

.. seealso:: See also :ref:`setting_a_default_permission`.

If ``session_factory`` is passed, it should be an object which implements the :term:`session factory` interface. If a nondefault value is passed, the ``session_factory`` will be used to create a session object when ``request.session`` is accessed. Note that the same outcome can be achieved by calling :meth:`pyramid.config.Configurator.set_session_factory`. By default, this argument is ``None``, indicating that no session factory will be configured (and thus accessing ``request.session`` will throw an error) unless ``set_session_factory`` is called later during configuration.

If ``autocommit`` is ``True``, every method called on the configurator will cause an immediate action, and no configuration conflict detection will be used. If ``autocommit`` is ``False``, most methods of the configurator will defer their action until :meth:`pyramid.config.Configurator.commit` is called. When :meth:`pyramid.config.Configurator.commit` is called, the actions implied by the called methods will be checked for configuration conflicts unless ``autocommit`` is ``True``. If a conflict is detected, a ``ConfigurationConflictError`` will be raised. Calling :meth:`pyramid.config.Configurator.make_wsgi_app` always implies a final commit.

If ``default_view_mapper`` is passed, it will be used as the default :term:`view mapper` factory for view configurations that don't otherwise specify one (see :class:`pyramid.interfaces.IViewMapperFactory`).
If ``default_view_mapper`` is not passed, a superdefault view mapper will be used. If ``exceptionresponse_view`` is passed, it must be a :term:`view callable` or ``None``. If it is a view callable, it will be used as an exception view callable when an :term:`exception response` is raised. If ``exceptionresponse_view`` is ``None``, no exception response view will be registered, and all raised exception responses will be bubbled up to Pyramid's caller. By default, the ``pyramid.httpexceptions.default_exceptionresponse_view`` function is used as the ``exceptionresponse_view``. If ``route_prefix`` is passed, all routes added with :meth:`pyramid.config.Configurator.add_route` will have the specified path prepended to their pattern. If ``introspection`` is passed, it must be a boolean value. If it's ``True``, introspection values during actions will be kept for use for tools like the debug toolbar. If it's ``False``, introspection values provided by registrations will be ignored. By default, it is ``True``. .. versionadded:: 1.1 The ``exceptionresponse_view`` argument. .. versionadded:: 1.2 The ``route_prefix`` argument. .. versionadded:: 1.3 The ``introspection`` argument. .. versionadded:: 1.6 The ``root_package`` argument. The ``response_factory`` argument. .. versionadded:: 1.9 The ability to use the configurator as a context manager with the ``with``-statement to make threadlocal configuration available for further configuration with an implicit commit. """ def __init__( self, registry=None, package=None, settings=None, root_factory=None, security_policy=None, authentication_policy=None, authorization_policy=None, renderers=None, debug_logger=None, locale_negotiator=None, request_factory=None, response_factory=None, default_permission=None, session_factory=None, default_view_mapper=None, autocommit=False, exceptionresponse_view=default_exceptionresponse_view, route_prefix=None, introspection=True, root_package=None, ): if package is None: package = caller_package() if root_package is None: root_package = package name_resolver = DottedNameResolver(package) self.name_resolver = name_resolver self.package_name = name_resolver.get_package_name() self.package = name_resolver.get_package() self.root_package = root_package self.registry = registry self.autocommit = autocommit self.route_prefix = route_prefix self.introspection = introspection if registry is None: registry = Registry(self.package_name) self.registry = registry self.setup_registry( settings=settings, root_factory=root_factory, authentication_policy=authentication_policy, authorization_policy=authorization_policy, security_policy=security_policy, renderers=renderers, debug_logger=debug_logger, locale_negotiator=locale_negotiator, request_factory=request_factory, response_factory=response_factory, default_permission=default_permission, session_factory=session_factory, default_view_mapper=default_view_mapper, exceptionresponse_view=exceptionresponse_view, ) def setup_registry( """When you pass a non-``None`` ``registry`` argument to the :term:`Configurator` constructor, no initial setup is performed against the registry. This is because the registry you pass in may have already been initialized for use under :app:`Pyramid` via a different configurator. 
However, in some circumstances (such as when you want to use a global registry instead of a registry created as a result of the Configurator constructor), or when you want to reset the initial setup of a registry, you *do* want to explicitly initialize the registry associated with a Configurator for use under :app:`Pyramid`. Use ``setup_registry`` to do this initialization. ``setup_registry`` configures settings, a root factory, security policies, renderers, a debug logger, a locale negotiator, and various other settings using the configurator's current registry, as per the descriptions in the Configurator constructor.""" ... def _make_spec(self, path_or_spec): ... def _fix_registry(self): """Fix up a ZCA component registry that is not a pyramid.registry.Registry by adding analogues of ``has_listeners``, ``notify``, ``queryAdapterOrSelf``, and ``registerSelfAdapter`` through monkey-patching.""" ... def _get_introspector(self): ... def _set_introspector(self, introspector): ... def _del_introspector(self): ... def include(self, callable, route_prefix=None): """Include a configuration callable, to support imperative application extensibility. .. warning:: In versions of :app:`Pyramid` prior to 1.2, this function accepted ``*callables``, but this has been changed to support only a single callable. A configuration callable should be a callable that accepts a single argument named ``config``, which will be an instance of a :term:`Configurator`. However, be warned that it will not be the same configurator instance on which you call this method. The code which runs as a result of calling the callable should invoke methods on the configurator passed to it which add configuration state. The return value of a callable will be ignored. Values allowed to be presented via the ``callable`` argument to this method: any callable Python object or any :term:`dotted Python name` which resolves to a callable Python object. It may also be a Python :term:`module`, in which case, the module will be searched for a callable named ``includeme``, which will be treated as the configuration callable. For example, if the ``includeme`` function below lives in a module named ``myapp.myconfig``: .. code-block:: python :linenos: # myapp.myconfig module def my_view(request): from pyramid.response import Response return Response('OK') def includeme(config): config.add_view(my_view) You might cause it to be included within your Pyramid application like so: .. code-block:: python :linenos: from pyramid.config import Configurator def main(global_config, **settings): config = Configurator() config.include('myapp.myconfig.includeme') Because the function is named ``includeme``, the function name can also be omitted from the dotted name reference: .. code-block:: python :linenos: from pyramid.config import Configurator def main(global_config, **settings): config = Configurator() config.include('myapp.myconfig') Included configuration statements will be overridden by local configuration statements if an included callable causes a configuration conflict by registering something with the same configuration parameters. If the ``route_prefix`` is supplied, it must be a string and will have a similar effect to using :meth:`pyramid.config.Configurator.route_prefix_context`. Any calls to :meth:`pyramid.config.Configurator.add_route` within the included callable will have their pattern prefixed with the value of ``route_prefix``. 
This can be used to help mount a set of routes at a different location than the included callable's author intended, while still maintaining the same route names. For example: .. code-block:: python :linenos: from pyramid.config import Configurator def included(config): config.add_route('show_users', '/show') def main(global_config, **settings): config = Configurator() config.include(included, route_prefix='/users') In the above configuration, the ``show_users`` route will have an effective route pattern of ``/users/show``, instead of ``/show`` because the ``route_prefix`` argument will be prepended to the pattern. .. versionadded:: 1.2 The ``route_prefix`` parameter. .. versionchanged:: 1.9 The included function is wrapped with a call to :meth:`pyramid.config.Configurator.begin` and :meth:`pyramid.config.Configurator.end` while it is executed. """ ... def add_directive(self, name, directive, action_wrap=True): """ Add a directive method to the configurator. .. warning:: This method is typically only used by :app:`Pyramid` framework extension authors, not by :app:`Pyramid` application developers. Framework extenders can add directive methods to a configurator by instructing their users to call ``config.add_directive('somename', 'some.callable')``. This will make ``some.callable`` accessible as ``config.somename``. ``some.callable`` should be a function which accepts ``config`` as a first argument, and arbitrary positional and keyword arguments following. It should use config.action as necessary to perform actions. Directive methods can then be invoked like 'built-in' directives such as ``add_view``, ``add_route``, etc. The ``action_wrap`` argument should be ``True`` for directives which perform ``config.action`` with potentially conflicting discriminators. ``action_wrap`` will cause the directive to be wrapped in a decorator which provides more accurate conflict cause information. ``add_directive`` does not participate in conflict detection, and later calls to ``add_directive`` will override earlier calls. """ ... def __getattr__(self, name): ... def with_package(self, package): """Return a new Configurator instance with the same registry as this configurator. ``package`` may be an actual Python package object or a :term:`dotted Python name` representing a package.""" ... def maybe_dotted(self, dotted): """Resolve the :term:`dotted Python name` ``dotted`` to a global Python object. If ``dotted`` is not a string, return it without attempting to do any name resolution. If ``dotted`` is a relative dotted name (e.g. ``.foo.bar``, consider it relative to the ``package`` argument supplied to this Configurator's constructor.""" ... def absolute_asset_spec(self, relative_spec): """Resolve the potentially relative :term:`asset specification` string passed as ``relative_spec`` into an absolute asset specification string and return the string. Use the ``package`` of this configurator as the package to which the asset specification will be considered relative when generating an absolute asset specification. If the provided ``relative_spec`` argument is already absolute, or if the ``relative_spec`` is not a string, it is simply returned.""" ... def begin(self, request=_marker): """Indicate that application or test configuration has begun. 
This pushes a dictionary containing the :term:`application registry` implied by ``registry`` attribute of this configurator and the :term:`request` implied by the ``request`` argument onto the :term:`thread local` stack consulted by various :mod:`pyramid.threadlocal` API functions. If ``request`` is not specified and the registry owned by the configurator is already pushed as the current threadlocal registry then this method will keep the current threadlocal request unchanged. .. versionchanged:: 1.8 The current threadlocal request is propagated if the current threadlocal registry remains unchanged. """ ... def end(self): """Indicate that application or test configuration has ended. This pops the last value pushed onto the :term:`thread local` stack (usually by the ``begin`` method) and returns that value. """ return self.manager.pop() def __enter__(self): ... def __exit__(self, exc_type, exc_value, exc_traceback): ... def scan( """Scan a Python package and any of its subpackages for objects marked with :term:`configuration decoration` such as :class:`pyramid.view.view_config`. Any decorated object found will influence the current configuration state. The ``package`` argument should be a Python :term:`package` or module object (or a :term:`dotted Python name` which refers to such a package or module). If ``package`` is ``None``, the package of the *caller* is used. The ``categories`` argument, if provided, should be the :term:`Venusian` 'scan categories' to use during scanning. Providing this argument is not often necessary; specifying scan categories is an extremely advanced usage. By default, ``categories`` is ``['pyramid']`` which will execute only :app:`Pyramid`-related Venusian decorator callbacks such as from :class:`pyramid.view.view_config`. See the :term:`Venusian` documentation for more information about limiting a scan by using an explicit set of categories. Pass ``None`` to pick up *all* Venusian decorators. The ``onerror`` argument, if provided, should be a Venusian ``onerror`` callback function. The onerror function is passed to :meth:`venusian.Scanner.scan` to influence error behavior when an exception is raised during the scanning process. See the :term:`Venusian` documentation for more information about ``onerror`` callbacks. The ``ignore`` argument, if provided, should be a Venusian ``ignore`` value. Providing an ``ignore`` argument allows the scan to ignore particular modules, packages, or global objects during a scan. ``ignore`` can be a string or a callable, or a list containing strings or callables. The simplest usage of ``ignore`` is to provide a module or package by providing a full path to its dotted name. For example: ``config.scan(ignore='my.module.subpackage')`` would ignore the ``my.module.subpackage`` package during a scan, which would prevent the subpackage and any of its submodules from being imported and scanned. See the :term:`Venusian` documentation for more information about the ``ignore`` argument. To perform a ``scan``, Pyramid creates a Venusian ``Scanner`` object. The ``kw`` argument represents a set of keyword arguments to pass to the Venusian ``Scanner`` object's constructor. See the :term:`venusian` documentation (its ``Scanner`` class) for more information about the constructor. By default, the only keyword arguments passed to the Scanner constructor are ``{'config':self}`` where ``self`` is this configurator object. This services the requirement of all built-in Pyramid decorators, but extension systems may require additional arguments. 
Providing this argument is not often necessary; it's an advanced usage. .. versionadded:: 1.1 The ``**kw`` argument. .. versionadded:: 1.3 The ``ignore`` argument. .. versionchanged:: 2.0 The ``categories`` argument now defaults to ``['pyramid']`` instead of ``None`` to control which decorator callbacks are executed. """ ... def make_wsgi_app(self): """Commits any pending configuration statements, sends a :class:`pyramid.events.ApplicationCreated` event to all listeners, adds this configuration's registry to :attr:`pyramid.config.global_registries`, and returns a :app:`Pyramid` WSGI application representing the committed configuration state.""" ... Based on the information above, please complete the function: #CURRENT_FILE: pyramid/src/pyramid/testing.py from contextlib import contextmanager import copy import os from webob.acceptparse import create_accept_header from zope.interface import alsoProvides, implementer from pyramid.config import Configurator from pyramid.decorator import reify from pyramid.i18n import LocalizerRequestMixin from pyramid.interfaces import IRequest, ISession from pyramid.path import caller_package from pyramid.registry import Registry from pyramid.request import CallbackMethodsMixin from pyramid.security import AuthenticationAPIMixin, SecurityAPIMixin from pyramid.threadlocal import get_current_registry, manager from pyramid.url import URLMethodsMixin from pyramid.util import PYPY, InstancePropertyMixin from pyramid.view import ViewMethodsMixin from pyramid.response import _get_response_factory from zope.component import getSiteManager class DummySession(dict): created = None new = True def changed(self): pass def invalidate(self): self.clear() def flash(self, msg, queue='', allow_duplicate=True): storage = self.setdefault('_f_' + queue, []) if allow_duplicate or (msg not in storage): storage.append(msg) def pop_flash(self, queue=''):
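One plausible completion of this stub — a sketch inferred from the ``'_f_' + queue`` storage key that ``flash`` writes above and from the ``ISession.pop_flash`` contract (the queue is removed once popped); the same body also appears in the current file of the next entry:

    def pop_flash(self, queue=''):
        # Remove the queue's backing list from the session and return it;
        # a queue that was never flashed to pops as an empty list.
        storage = self.pop('_f_' + queue, [])
        return storage

A quick round-trip check of the semantics: ``flash('saved')`` followed by ``pop_flash()`` returns ``['saved']``, and a second ``pop_flash()`` returns ``[]`` because the queue's key was deleted by the first pop.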
pyramid/src/pyramid/testing.py
pyramid.testing.DummySession.peek_flash
You are a Python programmer. Here is all the context you may find useful to complete the function:

# FILE pyramid/src/pyramid/testing.py
class DummySecurityPolicy:
    """A standin for a :term:`security policy`."""

    def __init__(
        self,
        userid=None,
        identity=None,
        permissive=True,
        remember_result=None,
        forget_result=None,
    ):
        self.userid = userid
        self._identity = identity
        self.permissive = permissive
        if remember_result is None:
            remember_result = []
        if forget_result is None:
            forget_result = []
        self.remember_result = remember_result
        self.forget_result = forget_result

    def identity(self, request):
        return self._identity

    def authenticated_userid(self, request):
        return self.userid

    def permits(self, request, context, permission):
        return self.permissive

    def remember(self, request, userid, **kw):
        self.remembered = userid
        return self.remember_result

    def forget(self, request, **kw):
        self.forgotten = True
        return self.forget_result

# FILE pyramid/src/pyramid/interfaces.py
class ISession(IDict):
    """An interface representing a session (a web session object, usually
    accessed via ``request.session``). Keys and values of a session must be
    JSON-serializable.

    .. warning::

        In :app:`Pyramid` 2.0 the session was changed to only be required to
        support types that can be serialized using JSON. It's recommended to
        switch any session implementations to support only JSON and to only
        store primitive types in sessions. See :ref:`upgrading_session_20`
        for more information about why this change was made.

    .. versionchanged:: 1.9

        Sessions are no longer required to implement ``get_csrf_token`` and
        ``new_csrf_token``. CSRF token support was moved to the pluggable
        :class:`pyramid.interfaces.ICSRFStoragePolicy` configuration hook.

    .. versionchanged:: 2.0

        Sessions now need to be JSON-serializable. This is more strict than
        the previous requirement of pickleable objects.
    """

    # attributes
    created = Attribute('Integer representing Epoch time when created.')
    new = Attribute('Boolean attribute. If ``True``, the session is new.')

    # special methods
    def invalidate():
        """Invalidate the session.

        The action caused by ``invalidate`` is implementation-dependent, but
        it should have the effect of completely dissociating any data stored
        in the session with the current request. It might set response values
        (such as one which clears a cookie), or it might not.

        An invalidated session may be used after the call to ``invalidate``
        with the effect that a new session is created to store the data. This
        enables workflows requiring an entirely new session, such as in the
        case of changing privilege levels or preventing fixation attacks.
        """

    def changed():
        """Mark the session as changed.

        A user of a session should call this method after he or she mutates a
        mutable object that is *a value of the session* (it should not be
        required after mutating the session itself). For example, if the user
        has stored a dictionary in the session under the key ``foo``, and he
        or she does ``session['foo'] = {}``, ``changed()`` needn't be called.
        However, if subsequently he or she does ``session['foo']['a'] = 1``,
        ``changed()`` must be called for the sessioning machinery to notice
        the mutation of the internal dictionary."""

    def flash(msg, queue='', allow_duplicate=True):
        """Push a flash message onto the end of the flash queue represented
        by ``queue``. An alternate flash message queue can be used by passing
        an optional ``queue``, which must be a string. If ``allow_duplicate``
        is false and the ``msg`` already exists in the queue, it will not be
        re-added."""

    def pop_flash(queue=''):
        """Pop a queue from the flash storage. The queue is removed from
        flash storage after this method is called. The queue is returned; it
        is a list of flash messages added by
        :meth:`pyramid.interfaces.ISession.flash`"""

    def peek_flash(queue=''):
        """Peek at a queue in the flash storage. The queue remains in flash
        storage after this method is called. The queue is returned; it is a
        list of flash messages added by
        :meth:`pyramid.interfaces.ISession.flash`
        """

# FILE pyramid/src/pyramid/testing.py
class DummyResource:
    """A dummy :app:`Pyramid` :term:`resource` object."""

    def __init__(
        self, __name__=None, __parent__=None, __provides__=None, **kw
    ):
        """The resource's ``__name__`` attribute will be set to the value of
        the ``__name__`` argument, and the resource's ``__parent__``
        attribute will be set to the value of the ``__parent__`` argument.
        If ``__provides__`` is specified, it should be an interface object or
        tuple of interface objects that will be attached to the resulting
        resource via :func:`zope.interface.alsoProvides`. Any extra keywords
        passed in the ``kw`` argument will be set as direct attributes of the
        resource object.

        .. note:: For backwards compatibility purposes, this class can also
           be imported as :class:`pyramid.testing.DummyModel`.
        """
        self.__name__ = __name__
        self.__parent__ = __parent__
        if __provides__ is not None:
            alsoProvides(self, __provides__)
        self.kw = kw
        self.__dict__.update(**kw)
        self.subs = {}

    def __setitem__(self, name, val):
        """When the ``__setitem__`` method is called, the object passed in as
        ``val`` will be decorated with a ``__parent__`` attribute pointing at
        the dummy resource and a ``__name__`` attribute that is the value of
        ``name``. The value will then be returned when the dummy resource's
        ``__getitem__`` is called with the name ``name``."""
        ...

    def __getitem__(self, name):
        """Return a named subobject (see ``__setitem__``)"""
        ...

    def __delitem__(self, name):
        ...

    def get(self, name, default=None):
        return self.subs.get(name, default)

    def values(self):
        """Return the values set by __setitem__"""
        ...

    def items(self):
        """Return the items set by __setitem__"""
        ...

    def keys(self):
        """Return the keys set by __setitem__"""
        ...

    def __bool__(self):
        ...

    def __len__(self):
        ...

    def __contains__(self, name):
        ...

    def clone(self, __name__=_marker, __parent__=_marker, **kw):
        """Create a clone of the resource object. If ``__name__`` or
        ``__parent__`` arguments are passed, use these values to override the
        existing ``__name__`` or ``__parent__`` of the resource. If any extra
        keyword args are passed in via the ``kw`` argument, use these
        keywords to add to or override existing resource keywords
        (attributes)."""
        ...

Based on the information above, please complete the function:

#CURRENT_FILE: pyramid/src/pyramid/testing.py
from contextlib import contextmanager
import copy
import os
from webob.acceptparse import create_accept_header
from zope.interface import alsoProvides, implementer
from pyramid.config import Configurator
from pyramid.decorator import reify
from pyramid.i18n import LocalizerRequestMixin
from pyramid.interfaces import IRequest, ISession
from pyramid.path import caller_package
from pyramid.registry import Registry
from pyramid.request import CallbackMethodsMixin
from pyramid.security import AuthenticationAPIMixin, SecurityAPIMixin
from pyramid.threadlocal import get_current_registry, manager
from pyramid.url import URLMethodsMixin
from pyramid.util import PYPY, InstancePropertyMixin
from pyramid.view import ViewMethodsMixin
from pyramid.response import _get_response_factory
from zope.component import getSiteManager


class DummySession(dict):
    created = None
    new = True

    def changed(self):
        pass

    def invalidate(self):
        self.clear()

    def flash(self, msg, queue='', allow_duplicate=True):
        storage = self.setdefault('_f_' + queue, [])
        if allow_duplicate or (msg not in storage):
            storage.append(msg)

    def pop_flash(self, queue=''):
        storage = self.pop('_f_' + queue, [])
        return storage

    def peek_flash(self, queue=''):
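One plausible completion of this stub — ``ISession.peek_flash`` requires that the queue remain in flash storage after the call, so a sketch reads the same ``'_f_' + queue`` key used by ``flash`` and ``pop_flash`` above, but with ``dict.get`` instead of ``pop``:

    def peek_flash(self, queue=''):
        # Read the queue's backing list without removing it from the session;
        # a queue that was never flashed to reads as an empty list.
        storage = self.get('_f_' + queue, [])
        return storage

With this body, ``flash('hi')`` followed by two ``peek_flash()`` calls returns ``['hi']`` both times, whereas two ``pop_flash()`` calls would return ``['hi']`` and then ``[]``.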
pyramid/src/pyramid/testing.py
pyramid.testing.DummySession.new_csrf_token
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE pyramid/src/pyramid/interfaces.py class ISession(IDict): """An interface representing a session (a web session object, usually accessed via ``request.session``. Keys and values of a session must be JSON-serializable. .. warning:: In :app:`Pyramid` 2.0 the session was changed to only be required to support types that can be serialized using JSON. It's recommended to switch any session implementations to support only JSON and to only store primitive types in sessions. See :ref:`upgrading_session_20` for more information about why this change was made. .. versionchanged:: 1.9 Sessions are no longer required to implement ``get_csrf_token`` and ``new_csrf_token``. CSRF token support was moved to the pluggable :class:`pyramid.interfaces.ICSRFStoragePolicy` configuration hook. .. versionchanged:: 2.0 Sessions now need to be JSON-serializable. This is more strict than the previous requirement of pickleable objects. """ # attributes created = Attribute('Integer representing Epoch time when created.') new = Attribute('Boolean attribute. If ``True``, the session is new.') # special methods def invalidate(): """Invalidate the session. The action caused by ``invalidate`` is implementation-dependent, but it should have the effect of completely dissociating any data stored in the session with the current request. It might set response values (such as one which clears a cookie), or it might not. An invalidated session may be used after the call to ``invalidate`` with the effect that a new session is created to store the data. This enables workflows requiring an entirely new session, such as in the case of changing privilege levels or preventing fixation attacks. """ def changed(): """Mark the session as changed. A user of a session should call this method after he or she mutates a mutable object that is *a value of the session* (it should not be required after mutating the session itself). For example, if the user has stored a dictionary in the session under the key ``foo``, and he or she does ``session['foo'] = {}``, ``changed()`` needn't be called. However, if subsequently he or she does ``session['foo']['a'] = 1``, ``changed()`` must be called for the sessioning machinery to notice the mutation of the internal dictionary.""" def flash(msg, queue='', allow_duplicate=True): """Push a flash message onto the end of the flash queue represented by ``queue``. An alternate flash message queue can used by passing an optional ``queue``, which must be a string. If ``allow_duplicate`` is false, if the ``msg`` already exists in the queue, it will not be re-added.""" def pop_flash(queue=''): """Pop a queue from the flash storage. The queue is removed from flash storage after this message is called. The queue is returned; it is a list of flash messages added by :meth:`pyramid.interfaces.ISession.flash`""" def peek_flash(queue=''): """Peek at a queue in the flash storage. The queue remains in flash storage after this message is called. 
The queue is returned; it is a list of flash messages added by :meth:`pyramid.interfaces.ISession.flash` """ # FILE pyramid/src/pyramid/testing.py class DummyResource: """A dummy :app:`Pyramid` :term:`resource` object.""" def __init__( self, __name__=None, __parent__=None, __provides__=None, **kw ): """The resource's ``__name__`` attribute will be set to the value of the ``__name__`` argument, and the resource's ``__parent__`` attribute will be set to the value of the ``__parent__`` argument. If ``__provides__`` is specified, it should be an interface object or tuple of interface objects that will be attached to the resulting resource via :func:`zope.interface.alsoProvides`. Any extra keywords passed in the ``kw`` argument will be set as direct attributes of the resource object. .. note:: For backwards compatibility purposes, this class can also be imported as :class:`pyramid.testing.DummyModel`. """ self.__name__ = __name__ self.__parent__ = __parent__ if __provides__ is not None: alsoProvides(self, __provides__) self.kw = kw self.__dict__.update(**kw) self.subs = {} def __setitem__(self, name, val): """When the ``__setitem__`` method is called, the object passed in as ``val`` will be decorated with a ``__parent__`` attribute pointing at the dummy resource and a ``__name__`` attribute that is the value of ``name``. The value will then be returned when dummy resource's ``__getitem__`` is called with the name ``name```.""" ... def __getitem__(self, name): """Return a named subobject (see ``__setitem__``)""" ... def __delitem__(self, name): ... def get(self, name, default=None): return self.subs.get(name, default) def values(self): """Return the values set by __setitem__""" ... def items(self): """Return the items set by __setitem__""" ... def keys(self): """Return the keys set by __setitem__""" ... def __bool__(self): ... def __len__(self): ... def __contains__(self, name): ... def clone(self, __name__=_marker, __parent__=_marker, **kw): """Create a clone of the resource object. If ``__name__`` or ``__parent__`` arguments are passed, use these values to override the existing ``__name__`` or ``__parent__`` of the resource. If any extra keyword args are passed in via the ``kw`` argument, use these keywords to add to or override existing resource keywords (attributes).""" ... # FILE pyramid/src/pyramid/testing.py class DummyResource: """A dummy :app:`Pyramid` :term:`resource` object.""" def __init__( self, __name__=None, __parent__=None, __provides__=None, **kw ): """The resource's ``__name__`` attribute will be set to the value of the ``__name__`` argument, and the resource's ``__parent__`` attribute will be set to the value of the ``__parent__`` argument. If ``__provides__`` is specified, it should be an interface object or tuple of interface objects that will be attached to the resulting resource via :func:`zope.interface.alsoProvides`. Any extra keywords passed in the ``kw`` argument will be set as direct attributes of the resource object. .. note:: For backwards compatibility purposes, this class can also be imported as :class:`pyramid.testing.DummyModel`. """ self.__name__ = __name__ self.__parent__ = __parent__ if __provides__ is not None: alsoProvides(self, __provides__) self.kw = kw self.__dict__.update(**kw) self.subs = {} def __setitem__(self, name, val): """When the ``__setitem__`` method is called, the object passed in as ``val`` will be decorated with a ``__parent__`` attribute pointing at the dummy resource and a ``__name__`` attribute that is the value of ``name``. 
The value will then be returned when dummy resource's ``__getitem__`` is called with the name ``name```.""" ... def __getitem__(self, name): """Return a named subobject (see ``__setitem__``)""" ... def __delitem__(self, name): ... def get(self, name, default=None): ... def values(self): """Return the values set by __setitem__""" ... def items(self): """Return the items set by __setitem__""" return self.subs.items() def keys(self): """Return the keys set by __setitem__""" ... def __bool__(self): ... def __len__(self): ... def __contains__(self, name): ... def clone(self, __name__=_marker, __parent__=_marker, **kw): """Create a clone of the resource object. If ``__name__`` or ``__parent__`` arguments are passed, use these values to override the existing ``__name__`` or ``__parent__`` of the resource. If any extra keyword args are passed in via the ``kw`` argument, use these keywords to add to or override existing resource keywords (attributes).""" ... # FILE pyramid/src/pyramid/interfaces.py class ISession(IDict): """An interface representing a session (a web session object, usually accessed via ``request.session``. Keys and values of a session must be JSON-serializable. .. warning:: In :app:`Pyramid` 2.0 the session was changed to only be required to support types that can be serialized using JSON. It's recommended to switch any session implementations to support only JSON and to only store primitive types in sessions. See :ref:`upgrading_session_20` for more information about why this change was made. .. versionchanged:: 1.9 Sessions are no longer required to implement ``get_csrf_token`` and ``new_csrf_token``. CSRF token support was moved to the pluggable :class:`pyramid.interfaces.ICSRFStoragePolicy` configuration hook. .. versionchanged:: 2.0 Sessions now need to be JSON-serializable. This is more strict than the previous requirement of pickleable objects. """ def invalidate(): """Invalidate the session. The action caused by ``invalidate`` is implementation-dependent, but it should have the effect of completely dissociating any data stored in the session with the current request. It might set response values (such as one which clears a cookie), or it might not. An invalidated session may be used after the call to ``invalidate`` with the effect that a new session is created to store the data. This enables workflows requiring an entirely new session, such as in the case of changing privilege levels or preventing fixation attacks. """ def changed(): """Mark the session as changed. A user of a session should call this method after he or she mutates a mutable object that is *a value of the session* (it should not be required after mutating the session itself). For example, if the user has stored a dictionary in the session under the key ``foo``, and he or she does ``session['foo'] = {}``, ``changed()`` needn't be called. However, if subsequently he or she does ``session['foo']['a'] = 1``, ``changed()`` must be called for the sessioning machinery to notice the mutation of the internal dictionary.""" ... def flash(msg, queue='', allow_duplicate=True): """Push a flash message onto the end of the flash queue represented by ``queue``. An alternate flash message queue can used by passing an optional ``queue``, which must be a string. If ``allow_duplicate`` is false, if the ``msg`` already exists in the queue, it will not be re-added.""" ... def pop_flash(queue=''): """Pop a queue from the flash storage. The queue is removed from flash storage after this message is called. 
The queue is returned; it is a list of flash messages added by :meth:`pyramid.interfaces.ISession.flash`"""
    ...
    def peek_flash(queue=''):
        """Peek at a queue in the flash storage. The queue remains in flash storage after this message is called. The queue is returned; it is a list of flash messages added by :meth:`pyramid.interfaces.ISession.flash`"""
    ...
# FILE pyramid/build/lib/pyramid/config/__init__.py
class Configurator(
    """
    A Configurator is used to configure a :app:`Pyramid` :term:`application registry`.

    The Configurator lifecycle can be managed by using a context manager to automatically handle calling :meth:`pyramid.config.Configurator.begin` and :meth:`pyramid.config.Configurator.end` as well as :meth:`pyramid.config.Configurator.commit`.

    .. code-block:: python

        with Configurator(settings=settings) as config:
            config.add_route('home', '/')
            app = config.make_wsgi_app()

    If the ``registry`` argument is not ``None``, it must be an instance of the :class:`pyramid.registry.Registry` class representing the registry to configure. If ``registry`` is ``None``, the configurator will create a :class:`pyramid.registry.Registry` instance itself; it will also perform some default configuration that would not otherwise be done. After its construction, the configurator may be used to add further configuration to the registry.

    .. warning:: If ``registry`` is assigned the above-mentioned class instance, all other constructor arguments are ignored, with the exception of ``package``.

    If the ``package`` argument is passed, it must be a reference to a Python :term:`package` (e.g. ``sys.modules['thepackage']``) or a :term:`dotted Python name` to the same. This value is used as a basis to convert relative paths passed to various configuration methods, such as methods which accept a ``renderer`` argument, into absolute paths. If ``None`` is passed (the default), the package is assumed to be the Python package in which the *caller* of the ``Configurator`` constructor lives.

    If the ``root_package`` is passed, it will propagate through the configuration hierarchy as a way for included packages to locate resources relative to the package in which the main ``Configurator`` was created. If ``None`` is passed (the default), the ``root_package`` will be derived from the ``package`` argument.
The ``package`` attribute is always pointing at the package being included when using :meth:`.include`, whereas the ``root_package`` does not change. If the ``settings`` argument is passed, it should be a Python dictionary representing the :term:`deployment settings` for this application. These are later retrievable using the :attr:`pyramid.registry.Registry.settings` attribute (aka ``request.registry.settings``). If the ``root_factory`` argument is passed, it should be an object representing the default :term:`root factory` for your application or a :term:`dotted Python name` to the same. If it is ``None``, a default root factory will be used. If ``security_policy`` is passed, it should be an instance of a :term:`security policy` or a :term:`dotted Python name` to the same. If ``authentication_policy`` is passed, it should be an instance of an :term:`authentication policy` or a :term:`dotted Python name` to the same. If ``authorization_policy`` is passed, it should be an instance of an :term:`authorization policy` or a :term:`dotted Python name` to the same. .. note:: A ``ConfigurationError`` will be raised when an authorization policy is supplied without also supplying an authentication policy (authorization requires authentication). If ``renderers`` is ``None`` (the default), a default set of :term:`renderer` factories is used. Else, it should be a list of tuples representing a set of renderer factories which should be configured into this application, and each tuple representing a set of positional values that should be passed to :meth:`pyramid.config.Configurator.add_renderer`. If ``debug_logger`` is not passed, a default debug logger that logs to a logger will be used (the logger name will be the package name of the *caller* of this configurator). If it is passed, it should be an instance of the :class:`logging.Logger` (PEP 282) standard library class or a Python logger name. The debug logger is used by :app:`Pyramid` itself to log warnings and authorization debugging information. If ``locale_negotiator`` is passed, it should be a :term:`locale negotiator` implementation or a :term:`dotted Python name` to same. See :ref:`custom_locale_negotiator`. If ``request_factory`` is passed, it should be a :term:`request factory` implementation or a :term:`dotted Python name` to the same. See :ref:`changing_the_request_factory`. By default it is ``None``, which means use the default request factory. If ``response_factory`` is passed, it should be a :term:`response factory` implementation or a :term:`dotted Python name` to the same. See :ref:`changing_the_response_factory`. By default it is ``None``, which means use the default response factory. If ``default_permission`` is passed, it should be a :term:`permission` string to be used as the default permission for all view configuration registrations performed against this Configurator. An example of a permission string:``'view'``. Adding a default permission makes it unnecessary to protect each view configuration with an explicit permission, unless your application policy requires some exception for a particular view. By default, ``default_permission`` is ``None``, meaning that view configurations which do not explicitly declare a permission will always be executable by entirely anonymous users (any authorization policy in effect is ignored). .. seealso:: See also :ref:`setting_a_default_permission`. If ``session_factory`` is passed, it should be an object which implements the :term:`session factory` interface. 
If a nondefault value is passed, the ``session_factory`` will be used to create a session object when ``request.session`` is accessed. Note that the same outcome can be achieved by calling :meth:`pyramid.config.Configurator.set_session_factory`. By default, this argument is ``None``, indicating that no session factory will be configured (and thus accessing ``request.session`` will throw an error) unless ``set_session_factory`` is called later during configuration. If ``autocommit`` is ``True``, every method called on the configurator will cause an immediate action, and no configuration conflict detection will be used. If ``autocommit`` is ``False``, most methods of the configurator will defer their action until :meth:`pyramid.config.Configurator.commit` is called. When :meth:`pyramid.config.Configurator.commit` is called, the actions implied by the called methods will be checked for configuration conflicts unless ``autocommit`` is ``True``. If a conflict is detected, a ``ConfigurationConflictError`` will be raised. Calling :meth:`pyramid.config.Configurator.make_wsgi_app` always implies a final commit. If ``default_view_mapper`` is passed, it will be used as the default :term:`view mapper` factory for view configurations that don't otherwise specify one (see :class:`pyramid.interfaces.IViewMapperFactory`). If ``default_view_mapper`` is not passed, a superdefault view mapper will be used. If ``exceptionresponse_view`` is passed, it must be a :term:`view callable` or ``None``. If it is a view callable, it will be used as an exception view callable when an :term:`exception response` is raised. If ``exceptionresponse_view`` is ``None``, no exception response view will be registered, and all raised exception responses will be bubbled up to Pyramid's caller. By default, the ``pyramid.httpexceptions.default_exceptionresponse_view`` function is used as the ``exceptionresponse_view``. If ``route_prefix`` is passed, all routes added with :meth:`pyramid.config.Configurator.add_route` will have the specified path prepended to their pattern. If ``introspection`` is passed, it must be a boolean value. If it's ``True``, introspection values during actions will be kept for use for tools like the debug toolbar. If it's ``False``, introspection values provided by registrations will be ignored. By default, it is ``True``. .. versionadded:: 1.1 The ``exceptionresponse_view`` argument. .. versionadded:: 1.2 The ``route_prefix`` argument. .. versionadded:: 1.3 The ``introspection`` argument. .. versionadded:: 1.6 The ``root_package`` argument. The ``response_factory`` argument. .. versionadded:: 1.9 The ability to use the configurator as a context manager with the ``with``-statement to make threadlocal configuration available for further configuration with an implicit commit. 
""" def __init__( self, registry=None, package=None, settings=None, root_factory=None, security_policy=None, authentication_policy=None, authorization_policy=None, renderers=None, debug_logger=None, locale_negotiator=None, request_factory=None, response_factory=None, default_permission=None, session_factory=None, default_view_mapper=None, autocommit=False, exceptionresponse_view=default_exceptionresponse_view, route_prefix=None, introspection=True, root_package=None, ): if package is None: package = caller_package() if root_package is None: root_package = package name_resolver = DottedNameResolver(package) self.name_resolver = name_resolver self.package_name = name_resolver.get_package_name() self.package = name_resolver.get_package() self.root_package = root_package self.registry = registry self.autocommit = autocommit self.route_prefix = route_prefix self.introspection = introspection if registry is None: registry = Registry(self.package_name) self.registry = registry self.setup_registry( settings=settings, root_factory=root_factory, authentication_policy=authentication_policy, authorization_policy=authorization_policy, security_policy=security_policy, renderers=renderers, debug_logger=debug_logger, locale_negotiator=locale_negotiator, request_factory=request_factory, response_factory=response_factory, default_permission=default_permission, session_factory=session_factory, default_view_mapper=default_view_mapper, exceptionresponse_view=exceptionresponse_view, ) def setup_registry( """When you pass a non-``None`` ``registry`` argument to the :term:`Configurator` constructor, no initial setup is performed against the registry. This is because the registry you pass in may have already been initialized for use under :app:`Pyramid` via a different configurator. However, in some circumstances (such as when you want to use a global registry instead of a registry created as a result of the Configurator constructor), or when you want to reset the initial setup of a registry, you *do* want to explicitly initialize the registry associated with a Configurator for use under :app:`Pyramid`. Use ``setup_registry`` to do this initialization. ``setup_registry`` configures settings, a root factory, security policies, renderers, a debug logger, a locale negotiator, and various other settings using the configurator's current registry, as per the descriptions in the Configurator constructor.""" ... def _make_spec(self, path_or_spec): ... def _fix_registry(self): """Fix up a ZCA component registry that is not a pyramid.registry.Registry by adding analogues of ``has_listeners``, ``notify``, ``queryAdapterOrSelf``, and ``registerSelfAdapter`` through monkey-patching.""" ... def _get_introspector(self): ... def _set_introspector(self, introspector): ... def _del_introspector(self): ... def include(self, callable, route_prefix=None): """Include a configuration callable, to support imperative application extensibility. .. warning:: In versions of :app:`Pyramid` prior to 1.2, this function accepted ``*callables``, but this has been changed to support only a single callable. A configuration callable should be a callable that accepts a single argument named ``config``, which will be an instance of a :term:`Configurator`. However, be warned that it will not be the same configurator instance on which you call this method. The code which runs as a result of calling the callable should invoke methods on the configurator passed to it which add configuration state. The return value of a callable will be ignored. 
Values allowed to be presented via the ``callable`` argument to this method: any callable Python object or any :term:`dotted Python name` which resolves to a callable Python object. It may also be a Python :term:`module`, in which case, the module will be searched for a callable named ``includeme``, which will be treated as the configuration callable. For example, if the ``includeme`` function below lives in a module named ``myapp.myconfig``:

.. code-block:: python
   :linenos:

   # myapp.myconfig module

   def my_view(request):
       from pyramid.response import Response
       return Response('OK')

   def includeme(config):
       config.add_view(my_view)

You might cause it to be included within your Pyramid application like so:

.. code-block:: python
   :linenos:

   from pyramid.config import Configurator

   def main(global_config, **settings):
       config = Configurator()
       config.include('myapp.myconfig.includeme')

Because the function is named ``includeme``, the function name can also be omitted from the dotted name reference:

.. code-block:: python
   :linenos:

   from pyramid.config import Configurator

   def main(global_config, **settings):
       config = Configurator()
       config.include('myapp.myconfig')

Included configuration statements will be overridden by local configuration statements if an included callable causes a configuration conflict by registering something with the same configuration parameters.

If the ``route_prefix`` is supplied, it must be a string and will have a similar effect to using :meth:`pyramid.config.Configurator.route_prefix_context`. Any calls to :meth:`pyramid.config.Configurator.add_route` within the included callable will have their pattern prefixed with the value of ``route_prefix``. This can be used to help mount a set of routes at a different location than the included callable's author intended, while still maintaining the same route names. For example:

.. code-block:: python
   :linenos:

   from pyramid.config import Configurator

   def included(config):
       config.add_route('show_users', '/show')

   def main(global_config, **settings):
       config = Configurator()
       config.include(included, route_prefix='/users')

In the above configuration, the ``show_users`` route will have an effective route pattern of ``/users/show``, instead of ``/show`` because the ``route_prefix`` argument will be prepended to the pattern.

.. versionadded:: 1.2
   The ``route_prefix`` parameter.

.. versionchanged:: 1.9
   The included function is wrapped with a call to :meth:`pyramid.config.Configurator.begin` and :meth:`pyramid.config.Configurator.end` while it is executed.
"""
...
def add_directive(self, name, directive, action_wrap=True):
    """ Add a directive method to the configurator.

    .. warning:: This method is typically only used by :app:`Pyramid` framework extension authors, not by :app:`Pyramid` application developers.

    Framework extenders can add directive methods to a configurator by instructing their users to call ``config.add_directive('somename', 'some.callable')``. This will make ``some.callable`` accessible as ``config.somename``. ``some.callable`` should be a function which accepts ``config`` as a first argument, and arbitrary positional and keyword arguments following. It should use config.action as necessary to perform actions. Directive methods can then be invoked like 'built-in' directives such as ``add_view``, ``add_route``, etc.

    The ``action_wrap`` argument should be ``True`` for directives which perform ``config.action`` with potentially conflicting discriminators.
``action_wrap`` will cause the directive to be wrapped in a decorator which provides more accurate conflict cause information. ``add_directive`` does not participate in conflict detection, and later calls to ``add_directive`` will override earlier calls. """ ... def __getattr__(self, name): ... def with_package(self, package): """Return a new Configurator instance with the same registry as this configurator. ``package`` may be an actual Python package object or a :term:`dotted Python name` representing a package.""" ... def maybe_dotted(self, dotted): """Resolve the :term:`dotted Python name` ``dotted`` to a global Python object. If ``dotted`` is not a string, return it without attempting to do any name resolution. If ``dotted`` is a relative dotted name (e.g. ``.foo.bar``, consider it relative to the ``package`` argument supplied to this Configurator's constructor.""" ... def absolute_asset_spec(self, relative_spec): """Resolve the potentially relative :term:`asset specification` string passed as ``relative_spec`` into an absolute asset specification string and return the string. Use the ``package`` of this configurator as the package to which the asset specification will be considered relative when generating an absolute asset specification. If the provided ``relative_spec`` argument is already absolute, or if the ``relative_spec`` is not a string, it is simply returned.""" ... def begin(self, request=_marker): """Indicate that application or test configuration has begun. This pushes a dictionary containing the :term:`application registry` implied by ``registry`` attribute of this configurator and the :term:`request` implied by the ``request`` argument onto the :term:`thread local` stack consulted by various :mod:`pyramid.threadlocal` API functions. If ``request`` is not specified and the registry owned by the configurator is already pushed as the current threadlocal registry then this method will keep the current threadlocal request unchanged. .. versionchanged:: 1.8 The current threadlocal request is propagated if the current threadlocal registry remains unchanged. """ ... def end(self): """Indicate that application or test configuration has ended. This pops the last value pushed onto the :term:`thread local` stack (usually by the ``begin`` method) and returns that value. """ return self.manager.pop() def __enter__(self): ... def __exit__(self, exc_type, exc_value, exc_traceback): ... def scan( """Scan a Python package and any of its subpackages for objects marked with :term:`configuration decoration` such as :class:`pyramid.view.view_config`. Any decorated object found will influence the current configuration state. The ``package`` argument should be a Python :term:`package` or module object (or a :term:`dotted Python name` which refers to such a package or module). If ``package`` is ``None``, the package of the *caller* is used. The ``categories`` argument, if provided, should be the :term:`Venusian` 'scan categories' to use during scanning. Providing this argument is not often necessary; specifying scan categories is an extremely advanced usage. By default, ``categories`` is ``['pyramid']`` which will execute only :app:`Pyramid`-related Venusian decorator callbacks such as from :class:`pyramid.view.view_config`. See the :term:`Venusian` documentation for more information about limiting a scan by using an explicit set of categories. Pass ``None`` to pick up *all* Venusian decorators. The ``onerror`` argument, if provided, should be a Venusian ``onerror`` callback function. 
The onerror function is passed to :meth:`venusian.Scanner.scan` to influence error behavior when an exception is raised during the scanning process. See the :term:`Venusian` documentation for more information about ``onerror`` callbacks. The ``ignore`` argument, if provided, should be a Venusian ``ignore`` value. Providing an ``ignore`` argument allows the scan to ignore particular modules, packages, or global objects during a scan. ``ignore`` can be a string or a callable, or a list containing strings or callables. The simplest usage of ``ignore`` is to provide a module or package by providing a full path to its dotted name. For example: ``config.scan(ignore='my.module.subpackage')`` would ignore the ``my.module.subpackage`` package during a scan, which would prevent the subpackage and any of its submodules from being imported and scanned. See the :term:`Venusian` documentation for more information about the ``ignore`` argument. To perform a ``scan``, Pyramid creates a Venusian ``Scanner`` object. The ``kw`` argument represents a set of keyword arguments to pass to the Venusian ``Scanner`` object's constructor. See the :term:`venusian` documentation (its ``Scanner`` class) for more information about the constructor. By default, the only keyword arguments passed to the Scanner constructor are ``{'config':self}`` where ``self`` is this configurator object. This services the requirement of all built-in Pyramid decorators, but extension systems may require additional arguments. Providing this argument is not often necessary; it's an advanced usage. .. versionadded:: 1.1 The ``**kw`` argument. .. versionadded:: 1.3 The ``ignore`` argument. .. versionchanged:: 2.0 The ``categories`` argument now defaults to ``['pyramid']`` instead of ``None`` to control which decorator callbacks are executed. """ ... def make_wsgi_app(self): """Commits any pending configuration statements, sends a :class:`pyramid.events.ApplicationCreated` event to all listeners, adds this configuration's registry to :attr:`pyramid.config.global_registries`, and returns a :app:`Pyramid` WSGI application representing the committed configuration state.""" ... 
Based on the information above, please complete the function:

#CURRENT_FILE: pyramid/src/pyramid/testing.py
from contextlib import contextmanager
import copy
import os

from webob.acceptparse import create_accept_header
from zope.interface import alsoProvides, implementer

from pyramid.config import Configurator
from pyramid.decorator import reify
from pyramid.i18n import LocalizerRequestMixin
from pyramid.interfaces import IRequest, ISession
from pyramid.path import caller_package
from pyramid.registry import Registry
from pyramid.request import CallbackMethodsMixin
from pyramid.security import AuthenticationAPIMixin, SecurityAPIMixin
from pyramid.threadlocal import get_current_registry, manager
from pyramid.url import URLMethodsMixin
from pyramid.util import PYPY, InstancePropertyMixin
from pyramid.view import ViewMethodsMixin
from pyramid.response import _get_response_factory
from zope.component import getSiteManager

class DummySession(dict):
    created = None
    new = True

    def changed(self):
        pass

    def invalidate(self):
        self.clear()

    def flash(self, msg, queue='', allow_duplicate=True):
        storage = self.setdefault('_f_' + queue, [])
        if allow_duplicate or (msg not in storage):
            storage.append(msg)

    def pop_flash(self, queue=''):
        storage = self.pop('_f_' + queue, [])
        return storage

    def peek_flash(self, queue=''):
        storage = self.get('_f_' + queue, [])
        return storage

    def new_csrf_token(self):
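        # NOTE (editor): a hedged sketch of one possible completion, not a
        # verified copy of the upstream source. The fixed token value and
        # the '_csrft_' storage key are assumptions modeled on Pyramid's
        # testing conventions; a deterministic token keeps tests repeatable.
        token = '0123456789012345678901234567890123456789'
        self['_csrft_'] = token
        return token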
pyramid/src/pyramid/testing.py
pyramid.view.view_defaults
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE pyramid/src/pyramid/view.py class notfound_view_config: """ .. versionadded:: 1.3 An analogue of :class:`pyramid.view.view_config` which registers a :term:`Not Found View` using :meth:`pyramid.config.Configurator.add_notfound_view`. The ``notfound_view_config`` constructor accepts most of the same arguments as the constructor of :class:`pyramid.view.view_config`. It can be used in the same places, and behaves in largely the same way, except it always registers a not found exception view instead of a 'normal' view. Example: .. code-block:: python from pyramid.view import notfound_view_config from pyramid.response import Response @notfound_view_config() def notfound(request): return Response('Not found!', status='404 Not Found') All arguments except ``append_slash`` have the same meaning as :meth:`pyramid.view.view_config` and each predicate argument restricts the set of circumstances under which this notfound view will be invoked. If ``append_slash`` is ``True``, when the Not Found View is invoked, and the current path info does not end in a slash, the notfound logic will attempt to find a :term:`route` that matches the request's path info suffixed with a slash. If such a route exists, Pyramid will issue a redirect to the URL implied by the route; if it does not, Pyramid will return the result of the view callable provided as ``view``, as normal. If the argument provided as ``append_slash`` is not a boolean but instead implements :class:`~pyramid.interfaces.IResponse`, the append_slash logic will behave as if ``append_slash=True`` was passed, but the provided class will be used as the response class instead of the default :class:`~pyramid.httpexceptions.HTTPTemporaryRedirect` response class when a redirect is performed. For example: .. code-block:: python from pyramid.httpexceptions import ( HTTPMovedPermanently, HTTPNotFound ) @notfound_view_config(append_slash=HTTPMovedPermanently) def aview(request): return HTTPNotFound('not found') The above means that a redirect to a slash-appended route will be attempted, but instead of :class:`~pyramid.httpexceptions.HTTPTemporaryRedirect` being used, :class:`~pyramid.httpexceptions.HTTPMovedPermanently will be used` for the redirect response if a slash-appended route is found. See :ref:`changing_the_notfound_view` for detailed usage information. .. versionchanged:: 1.9.1 Added the ``_depth`` and ``_category`` arguments. """ def __init__(self, **settings): self.__dict__.update(settings) def __call__(self, wrapped): settings = self.__dict__.copy() depth = settings.pop('_depth', 0) category = settings.pop('_category', 'pyramid') def callback(context, name, ob): config = context.config.with_package(info.module) config.add_notfound_view(view=ob, **settings) info = self.venusian.attach( wrapped, callback, category=category, depth=depth + 1 ) if info.scope == 'class': # if the decorator was attached to a method in a class, or # otherwise executed at class scope, we need to set an # 'attr' into the settings if one isn't already in there if settings.get('attr') is None: settings['attr'] = wrapped.__name__ settings['_info'] = info.codeinfo # fbo "action_method" return wrapped # FILE pyramid/src/pyramid/view.py class exception_view_config: """ .. versionadded:: 1.8 An analogue of :class:`pyramid.view.view_config` which registers an :term:`exception view` using :meth:`pyramid.config.Configurator.add_exception_view`. 
The ``exception_view_config`` constructor requires an exception context, and additionally accepts most of the same arguments as the constructor of :class:`pyramid.view.view_config`. It can be used in the same places, and behaves in largely the same way, except it always registers an exception view instead of a "normal" view that dispatches on the request :term:`context`. Example: .. code-block:: python from pyramid.view import exception_view_config from pyramid.response import Response @exception_view_config(ValueError, renderer='json') def error_view(request): return {'error': str(request.exception)} All arguments passed to this function have the same meaning as :meth:`pyramid.view.view_config`, and each predicate argument restricts the set of circumstances under which this exception view will be invoked. .. versionchanged:: 1.9.1 Added the ``_depth`` and ``_category`` arguments. """ venusian = venusian def __init__(self, *args, **settings): if 'context' not in settings and len(args) > 0: exception, args = args[0], args[1:] settings['context'] = exception if len(args) > 0: raise ConfigurationError('unknown positional arguments') self.__dict__.update(settings) def __call__(self, wrapped): settings = self.__dict__.copy() depth = settings.pop('_depth', 0) category = settings.pop('_category', 'pyramid') def callback(context, name, ob): config = context.config.with_package(info.module) config.add_exception_view(view=ob, **settings) info = self.venusian.attach( wrapped, callback, category=category, depth=depth + 1 ) if info.scope == 'class': # if the decorator was attached to a method in a class, or # otherwise executed at class scope, we need to set an # 'attr' in the settings if one isn't already in there if settings.get('attr') is None: settings['attr'] = wrapped.__name__ settings['_info'] = info.codeinfo # fbo "action_method" return wrapped # FILE pyramid/src/pyramid/view.py class view_config: """A function, class or method :term:`decorator` which allows a developer to create view registrations nearer to a :term:`view callable` definition than use :term:`imperative configuration` to do the same. For example, this code in a module ``views.py``:: from resources import MyResource @view_config(name='my_view', context=MyResource, permission='read', route_name='site1') def my_view(context, request): return 'OK' Might replace the following call to the :meth:`pyramid.config.Configurator.add_view` method:: import views from resources import MyResource config.add_view(views.my_view, context=MyResource, name='my_view', permission='read', route_name='site1') .. note: :class:`pyramid.view.view_config` is also importable, for backwards compatibility purposes, as the name :class:`pyramid.view.bfg_view`. :class:`pyramid.view.view_config` supports the following keyword arguments: ``context``, ``exception``, ``permission``, ``name``, ``request_type``, ``route_name``, ``request_method``, ``request_param``, ``containment``, ``xhr``, ``accept``, ``header``, ``path_info``, ``custom_predicates``, ``decorator``, ``mapper``, ``http_cache``, ``require_csrf``, ``match_param``, ``physical_path``, and ``view_options``. The meanings of these arguments are the same as the arguments passed to :meth:`pyramid.config.Configurator.add_view`. If any argument is left out, its default will be the equivalent ``add_view`` default. Two additional keyword arguments which will be passed to the :term:`venusian` ``attach`` function are ``_depth`` and ``_category``. 
``_depth`` is provided for people who wish to reuse this class from another decorator. The default value is ``0`` and should be specified relative to the ``view_config`` invocation. It will be passed in to the :term:`venusian` ``attach`` function as the depth of the callstack when Venusian checks if the decorator is being used in a class or module context. It's not often used, but it can be useful in this circumstance.

``_category`` sets the decorator category name. It can be useful in combination with the ``category`` argument of ``scan`` to control which views should be processed.

See the :py:func:`venusian.attach` function in Venusian for more information about the ``_depth`` and ``_category`` arguments.

.. seealso:: See also :ref:`mapping_views_using_a_decorator_section` for details about using :class:`pyramid.view.view_config`.

.. note:: Because of a limitation with ``venusian.Scanner.scan``, note that ``view_config`` will work only for the following conditions.

    - In Python packages that have an ``__init__.py`` file in their directory. .. seealso:: See also https://github.com/Pylons/venusian/issues/68

    - On module top level members.

    - On Python source (``.py``) files. Compiled Python files (``.pyc``, ``.pyo``) without a corresponding source file are ignored.

.. seealso:: See also the `Venusian documentation <https://docs.pylonsproject.org/projects/venusian/en/latest/#using-venusian>`_.
"""
venusian = venusian  # for testing injection

def __init__(self, **settings):
    if 'for_' in settings:
        if settings.get('context') is None:
            settings['context'] = settings['for_']
    self.__dict__.update(settings)
    self._get_info()

def _get_info(self):
    depth = self.__dict__.get('_depth', 0)
    frame = sys._getframe(depth + 2)
    frameinfo = inspect.getframeinfo(frame)
    sourceline = frameinfo[3][0].strip()
    self._info = frameinfo[0], frameinfo[1], frameinfo[2], sourceline

def __call__(self, wrapped):
    settings = self.__dict__.copy()
    depth = settings.pop('_depth', 0)
    category = settings.pop('_category', 'pyramid')

    def callback(context, name, ob):
        config = context.config.with_package(info.module)
        config.add_view(view=ob, **settings)

    info = self.venusian.attach(
        wrapped, callback, category=category, depth=depth + 1
    )
    if info.scope == 'class':
        # if the decorator was attached to a method in a class, or
        # otherwise executed at class scope, we need to set an
        # 'attr' into the settings if one isn't already in there
        if settings.get('attr') is None:
            settings['attr'] = wrapped.__name__
    return wrapped

Based on the information above, please complete the function:

#CURRENT_FILE: pyramid/src/pyramid/view.py
import inspect
import itertools
import sys

import venusian
from zope.interface import providedBy

from pyramid.exceptions import ConfigurationError, PredicateMismatch
from pyramid.httpexceptions import HTTPNotFound, HTTPTemporaryRedirect, default_exceptionresponse_view
from pyramid.interfaces import IExceptionViewClassifier, IMultiView, IRequest, IRoutesMapper, ISecuredView, IView, IViewClassifier
from pyramid.threadlocal import get_current_registry, manager
from pyramid.util import hide_attrs, reraise as reraise_

def view_defaults(**settings):
    """A class :term:`decorator` which, when applied to a
class, will provide defaults for all view configurations that use the class. This decorator accepts all the arguments accepted by :meth:`pyramid.view.view_config`, and each has the same meaning. See :ref:`view_defaults` for more information. """
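    # NOTE (editor): hedged sketch of a completion. Attaching the settings
    # as a ``__view_defaults__`` attribute matches the name that Pyramid's
    # view configuration machinery looks up on a decorated class; the exact
    # body is an assumption, not a verified copy of the source.
    def wrap(wrapped):
        wrapped.__view_defaults__ = settings
        return wrapped

    return wrap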
pyramid/src/pyramid/view.py
pyramid.util.bytes_
You are a Python programmer. Here is all the context you may find useful to complete the function:

# FILE pyramid/src/pyramid/util.py
def get_callable_name(name):
    """
    Verifies that the ``name`` is ascii and will raise a ``ConfigurationError`` if it is not.
    """
    try:
        return ascii_(name)
    except (UnicodeEncodeError, UnicodeDecodeError):
        # avoid circular dependency
        from pyramid.exceptions import ConfigurationError

        msg = (
            '`name="%s"` is invalid. `name` must be ascii because it is '
            'used on __name__ of the method'
        )
        raise ConfigurationError(msg % name)

# FILE pyramid/src/pyramid/util.py
def ascii_(s):
    """
    If ``s`` is an instance of ``str``, return ``s.encode('ascii')``, otherwise return ``str(s, 'ascii', 'strict')``
    """
    if isinstance(s, str):
        s = s.encode('ascii')
    return str(s, 'ascii', 'strict')

# FILE pyramid/src/pyramid/util.py
def text_(s, encoding='latin-1', errors='strict'):
    """If ``s`` is an instance of ``bytes``, return ``s.decode(encoding, errors)``, otherwise return ``s``"""
    if isinstance(s, bytes):
        return s.decode(encoding, errors)
    return s

# FILE pyramid/src/pyramid/util.py
class WeakOrderedSet:
    """Maintain a set of items. Each item is stored as a weakref to avoid extending their lifetime. The values may be iterated over or the last item added may be accessed via the ``last`` property. If items are added more than once, the most recent addition will be remembered in the order:

        order = WeakOrderedSet()
        order.add('1')
        order.add('2')
        order.add('1')

        list(order) == ['2', '1']
        order.last == '1'
    """
    def __init__(self):
        self._items = {}
        self._order = []
    def add(self, item):
        """Add an item to the set."""
        ...
    def _remove_by_id(self, oid):
        """Remove an item from the set."""
        ...
    def remove(self, item):
        """Remove an item from the set."""
        ...
    def empty(self):
        """Clear all objects from the set."""
        self._items = {}
        self._order = []
    def __len__(self):
        ...
    def __contains__(self, item):
        ...
    def __iter__(self):
        ...
    def last(self):
        ...

Based on the information above, please complete the function:

#CURRENT_FILE: pyramid/src/pyramid/util.py
from contextlib import contextmanager
import functools
from hmac import compare_digest
import inspect
import platform
import weakref

from pyramid.path import DottedNameResolver as _DottedNameResolver

try:
    # ``__pypy__`` exists only on PyPy; guarding the import keeps this
    # module importable on CPython (the original prompt imported it
    # unconditionally, which would fail there).
    import __pypy__
except ImportError:
    __pypy__ = None

import pyramid.decorator
from pyramid.exceptions import ConfigurationError, CyclicDependencyError

def bytes_(s, encoding='latin-1', errors='strict'):
    """If ``s`` is an instance of ``str``, return ``s.encode(encoding, errors)``, otherwise return ``s``"""
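    # NOTE (editor): sketch of a completion that simply mirrors ``text_``
    # above in the opposite direction, per the docstring; it has not been
    # checked against the upstream source line-for-line.
    if isinstance(s, str):
        return s.encode(encoding, errors)
    return s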
pyramid/src/pyramid/util.py
pyramid.scripts.common.parse_vars
You are a Python programmer. Here is all the context you may find useful to complete the function:

Based on the information above, please complete the function:

#CURRENT_FILE: pyramid/src/pyramid/scripts/common.py
import plaster

def parse_vars(args):
    """
    Given variables like ``['a=b', 'c=d']``, turn them into ``{'a': 'b', 'c': 'd'}``
    """
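    # NOTE (editor): hedged sketch of a completion. Splitting on the first
    # '=' and raising on malformed input follows the docstring; the exact
    # error message is an invented placeholder, not the upstream text.
    result = {}
    for arg in args:
        if '=' not in arg:
            raise ValueError('Variable assignment %r invalid (no "=")' % arg)
        name, value = arg.split('=', 1)
        result[name] = value
    return result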
pyramid/src/pyramid/scripts/common.py
pyramid.scripts.pviews.PViewsCommand._find_multi_routes
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE pyramid/src/pyramid/traversal.py class ResourceTreeTraverser: """A resource tree traverser that should be used (for speed) when every resource in the tree supplies a ``__name__`` and ``__parent__`` attribute (ie. every resource in the tree is :term:`location` aware) .""" def __init__(self, root): self.root = root def __call__(self, request): environ = request.environ matchdict = request.matchdict if matchdict is not None: path = matchdict.get('traverse', '/') or '/' if is_nonstr_iter(path): # this is a *traverse stararg (not a {traverse}) # routing has already decoded these elements, so we just # need to join them path = '/' + '/'.join(path) or '/' subpath = matchdict.get('subpath', ()) if not is_nonstr_iter(subpath): # this is not a *subpath stararg (just a {subpath}) # routing has already decoded this string, so we just need # to split it subpath = split_path_info(subpath) else: # this request did not match a route subpath = () try: # empty if mounted under a path in mod_wsgi, for example path = request.path_info or '/' except KeyError: # if environ['PATH_INFO'] is just not there path = '/' except UnicodeDecodeError as e: raise URLDecodeError( e.encoding, e.object, e.start, e.end, e.reason ) if self.VH_ROOT_KEY in environ: # HTTP_X_VHM_ROOT vroot_path = decode_path_info(environ[self.VH_ROOT_KEY]) vroot_tuple = split_path_info(vroot_path) vpath = ( vroot_path + path ) # both will (must) be unicode or asciistr vroot_idx = len(vroot_tuple) - 1 else: vroot_tuple = () vpath = path vroot_idx = -1 root = self.root ob = vroot = root if vpath == '/': # invariant: vpath must not be empty # prevent a call to traversal_path if we know it's going # to return the empty tuple vpath_tuple = () else: # we do dead reckoning here via tuple slicing instead of # pushing and popping temporary lists for speed purposes # and this hurts readability; apologies i = 0 view_selector = self.VIEW_SELECTOR vpath_tuple = split_path_info(vpath) for segment in vpath_tuple: if segment[:2] == view_selector: return { 'context': ob, 'view_name': segment[2:], 'subpath': vpath_tuple[i + 1 :], 'traversed': vpath_tuple[: vroot_idx + i + 1], 'virtual_root': vroot, 'virtual_root_path': vroot_tuple, 'root': root, } try: getitem = ob.__getitem__ except AttributeError: return { 'context': ob, 'view_name': segment, 'subpath': vpath_tuple[i + 1 :], 'traversed': vpath_tuple[: vroot_idx + i + 1], 'virtual_root': vroot, 'virtual_root_path': vroot_tuple, 'root': root, } try: next = getitem(segment) except KeyError: return { 'context': ob, 'view_name': segment, 'subpath': vpath_tuple[i + 1 :], 'traversed': vpath_tuple[: vroot_idx + i + 1], 'virtual_root': vroot, 'virtual_root_path': vroot_tuple, 'root': root, } if i == vroot_idx: vroot = next ob = next i += 1 return { 'context': ob, 'view_name': '', 'subpath': subpath, 'traversed': vpath_tuple, 'virtual_root': vroot, 'virtual_root_path': vroot_tuple, 'root': root, } # LIB zope def providedBy(ob): """ Return the interfaces provided by *ob*. If *ob* is a :class:`super` object, then only interfaces implemented by the remainder of the classes in the method resolution order are considered. Interfaces directly provided by the object underlying *ob* are not. 
""" # Here we have either a special object, an old-style declaration # or a descriptor # Try to get __providedBy__ try: if isinstance(ob, super): # Some objects raise errors on isinstance() return implementedBy(ob) r = ob.__providedBy__ except AttributeError: # Not set yet. Fall back to lower-level thing that computes it return getObjectSpecification(ob) try: # We might have gotten a descriptor from an instance of a # class (like an ExtensionClass) that doesn't support # descriptors. We'll make sure we got one by trying to get # the only attribute, which all specs have. r.extends except AttributeError: # The object's class doesn't understand descriptors. # Sigh. We need to get an object descriptor, but we have to be # careful. We want to use the instance's __provides__, if # there is one, but only if it didn't come from the class. try: r = ob.__provides__ except AttributeError: # No __provides__, so just fall back to implementedBy return implementedBy(ob.__class__) # We need to make sure we got the __provides__ from the # instance. We'll do this by making sure we don't get the same # thing from the class: try: cp = ob.__class__.__provides__ except AttributeError: # The ob doesn't have a class or the class has no # provides, assume we're done: return r if r is cp: # Oops, we got the provides from the class. This means # the object doesn't have it's own. We should use implementedBy return implementedBy(ob.__class__) return r # FILE pyramid/src/pyramid/traversal.py class ResourceTreeTraverser: """A resource tree traverser that should be used (for speed) when every resource in the tree supplies a ``__name__`` and ``__parent__`` attribute (ie. every resource in the tree is :term:`location` aware) .""" VH_ROOT_KEY = VH_ROOT_KEY VIEW_SELECTOR = '@@' def __init__(self, root): self.root = root def __call__(self, request): environ = request.environ matchdict = request.matchdict if matchdict is not None: path = matchdict.get('traverse', '/') or '/' if is_nonstr_iter(path): # this is a *traverse stararg (not a {traverse}) # routing has already decoded these elements, so we just # need to join them path = '/' + '/'.join(path) or '/' subpath = matchdict.get('subpath', ()) if not is_nonstr_iter(subpath): # this is not a *subpath stararg (just a {subpath}) # routing has already decoded this string, so we just need # to split it subpath = split_path_info(subpath) else: # this request did not match a route subpath = () try: # empty if mounted under a path in mod_wsgi, for example path = request.path_info or '/' except KeyError: # if environ['PATH_INFO'] is just not there path = '/' except UnicodeDecodeError as e: raise URLDecodeError( e.encoding, e.object, e.start, e.end, e.reason ) if self.VH_ROOT_KEY in environ: # HTTP_X_VHM_ROOT vroot_path = decode_path_info(environ[self.VH_ROOT_KEY]) vroot_tuple = split_path_info(vroot_path) vpath = ( vroot_path + path ) # both will (must) be unicode or asciistr vroot_idx = len(vroot_tuple) - 1 else: vroot_tuple = () vpath = path vroot_idx = -1 root = self.root ob = vroot = root if vpath == '/': # invariant: vpath must not be empty # prevent a call to traversal_path if we know it's going # to return the empty tuple vpath_tuple = () else: # we do dead reckoning here via tuple slicing instead of # pushing and popping temporary lists for speed purposes # and this hurts readability; apologies i = 0 view_selector = self.VIEW_SELECTOR vpath_tuple = split_path_info(vpath) for segment in vpath_tuple: if segment[:2] == view_selector: return { 'context': ob, 'view_name': 
segment[2:], 'subpath': vpath_tuple[i + 1 :], 'traversed': vpath_tuple[: vroot_idx + i + 1], 'virtual_root': vroot, 'virtual_root_path': vroot_tuple, 'root': root, } try: getitem = ob.__getitem__ except AttributeError: return { 'context': ob, 'view_name': segment, 'subpath': vpath_tuple[i + 1 :], 'traversed': vpath_tuple[: vroot_idx + i + 1], 'virtual_root': vroot, 'virtual_root_path': vroot_tuple, 'root': root, } try: next = getitem(segment) except KeyError: return { 'context': ob, 'view_name': segment, 'subpath': vpath_tuple[i + 1 :], 'traversed': vpath_tuple[: vroot_idx + i + 1], 'virtual_root': vroot, 'virtual_root_path': vroot_tuple, 'root': root, } if i == vroot_idx: vroot = next ob = next i += 1 return { 'context': ob, 'view_name': '', 'subpath': subpath, 'traversed': vpath_tuple, 'virtual_root': vroot, 'virtual_root_path': vroot_tuple, 'root': root, } # FILE pyramid/build/lib/pyramid/request.py class Request( BaseRequest, URLMethodsMixin, CallbackMethodsMixin, InstancePropertyMixin, LocalizerRequestMixin, SecurityAPIMixin, AuthenticationAPIMixin, ViewMethodsMixin, ): """ A subclass of the :term:`WebOb` Request class. An instance of this class is created by the :term:`router` and is provided to a view callable (and to other subsystems) as the ``request`` argument. The documentation below (save for the ``add_response_callback`` and ``add_finished_callback`` methods, which are defined in this subclass itself, and the attributes ``context``, ``registry``, ``root``, ``subpath``, ``traversed``, ``view_name``, ``virtual_root`` , and ``virtual_root_path``, each of which is added to the request by the :term:`router` at request ingress time) are autogenerated from the WebOb source code used when this documentation was generated. Due to technical constraints, we can't yet display the WebOb version number from which this documentation is autogenerated, but it will be the 'prevailing WebOb version' at the time of the release of this :app:`Pyramid` version. See https://webob.org/ for further information. """ exception = None exc_info = None matchdict = None matched_route = None request_iface = IRequest ResponseClass = Response @reify def tmpl_context(self): # docs-deprecated template context for Pylons-like apps; do not # remove. return TemplateContext() @reify def session(self): """Obtain the :term:`session` object associated with this request. If a :term:`session factory` has not been registered during application configuration, a :class:`pyramid.exceptions.ConfigurationError` will be raised""" from pyramid.interfaces import ISessionFactory factory = self.registry.queryUtility(ISessionFactory) if factory is None: raise AttributeError( 'No session factory registered ' '(see the Sessions chapter of the Pyramid documentation)' ) return factory(self) @reify def response(self): """This attribute is actually a "reified" property which returns an instance of the :class:`pyramid.response.Response`. class. The response object returned does not exist until this attribute is accessed. Subsequent accesses will return the same Response object. The ``request.response`` API is used by renderers. A render obtains the response object it will return from a view that uses that renderer by accessing ``request.response``. Therefore, it's possible to use the ``request.response`` API to set up a response object with "the right" attributes (e.g. by calling ``request.response.set_cookie()``) within a view that uses a renderer. 
Mutations to this response object will be preserved in the response sent to the client.""" response_factory = _get_response_factory(self.registry) return response_factory(self) def is_response(self, ob): """Return ``True`` if the object passed as ``ob`` is a valid response object, ``False`` otherwise.""" if ob.__class__ is Response: return True registry = self.registry adapted = registry.queryAdapterOrSelf(ob, IResponse) if adapted is None: return False return adapted is ob # LIB zope class implementer: """ Declare the interfaces implemented by instances of a class. This function is called as a class decorator. The arguments are one or more interfaces or interface specifications (`~zope.interface.interfaces.IDeclaration` objects). The interfaces given (including the interfaces in the specifications) are added to any interfaces previously declared, unless the interface is already implemented. Previous declarations include declarations for base classes unless implementsOnly was used. This function is provided for convenience. It provides a more convenient way to call `classImplements`. For example:: @implementer(I1) class C(object): pass is equivalent to calling:: classImplements(C, I1) after the class has been created. .. seealso:: `classImplements` The change history provided there applies to this function too. """ __slots__ = ('interfaces',) def __init__(self, *interfaces): self.interfaces = interfaces def __call__(self, ob): if isinstance(ob, type): # This is the common branch for classes. classImplements(ob, *self.interfaces) return ob spec_name = _implements_name(ob) spec = Implements.named(spec_name, *self.interfaces) try: ob.__implemented__ = spec except AttributeError: raise TypeError("Can't declare implements", ob) return ob # FILE pyramid/src/pyramid/interfaces.py class ITraverser(Interface): def __call__(request): """Return a dictionary with (at least) the keys ``root``, ``context``, ``view_name``, ``subpath``, ``traversed``, ``virtual_root``, and ``virtual_root_path``. These values are typically the result of an object graph traversal. ``root`` is the physical root object, ``context`` will be a model object, ``view_name`` will be the view name used (a Unicode name), ``subpath`` will be a sequence of Unicode names that followed the view name but were not traversed, ``traversed`` will be a sequence of Unicode names that were traversed (including the virtual root path, if any) ``virtual_root`` will be a model object representing the virtual root (or the physical root if traversal was not performed), and ``virtual_root_path`` will be a sequence representing the virtual root path (a sequence of Unicode names) or ``None`` if traversal was not performed. Extra keys for special purpose functionality can be returned as necessary. All values returned in the dictionary will be made available as attributes of the ``request`` object by the :term:`router`. """ # FILE pyramid/src/pyramid/scripts/pviews.py def main(argv=sys.argv, quiet=False): command = PViewsCommand(argv, quiet) return command.run() # FILE pyramid/src/pyramid/interfaces.py class IRootFactory(Interface): def __call__(request): """Return a root object based on the request""" # FILE pyramid/src/pyramid/interfaces.py class IRoutesMapper(Interface): """Interface representing a Routes ``Mapper`` object""" def get_routes(): """Return a sequence of Route objects registered in the mapper. 
Static routes will not be returned in this sequence.""" def has_routes(): """Returns ``True`` if any route has been registered.""" def get_route(name): """Returns an ``IRoute`` object if a route with the name ``name`` was registered, otherwise return ``None``.""" def connect( name, pattern, factory=None, predicates=(), pregenerator=None, static=True, ): """Add a new route.""" def generate(name, kw): """Generate a URL using the route named ``name`` with the keywords implied by kw""" def __call__(request): """Return a dictionary containing matching information for the request; the ``route`` key of this dictionary will either be a Route object or ``None`` if no route matched; the ``match`` key will be the matchdict or ``None`` if no route matched. Static routes will not be considered for matching.""" # FILE pyramid/src/pyramid/paster.py def bootstrap(config_uri, request=None, options=None): """Load a WSGI application from the PasteDeploy config file specified by ``config_uri``. The environment will be configured as if it is currently serving ``request``, leaving a natural environment in place to write scripts that can generate URLs and utilize renderers. This function returns a dictionary with ``app``, ``root``, ``closer``, ``request``, and ``registry`` keys. ``app`` is the WSGI app loaded (based on the ``config_uri``), ``root`` is the traversal root resource of the Pyramid application, and ``closer`` is a parameterless callback that may be called when your script is complete (it pops a threadlocal stack). .. note:: Most operations within :app:`Pyramid` expect to be invoked within the context of a WSGI request, thus it's important when loading your application to anchor it when executing scripts and other code that is not normally invoked during active WSGI requests. .. note:: For a complex config file containing multiple :app:`Pyramid` applications, this function will setup the environment under the context of the last-loaded :app:`Pyramid` application. You may load a specific application yourself by using the lower-level functions :meth:`pyramid.paster.get_app` and :meth:`pyramid.scripting.prepare` in conjunction with :attr:`pyramid.config.global_registries`. ``config_uri`` -- specifies the PasteDeploy config file to use for the interactive shell. The format is ``inifile#name``. If the name is left off, ``main`` will be assumed. ``request`` -- specified to anchor the script to a given set of WSGI parameters. For example, most people would want to specify the host, scheme and port such that their script will generate URLs in relation to those parameters. A request with default parameters is constructed for you if none is provided. You can mutate the request's ``environ`` later to setup a specific host/port/scheme/etc. ``options`` Is passed to get_app for use as variable assignments like {'http_port': 8080} and then use %(http_port)s in the config file. This function may be used as a context manager to call the ``closer`` automatically: .. code-block:: python with bootstrap('development.ini') as env: request = env['request'] # ... See :ref:`writing_a_script` for more information about how to use this function. .. versionchanged:: 1.8 Added the ability to use the return value as a context manager. .. versionchanged:: 2.0 Request finished callbacks added via :meth:`pyramid.request.Request.add_finished_callback` will be invoked by the ``closer``. 
""" app = get_app(config_uri, options=options) env = prepare(request) env['app'] = app return env # FILE pyramid/src/pyramid/paster.py def setup_logging(config_uri, global_conf=None): """ Set up Python logging with the filename specified via ``config_uri`` (a string in the form ``filename#sectionname``). Extra defaults can optionally be specified as a dict in ``global_conf``. """ loader = get_config_loader(config_uri) loader.setup_logging(global_conf) # FILE pyramid/src/pyramid/scripts/common.py def parse_vars(args): """ Given variables like ``['a=b', 'c=d']`` turns it into ``{'a': 'b', 'c': 'd'}`` """ result = {} for arg in args: if '=' not in arg: raise ValueError('Variable assignment %r invalid (no "=")' % arg) name, value = arg.split('=', 1) result[name] = value return result # FILE pyramid/src/pyramid/view.py def _find_views( registry, request_iface, context_iface, view_name, view_types=None, view_classifier=None, ): if view_types is None: view_types = (IView, ISecuredView, IMultiView) if view_classifier is None: view_classifier = IViewClassifier registered = registry.adapters.registered cache = registry._view_lookup_cache views = cache.get((request_iface, context_iface, view_name)) if views is None: views = [] for req_type, ctx_type in itertools.product( request_iface.__sro__, context_iface.__sro__ ): source_ifaces = (view_classifier, req_type, ctx_type) for view_type in view_types: view_callable = registered( source_ifaces, view_type, name=view_name ) if view_callable is not None: views.append(view_callable) if views: # do not cache view lookup misses. rationale: dont allow cache to # grow without bound if somebody tries to hit the site with many # missing URLs. we could use an LRU cache instead, but then # purposeful misses by an attacker would just blow out the cache # anyway. downside: misses will almost always consume more CPU than # hits in steady state. with registry._lock: cache[(request_iface, context_iface, view_name)] = views return views # FILE pyramid/src/pyramid/interfaces.py class IRoutesMapper(Interface): """Interface representing a Routes ``Mapper`` object""" def get_routes(): """Return a sequence of Route objects registered in the mapper. Static routes will not be returned in this sequence.""" def has_routes(): """Returns ``True`` if any route has been registered.""" ... def get_route(name): """Returns an ``IRoute`` object if a route with the name ``name`` was registered, otherwise return ``None``.""" ... def connect( """Add a new route.""" ... def generate(name, kw): """Generate a URL using the route named ``name`` with the keywords implied by kw""" ... def __call__(request): """Return a dictionary containing matching information for the request; the ``route`` key of this dictionary will either be a Route object or ``None`` if no route matched; the ``match`` key will be the matchdict or ``None`` if no route matched. Static routes will not be considered for matching.""" ... # FILE pyramid/src/pyramid/interfaces.py class IRoutesMapper(Interface): """Interface representing a Routes ``Mapper`` object""" def get_routes(): """Return a sequence of Route objects registered in the mapper. Static routes will not be returned in this sequence.""" ... def has_routes(): """Returns ``True`` if any route has been registered.""" ... def get_route(name): """Returns an ``IRoute`` object if a route with the name ``name`` was registered, otherwise return ``None``.""" def connect( """Add a new route.""" ... 
def generate(name, kw): """Generate a URL using the route named ``name`` with the keywords implied by kw""" ... def __call__(request): """Return a dictionary containing matching information for the request; the ``route`` key of this dictionary will either be a Route object or ``None`` if no route matched; the ``match`` key will be the matchdict or ``None`` if no route matched. Static routes will not be considered for matching.""" ... Based on the information above, please complete the function: #CURRENT_FILE: pyramid/src/pyramid/scripts/pviews.py import argparse import sys import textwrap from pyramid.interfaces import IMultiView from pyramid.paster import bootstrap, setup_logging from pyramid.request import Request from pyramid.scripts.common import parse_vars from pyramid.view import _find_views from zope.interface import implementer, providedBy from pyramid.interfaces import IRequest, IRootFactory, IRouteRequest, IRoutesMapper, ITraverser from pyramid.traversal import DefaultRootFactory, ResourceTreeTraverser class PViewsCommand: description = """\ Print, for a given URL, the views that might match. Underneath each potentially matching route, list the predicates required. Underneath each route+predicate set, print each view that might match and its predicates. This command accepts two positional arguments: 'config_uri' specifies the PasteDeploy config file to use for the interactive shell. The format is 'inifile#name'. If the name is left off, 'main' will be assumed. 'url' specifies the path info portion of a URL that will be used to find matching views. Example: 'proutes myapp.ini#main /url' """ stdout = sys.stdout parser = argparse.ArgumentParser( description=textwrap.dedent(description), formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( 'config_uri', nargs='?', default=None, help='The URI to the configuration file.', ) parser.add_argument( 'url', nargs='?', default=None, help='The path info portion of the URL.', ) parser.add_argument( 'config_vars', nargs='*', default=(), help="Variables required by the config file. For example, " "`http_port=%%(http_port)s` would expect `http_port=8080` to be " "passed here.", ) bootstrap = staticmethod(bootstrap) # testing setup_logging = staticmethod(setup_logging) # testing def __init__(self, argv, quiet=False): self.quiet = quiet self.args = self.parser.parse_args(argv[1:]) def out(self, msg): # pragma: no cover if not self.quiet: print(msg) def _find_multi_routes(self, mapper, request):
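A minimal sketch of a possible completion. It assumes, as the Pyramid routing layer's ``Route`` objects do, that each object returned by ``mapper.get_routes()`` exposes a ``match(path)`` method returning a matchdict or ``None``:

    def _find_multi_routes(self, mapper, request):
        infos = []
        path = request.environ['PATH_INFO']
        # Collect every route whose pattern matches the path, regardless
        # of predicates; the caller reports predicates separately.
        for route in mapper.get_routes():
            match = route.match(path)
            if match is not None:
                infos.append({'match': match, 'route': route})
        return infos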
pyramid/src/pyramid/scripts/pviews.py
pyramid.scripts.pserve.PServeCommand.guess_server_url
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE pyramid/src/pyramid/scripts/pserve.py def cherrypy_server_runner( app, global_conf=None, host='127.0.0.1', port=None, ssl_pem=None, protocol_version=None, numthreads=None, server_name=None, max=None, request_queue_size=None, timeout=None, ): # pragma: no cover """ Entry point for CherryPy's WSGI server Serves the specified WSGI app via CherryPyWSGIServer. ``app`` The WSGI 'application callable'; multiple WSGI applications may be passed as (script_name, callable) pairs. ``host`` This is the ipaddress to bind to (or a hostname if your nameserver is properly configured). This defaults to 127.0.0.1, which is not a public interface. ``port`` The port to run on, defaults to 8080 for HTTP, or 4443 for HTTPS. This can be a string or an integer value. ``ssl_pem`` This an optional SSL certificate file (via OpenSSL) You can generate a self-signed test PEM certificate file as follows: $ openssl genrsa 1024 > host.key $ chmod 400 host.key $ openssl req -new -x509 -nodes -sha1 -days 365 \\ -key host.key > host.cert $ cat host.cert host.key > host.pem $ chmod 400 host.pem ``protocol_version`` The protocol used by the server, by default ``HTTP/1.1``. ``numthreads`` The number of worker threads to create. ``server_name`` The string to set for WSGI's SERVER_NAME environ entry. ``max`` The maximum number of queued requests. (defaults to -1 = no limit). ``request_queue_size`` The 'backlog' argument to socket.listen(); specifies the maximum number of queued connections. ``timeout`` The timeout in seconds for accepted connections. """ is_ssl = False if ssl_pem: port = port or 4443 is_ssl = True if not port: if ':' in host: host, port = host.split(':', 1) else: port = 8080 bind_addr = (host, int(port)) kwargs = {} for var_name in ('numthreads', 'max', 'request_queue_size', 'timeout'): var = locals()[var_name] if var is not None: kwargs[var_name] = int(var) try: from cheroot.wsgi import Server as WSGIServer except ImportError: from cherrypy.wsgiserver import CherryPyWSGIServer as WSGIServer server = WSGIServer(bind_addr, app, server_name=server_name, **kwargs) if ssl_pem is not None: # creates wsgiserver.ssl_builtin as side-effect try: from cheroot.server import get_ssl_adapter_class from cheroot.ssl.builtin import BuiltinSSLAdapter except ImportError: from cherrypy.wsgiserver import get_ssl_adapter_class from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter get_ssl_adapter_class() server.ssl_adapter = BuiltinSSLAdapter(ssl_pem, ssl_pem) if protocol_version: server.protocol = protocol_version try: protocol = is_ssl and 'https' or 'http' if host == '0.0.0.0': print( 'serving on 0.0.0.0:%s view at %s://127.0.0.1:%s' % (port, protocol, port), file=sys.stderr, ) else: print( 'serving on %s://%s:%s' % (protocol, host, port), file=sys.stderr, ) server.start() except (KeyboardInterrupt, SystemExit): server.stop() return server # FILE pyramid/src/pyramid/scripts/common.py def get_config_loader(config_uri): """ Find a ``plaster.ILoader`` object supporting the "wsgi" protocol. 
""" return plaster.get_loader(config_uri, protocols=['wsgi']) # FILE pyramid/src/pyramid/scripts/pserve.py def wsgiref_server_runner(wsgi_app, global_conf, **kw): # pragma: no cover from wsgiref.simple_server import make_server host = kw.get('host', '0.0.0.0') port = int(kw.get('port', 8080)) server = make_server(host, port, wsgi_app) print( 'Starting HTTP server on http://%s:%s' % (host, port), file=sys.stderr ) server.serve_forever() # FILE pyramid/src/pyramid/scripts/pserve.py def main(argv=sys.argv, quiet=False, original_ignore_files=None): command = PServeCommand( argv, quiet=quiet, original_ignore_files=original_ignore_files ) return command.run() # FILE pyramid/build/lib/pyramid/settings.py def aslist(value, flatten=True): """Return a list, separating the input based on newlines. Also if ``flatten`` is ``True`` (the default), and if the line is a string, then the line will be split on spaces. """ values = aslist_cronly(value) if not flatten: return values result = [] for value in values: if isinstance(value, str): value = value.split() result.extend(value) else: result.append(value) return result # FILE pyramid/build/lib/pyramid/path.py class AssetResolver(Resolver): """A class used to resolve an :term:`asset specification` to an :term:`asset descriptor`. .. versionadded:: 1.3 The constructor accepts a single argument named ``package`` which may be any of: - A fully qualified (not relative) dotted name to a module or package - a Python module or package object - The value ``None`` - The constant value :attr:`pyramid.path.CALLER_PACKAGE`. The default value is :attr:`pyramid.path.CALLER_PACKAGE`. The ``package`` is used when a relative asset specification is supplied to the :meth:`~pyramid.path.AssetResolver.resolve` method. An asset specification without a colon in it is treated as relative. If ``package`` is ``None``, the resolver will only be able to resolve fully qualified (not relative) asset specifications. Any attempt to resolve a relative asset specification will result in an :exc:`ValueError` exception. If ``package`` is :attr:`pyramid.path.CALLER_PACKAGE`, the resolver will treat relative asset specifications as relative to the caller of the :meth:`~pyramid.path.AssetResolver.resolve` method. If ``package`` is a *module* or *module name* (as opposed to a package or package name), its containing package is computed and this package is used to derive the package name (all names are resolved relative to packages, never to modules). For example, if the ``package`` argument to this type was passed the string ``xml.dom.expatbuilder``, and ``template.pt`` is supplied to the :meth:`~pyramid.path.AssetResolver.resolve` method, the resulting absolute asset spec would be ``xml.minidom:template.pt``, because ``xml.dom.expatbuilder`` is a module object, not a package object. If ``package`` is a *package* or *package name* (as opposed to a module or module name), this package will be used to compute relative asset specifications. For example, if the ``package`` argument to this type was passed the string ``xml.dom``, and ``template.pt`` is supplied to the :meth:`~pyramid.path.AssetResolver.resolve` method, the resulting absolute asset spec would be ``xml.minidom:template.pt``. """ def resolve(self, spec): """ Resolve the asset spec named as ``spec`` to an object that has the attributes and methods described in :class:`pyramid.interfaces.IAssetDescriptor`. If ``spec`` is an absolute filename (e.g. ``/path/to/myproject/templates/foo.pt``) or an absolute asset spec (e.g. 
``myproject:templates.foo.pt``), an asset descriptor is returned without
        taking into account the ``package`` passed to this class'
        constructor.

        If ``spec`` is a *relative* asset specification (an asset
        specification without a ``:`` in it, e.g. ``templates/foo.pt``),
        the ``package`` argument of the constructor is used as the package
        portion of the asset spec. For example:

        .. code-block:: python

           a = AssetResolver('myproject')
           resolver = a.resolve('templates/foo.pt')
           print(resolver.abspath())
           # -> /path/to/myproject/templates/foo.pt

        If the AssetResolver is constructed with a ``package`` argument of
        ``None``, and a relative asset specification is passed to
        ``resolve``, a :exc:`ValueError` exception is raised.
        """
        if os.path.isabs(spec):
            return FSAssetDescriptor(spec)
        path = spec
        if ':' in path:
            package_name, path = spec.split(':', 1)
        else:
            if self.package is CALLER_PACKAGE:
                package_name = caller_package().__name__
            else:
                package_name = getattr(self.package, '__name__', None)
            if package_name is None:
                raise ValueError(
                    'relative spec %r irresolveable without package' % (spec,)
                )
        return PkgResourcesAssetDescriptor(package_name, path)

Based on the information above, please complete the function:
#CURRENT_FILE: pyramid/src/pyramid/scripts/pserve.py
import argparse
import hupper
import os
import re
import sys
import textwrap
import threading
import time
import webbrowser
from pyramid.path import AssetResolver
from pyramid.scripts.common import get_config_loader, parse_vars
from pyramid.settings import aslist

class PServeCommand:
    description = """\
    This command serves a web application that uses a PasteDeploy
    configuration file for the server and application.

    You can also include variable assignments like 'http_port=8080'
    and then use %(http_port)s in your config files.
    """
    default_verbosity = 1

    parser = argparse.ArgumentParser(
        description=textwrap.dedent(description),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        '-n',
        '--app-name',
        dest='app_name',
        metavar='NAME',
        help="Load the named application (default main)",
    )
    parser.add_argument(
        '-s',
        '--server',
        dest='server',
        metavar='SERVER_TYPE',
        help="Use the named server.",
    )
    parser.add_argument(
        '--server-name',
        dest='server_name',
        metavar='SECTION_NAME',
        help=(
            "Use the named server as defined in the configuration file "
            "(default: main)"
        ),
    )
    parser.add_argument(
        '--reload',
        dest='reload',
        action='store_true',
        help="Use auto-restart file monitor",
    )
    parser.add_argument(
        '--reload-interval',
        dest='reload_interval',
        default=1,
        help=(
            "Seconds between checking files (low number can cause "
            "significant CPU usage)"
        ),
    )
    parser.add_argument(
        '-b',
        '--browser',
        dest='browser',
        action='store_true',
        help=(
            "Open a web browser to the server url. The server url is "
            "determined from the 'open_url' setting in the 'pserve' "
            "section of the configuration file."
), ) parser.add_argument( '-v', '--verbose', default=default_verbosity, dest='verbose', action='count', help="Set verbose level (default " + str(default_verbosity) + ")", ) parser.add_argument( '-q', '--quiet', action='store_const', const=0, dest='verbose', help="Suppress verbose output", ) parser.add_argument( 'config_uri', nargs='?', default=None, help='The URI to the configuration file.', ) parser.add_argument( 'config_vars', nargs='*', default=(), help="Variables required by the config file. For example, " "`http_port=%%(http_port)s` would expect `http_port=8080` to be " "passed here.", ) _get_config_loader = staticmethod(get_config_loader) # for testing open_url = None _scheme_re = re.compile(r'^[a-z][a-z]+:', re.I) def __init__(self, argv, quiet=False, original_ignore_files=None): self.args = self.parser.parse_args(argv[1:]) if quiet: self.args.verbose = 0 if self.args.reload: self.worker_kwargs = {'argv': argv, "quiet": quiet} self.watch_files = set() self.ignore_files = set() self.original_ignore_files = original_ignore_files def out(self, msg): # pragma: no cover if self.args.verbose > 0: print(msg, file=sys.stderr) def get_config_path(self, loader): return os.path.abspath(loader.uri.path) def pserve_file_config(self, loader, global_conf=None): settings = loader.get_settings('pserve', global_conf) config_path = self.get_config_path(loader) here = os.path.dirname(config_path) watch_files = aslist(settings.get('watch_files', ''), flatten=False) self.ignore_files = set( aslist(settings.get('ignore_files', ''), flatten=False) ) # track file paths relative to the ini file resolver = AssetResolver(package=None) for file in watch_files: if ':' in file: file = resolver.resolve(file).abspath() elif not os.path.isabs(file): file = os.path.join(here, file) self.watch_files.add(os.path.abspath(file)) # attempt to determine the url of the server open_url = settings.get('open_url') if open_url: self.open_url = open_url def guess_server_url(self, loader, server_name, global_conf=None):
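A plausible completion, sketched under the assumption that the loader exposes PasteDeploy-style ``server:<name>`` sections through ``get_settings`` (consistent with ``pserve_file_config`` above) and that a declared ``port`` implies a local HTTP URL:

    def guess_server_url(self, loader, server_name, global_conf=None):
        server_name = server_name or 'main'
        settings = loader.get_settings('server:' + server_name, global_conf)
        # Only guess when the server section declares a port; otherwise
        # return None so the caller can fall back to the 'open_url' setting.
        if 'port' in settings:
            return 'http://127.0.0.1:{port}'.format(**settings)
        return None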
pyramid/src/pyramid/scripts/pserve.py
aiohappybase._util.pep8_to_camel_case
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE happybase/aiohappybase/_util.py def thrift_type_to_dict(obj: Any) -> Dict[bytes, Any]: """Convert a Thrift data type to a regular dictionary.""" return { camel_case_to_pep8(attr): getattr(obj, attr) for attr in thrift_attrs(obj) } # FILE happybase/aiohappybase/_util.py def camel_case_to_pep8(name: str) -> str: """Convert a camel cased name to PEP8 style.""" converted = CAPITALS.sub(lambda m: '_' + m.groups()[0].lower(), name) return converted[1:] if converted[0] == '_' else converted Based on the information above, please complete the function: #CURRENT_FILE: happybase/aiohappybase/_util.py import re from typing import Dict, List, Any, AnyStr, Optional, TypeVar, Callable def pep8_to_camel_case(name: str, initial: bool = False) -> str: """Convert a PEP8 style name to camel case."""
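One straightforward sketch, written as the inverse of ``camel_case_to_pep8`` above; how mixed-case chunks should be normalized is an assumption:

    def pep8_to_camel_case(name: str, initial: bool = False) -> str:
        """Convert a PEP8 style name to camel case."""
        # Split on underscores and capitalize each chunk, skipping empty
        # chunks produced by leading/doubled underscores (an assumption).
        chunks = name.split('_')
        converted = [s[0].upper() + s[1:].lower() for s in chunks if s]
        if initial:
            return ''.join(converted)           # e.g. 'table_name' -> 'TableName'
        return converted[0].lower() + ''.join(converted[1:])  # -> 'tableName'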
happybase/aiohappybase/_util.py
aiohappybase._util.bytes_increment
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function:
#CURRENT_FILE: happybase/aiohappybase/_util.py
import re
from typing import Dict, List, Any, AnyStr, Optional, TypeVar, Callable

def bytes_increment(b: bytes) -> Optional[bytes]:
    """
    Increment and truncate a byte string (for sorting purposes)

    This function returns the shortest string that sorts after the given
    string when compared using regular string comparison semantics.

    This function increments the last byte that is smaller than ``0xFF``,
    and drops everything after it. If the string only contains ``0xFF``
    bytes, `None` is returned.
    """
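A sketch that follows the docstring directly: scan from the end for the first byte below ``0xFF``, increment it, and truncate everything after it:

    def bytes_increment(b: bytes) -> Optional[bytes]:
        b = bytearray(b)  # mutable copy
        for i in range(len(b) - 1, -1, -1):
            if b[i] != 0xFF:
                b[i] += 1
                return bytes(b[:i + 1])  # drop the trailing bytes
        return None  # every byte was 0xFF; no shortest successor exists

    # e.g. bytes_increment(b'ab\xff') -> b'ac'; bytes_increment(b'\xff') -> None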
happybase/aiohappybase/_util.py
mssqlcli.config.ensure_dir_exists
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function: #CURRENT_FILE: mssql-cli/mssqlcli/config.py import os from os.path import expanduser, exists, dirname import shutil import platform from configobj import ConfigObj from mssqlcli import __file__ as package_root def ensure_dir_exists(path):
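A minimal sketch using only the helpers already imported; whether the original also guards against a race between ``exists`` and ``makedirs`` is unknown:

    def ensure_dir_exists(path):
        # Create any missing parent directories for the given file path.
        parent_dir = expanduser(dirname(path))
        if parent_dir and not exists(parent_dir):
            os.makedirs(parent_dir)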
mssql-cli/mssqlcli/config.py
mssqlcli.telemetry._user_id_file_is_old
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE mssql-cli/mssqlcli/telemetry.py def start(): _session.start_time = datetime.now() # LIB future class datetime(date): """datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]]) The year, month and day arguments are required. tzinfo may be None, or an instance of a tzinfo subclass. The remaining arguments may be ints. """ __slots__ = date.__slots__ + ( '_hour', '_minute', '_second', '_microsecond', '_tzinfo') def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0, microsecond=0, tzinfo=None): if isinstance(year, bytes) and len(year) == 10: # Pickle support self = date.__new__(cls, year[:4]) self.__setstate(year, month) return self _check_tzinfo_arg(tzinfo) _check_time_fields(hour, minute, second, microsecond) self = date.__new__(cls, year, month, day) self._hour = hour self._minute = minute self._second = second self._microsecond = microsecond self._tzinfo = tzinfo return self # Read-only field accessors @property def hour(self): """hour (0-23)""" return self._hour @property def minute(self): """minute (0-59)""" return self._minute @property def second(self): """second (0-59)""" return self._second @property def microsecond(self): """microsecond (0-999999)""" return self._microsecond @property def tzinfo(self): """timezone info object""" return self._tzinfo @classmethod def fromtimestamp(cls, t, tz=None): """Construct a datetime from a POSIX timestamp (like time.time()). A timezone info object may be passed in as well. """ _check_tzinfo_arg(tz) converter = _time.localtime if tz is None else _time.gmtime t, frac = divmod(t, 1.0) us = int(frac * 1e6) # If timestamp is less than one microsecond smaller than a # full second, us can be rounded up to 1000000. In this case, # roll over to seconds, otherwise, ValueError is raised # by the constructor. if us == 1000000: t += 1 us = 0 y, m, d, hh, mm, ss, weekday, jday, dst = converter(t) ss = min(ss, 59) # clamp out leap seconds if the platform has them result = cls(y, m, d, hh, mm, ss, us, tz) if tz is not None: result = tz.fromutc(result) return result @classmethod def utcfromtimestamp(cls, t): "Construct a UTC datetime from a POSIX timestamp (like time.time())." t, frac = divmod(t, 1.0) us = int(frac * 1e6) # If timestamp is less than one microsecond smaller than a # full second, us can be rounded up to 1000000. In this case, # roll over to seconds, otherwise, ValueError is raised # by the constructor. if us == 1000000: t += 1 us = 0 y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t) ss = min(ss, 59) # clamp out leap seconds if the platform has them return cls(y, m, d, hh, mm, ss, us) # XXX This is supposed to do better than we *can* do by using time.time(), # XXX if the platform supports a more accurate way. The C implementation # XXX uses gettimeofday on platforms that have it, but that isn't # XXX available from Python. So now() may return different results # XXX across the implementations. @classmethod def now(cls, tz=None): "Construct a datetime from time.time() and optional time zone info." t = _time.time() return cls.fromtimestamp(t, tz) @classmethod def utcnow(cls): "Construct a UTC datetime from time.time()." t = _time.time() return cls.utcfromtimestamp(t) @classmethod def combine(cls, date, time): "Construct a datetime from a given date and a given time." 
if not isinstance(date, _date_class): raise TypeError("date argument must be a date instance") if not isinstance(time, _time_class): raise TypeError("time argument must be a time instance") return cls(date.year, date.month, date.day, time.hour, time.minute, time.second, time.microsecond, time.tzinfo) def timetuple(self): "Return local time tuple compatible with time.localtime()." dst = self.dst() if dst is None: dst = -1 elif dst: dst = 1 else: dst = 0 return _build_struct_time(self.year, self.month, self.day, self.hour, self.minute, self.second, dst) def timestamp(self): "Return POSIX timestamp as float" if self._tzinfo is None: return _time.mktime((self.year, self.month, self.day, self.hour, self.minute, self.second, -1, -1, -1)) + self.microsecond / 1e6 else: return (self - _EPOCH).total_seconds() def utctimetuple(self): "Return UTC time tuple compatible with time.gmtime()." offset = self.utcoffset() if offset: self -= offset y, m, d = self.year, self.month, self.day hh, mm, ss = self.hour, self.minute, self.second return _build_struct_time(y, m, d, hh, mm, ss, 0) def date(self): "Return the date part." return date(self._year, self._month, self._day) def time(self): "Return the time part, with tzinfo None." return time(self.hour, self.minute, self.second, self.microsecond) def timetz(self): "Return the time part, with same tzinfo." return time(self.hour, self.minute, self.second, self.microsecond, self._tzinfo) def replace(self, year=None, month=None, day=None, hour=None, minute=None, second=None, microsecond=None, tzinfo=True): """Return a new datetime with new values for the specified fields.""" if year is None: year = self.year if month is None: month = self.month if day is None: day = self.day if hour is None: hour = self.hour if minute is None: minute = self.minute if second is None: second = self.second if microsecond is None: microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo _check_date_fields(year, month, day) _check_time_fields(hour, minute, second, microsecond) _check_tzinfo_arg(tzinfo) return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) def astimezone(self, tz=None): if tz is None: if self.tzinfo is None: raise ValueError("astimezone() requires an aware datetime") ts = (self - _EPOCH) // timedelta(seconds=1) localtm = _time.localtime(ts) local = datetime(*localtm[:6]) try: # Extract TZ data if available gmtoff = localtm.tm_gmtoff zone = localtm.tm_zone except AttributeError: # Compute UTC offset and compare with the value implied # by tm_isdst. If the values match, use the zone name # implied by tm_isdst. delta = local - datetime(*_time.gmtime(ts)[:6]) dst = _time.daylight and localtm.tm_isdst > 0 gmtoff = -(_time.altzone if dst else _time.timezone) if delta == timedelta(seconds=gmtoff): tz = timezone(delta, _time.tzname[dst]) else: tz = timezone(delta) else: tz = timezone(timedelta(seconds=gmtoff), zone) elif not isinstance(tz, tzinfo): raise TypeError("tz argument must be an instance of tzinfo") mytz = self.tzinfo if mytz is None: raise ValueError("astimezone() requires an aware datetime") if tz is mytz: return self # Convert self to UTC, and attach the new time zone object. myoffset = self.utcoffset() if myoffset is None: raise ValueError("astimezone() requires an aware datetime") utc = (self - myoffset).replace(tzinfo=tz) # Convert from UTC to tz's local time. return tz.fromutc(utc) # Ways to produce a string. def ctime(self): "Return ctime() style string." 
weekday = self.toordinal() % 7 or 7 return "%s %s %2d %02d:%02d:%02d %04d" % ( _DAYNAMES[weekday], _MONTHNAMES[self._month], self._day, self._hour, self._minute, self._second, self._year) def isoformat(self, sep='T'): """Return the time formatted according to ISO. This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if self.microsecond == 0. If self.tzinfo is not None, the UTC offset is also attached, giving 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'. Optional argument sep specifies the separator between date and time, default 'T'. """ s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) + _format_time(self._hour, self._minute, self._second, self._microsecond)) off = self.utcoffset() if off is not None: if off.days < 0: sign = "-" off = -off else: sign = "+" hh, mm = divmod(off, timedelta(hours=1)) assert not mm % timedelta(minutes=1), "whole minute" mm //= timedelta(minutes=1) s += "%s%02d:%02d" % (sign, hh, mm) return s def __repr__(self): """Convert to formal string, for repr().""" L = [self._year, self._month, self._day, # These are never zero self._hour, self._minute, self._second, self._microsecond] if L[-1] == 0: del L[-1] if L[-1] == 0: del L[-1] s = ", ".join(map(str, L)) s = "%s(%s)" % ('datetime.' + self.__class__.__name__, s) if self._tzinfo is not None: assert s[-1:] == ")" s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")" return s def __str__(self): "Convert to string, for str()." return self.isoformat(sep=' ') @classmethod def strptime(cls, date_string, format): 'string, format -> new datetime parsed from a string (like time.strptime()).' import _strptime return _strptime._strptime_datetime(cls, date_string, format) def utcoffset(self): """Return the timezone offset in minutes east of UTC (negative west of UTC).""" if self._tzinfo is None: return None offset = self._tzinfo.utcoffset(self) _check_utc_offset("utcoffset", offset) return offset def tzname(self): """Return the timezone name. Note that the name is 100% informational -- there's no requirement that it mean anything in particular. For example, "GMT", "UTC", "-500", "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies. """ name = _call_tzinfo_method(self._tzinfo, "tzname", self) _check_tzname(name) return name def dst(self): """Return 0 if DST is not in effect, or the DST offset (in minutes eastward) if DST is in effect. This is purely informational; the DST offset has already been added to the UTC offset returned by utcoffset() if applicable, so there's no need to consult dst() unless you're interested in displaying the DST info. """ if self._tzinfo is None: return None offset = self._tzinfo.dst(self) _check_utc_offset("dst", offset) return offset # Comparisons of datetime objects with other. 
def __eq__(self, other): if isinstance(other, datetime): return self._cmp(other, allow_mixed=True) == 0 elif not isinstance(other, date): return NotImplemented else: return False def __ne__(self, other): if isinstance(other, datetime): return self._cmp(other, allow_mixed=True) != 0 elif not isinstance(other, date): return NotImplemented else: return True def __le__(self, other): if isinstance(other, datetime): return self._cmp(other) <= 0 elif not isinstance(other, date): return NotImplemented else: _cmperror(self, other) def __lt__(self, other): if isinstance(other, datetime): return self._cmp(other) < 0 elif not isinstance(other, date): return NotImplemented else: _cmperror(self, other) def __ge__(self, other): if isinstance(other, datetime): return self._cmp(other) >= 0 elif not isinstance(other, date): return NotImplemented else: _cmperror(self, other) def __gt__(self, other): if isinstance(other, datetime): return self._cmp(other) > 0 elif not isinstance(other, date): return NotImplemented else: _cmperror(self, other) def _cmp(self, other, allow_mixed=False): assert isinstance(other, datetime) mytz = self._tzinfo ottz = other._tzinfo myoff = otoff = None if mytz is ottz: base_compare = True else: myoff = self.utcoffset() otoff = other.utcoffset() base_compare = myoff == otoff if base_compare: return _cmp((self._year, self._month, self._day, self._hour, self._minute, self._second, self._microsecond), (other._year, other._month, other._day, other._hour, other._minute, other._second, other._microsecond)) if myoff is None or otoff is None: if allow_mixed: return 2 # arbitrary non-zero value else: raise TypeError("cannot compare naive and aware datetimes") # XXX What follows could be done more efficiently... diff = self - other # this will take offsets into account if diff.days < 0: return -1 return diff and 1 or 0 def __add__(self, other): "Add a datetime and a timedelta." if not isinstance(other, timedelta): return NotImplemented delta = timedelta(self.toordinal(), hours=self._hour, minutes=self._minute, seconds=self._second, microseconds=self._microsecond) delta += other hour, rem = divmod(delta.seconds, 3600) minute, second = divmod(rem, 60) if 0 < delta.days <= _MAXORDINAL: return datetime.combine(date.fromordinal(delta.days), time(hour, minute, second, delta.microseconds, tzinfo=self._tzinfo)) raise OverflowError("result out of range") __radd__ = __add__ def __sub__(self, other): "Subtract two datetimes, or a datetime and a timedelta." if not isinstance(other, datetime): if isinstance(other, timedelta): return self + -other return NotImplemented days1 = self.toordinal() days2 = other.toordinal() secs1 = self._second + self._minute * 60 + self._hour * 3600 secs2 = other._second + other._minute * 60 + other._hour * 3600 base = timedelta(days1 - days2, secs1 - secs2, self._microsecond - other._microsecond) if self._tzinfo is other._tzinfo: return base myoff = self.utcoffset() otoff = other.utcoffset() if myoff == otoff: return base if myoff is None or otoff is None: raise TypeError("cannot mix naive and timezone-aware time") return base + otoff - myoff def __hash__(self): tzoff = self.utcoffset() if tzoff is None: return hash(self._getstate()[0]) days = _ymd2ord(self.year, self.month, self.day) seconds = self.hour * 3600 + self.minute * 60 + self.second return hash(timedelta(days, seconds, self.microsecond) - tzoff) # Pickle support. 
def _getstate(self):
        yhi, ylo = divmod(self._year, 256)
        us2, us3 = divmod(self._microsecond, 256)
        us1, us2 = divmod(us2, 256)
        basestate = bytes([yhi, ylo, self._month, self._day,
                           self._hour, self._minute, self._second,
                           us1, us2, us3])
        if self._tzinfo is None:
            return (basestate,)
        else:
            return (basestate, self._tzinfo)

    def __setstate(self, string, tzinfo):
        (yhi, ylo, self._month, self._day, self._hour,
         self._minute, self._second, us1, us2, us3) = string
        self._year = yhi * 256 + ylo
        self._microsecond = (((us1 << 8) | us2) << 8) | us3
        if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
            self._tzinfo = tzinfo
        else:
            raise TypeError("bad tzinfo state arg %r" % tzinfo)

    def __reduce__(self):
        return (self.__class__, self._getstate())

# LIB future
class timedelta(object):
    """Represent the difference between two datetime objects.

    Supported operators:

    - add, subtract timedelta
    - unary plus, minus, abs
    - compare to timedelta
    - multiply, divide by int

    In addition, datetime supports subtraction of two datetime objects
    returning a timedelta, and addition or subtraction of a datetime
    and a timedelta giving a datetime.

    Representation: (days, seconds, microseconds). Why? Because I felt like it.
    """
    def __new__(cls, days=0, seconds=0, microseconds=0,
    ...
    def __repr__(self):
    ...
    def __str__(self):
    ...
    def total_seconds(self):
        """Total seconds in the duration."""
    ...
    def days(self):
        """days"""
        return self._days
    def seconds(self):
        """seconds"""
    ...
    def microseconds(self):
        """microseconds"""
    ...
    def __add__(self, other):
    ...
    def __sub__(self, other):
    ...
    def __rsub__(self, other):
    ...
    def __neg__(self):
    ...
    def __pos__(self):
    ...
    def __abs__(self):
    ...
    def __mul__(self, other):
    ...
    def _to_microseconds(self):
    ...
    def __floordiv__(self, other):
    ...
    def __truediv__(self, other):
    ...
    def __mod__(self, other):
    ...
    def __divmod__(self, other):
    ...
    def __eq__(self, other):
    ...
    def __ne__(self, other):
    ...
    def __le__(self, other):
    ...
    def __lt__(self, other):
    ...
    def __ge__(self, other):
    ...
    def __gt__(self, other):
    ...
    def _cmp(self, other):
    ...
    def __hash__(self):
    ...
    def __bool__(self):
    ...
    def _getstate(self):
    ...
    def __reduce__(self):
    ...

Based on the information above, please complete the function:
#CURRENT_FILE: mssql-cli/mssqlcli/telemetry.py
import binascii
import json
import locale
import os
import subprocess
import platform
import re
import sys
import traceback
import uuid
from functools import wraps
from datetime import datetime, timedelta
from mssqlcli import __version__ as mssql_cli_version
import mssqlcli.config as config
import mssqlcli.telemetry_upload as telemetry_core
import mssqlcli.decorators as decorators

def _user_id_file_is_old(id_file_path):
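A hedged sketch: only the ``datetime``/``timedelta`` arithmetic is suggested by the imports; the 90-day window and the reliance on the file's mtime are assumptions:

    def _user_id_file_is_old(id_file_path):
        # Assumed policy: treat the cached user-id file as stale once it
        # has not been modified for 90 days. The exact window is a guess.
        expiry = datetime.now() - timedelta(days=90)
        modified = datetime.fromtimestamp(os.path.getmtime(id_file_path))
        return modified < expiry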
mssql-cli/mssqlcli/telemetry.py
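A minimal sketch of a plausible completion, not the reference solution: the use of the file's modification time and the 24-hour cutoff are both assumptions, since the prompt's context only establishes that datetime and timedelta are available.

    import os
    from datetime import datetime, timedelta

    def _user_id_file_is_old(id_file_path):
        # Assumption: the cached telemetry user-id file counts as "old"
        # once its last-modified time falls outside a fixed window.
        mtime = datetime.fromtimestamp(os.path.getmtime(id_file_path))
        return datetime.now() - mtime > timedelta(hours=24)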
mssqlcli.util.is_command_valid
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE mssql-cli/mssqlcli/util.py def encode(s): try: return s.encode('utf-8') except (AttributeError, SyntaxError): pass return s Based on the information above, please complete the function: #CURRENT_FILE: mssql-cli/mssqlcli/util.py from os import devnull import subprocess def is_command_valid(command): """ Checks if command is recognized on machine. Used to determine installations of 'less' pager. """
mssql-cli/mssqlcli/util.py
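A hedged sketch of the completion: probe the command by actually running it with all output discarded, treating a missing executable (OSError) or a non-zero exit as "not valid". The exact exception set is an assumption; the docstring's 'less'-pager use case suggests the caller passes a non-interactive invocation.

    import subprocess
    from os import devnull

    def is_command_valid(command):
        if not command or not command.strip():
            return False
        try:
            with open(devnull, 'w') as f_null:
                subprocess.check_call(command.split(),
                                      stdout=f_null, stderr=f_null)
        except (OSError, subprocess.CalledProcessError):
            return False
        return True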
mssqlcli.packages.parseutils.utils.find_prev_keyword
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE mssql-cli/mssqlcli/packages/parseutils/utils.py def last_word(text, include='alphanum_underscore'): r""" Find the last word in a sentence. >>> last_word('abc') 'abc' >>> last_word(' abc') 'abc' >>> last_word('') '' >>> last_word(' ') '' >>> last_word('abc ') '' >>> last_word('abc def') 'def' >>> last_word('abc def ') '' >>> last_word('abc def;') '' >>> last_word('bac $def') 'def' >>> last_word('bac $def', include='most_punctuations') '$def' >>> last_word('bac \def', include='most_punctuations') '\\\\def' >>> last_word('bac \def;', include='most_punctuations') '\\\\def;' >>> last_word('bac::def', include='most_punctuations') 'def' >>> last_word('"foo*bar', include='most_punctuations') '"foo*bar' """ if not text: # Empty string return '' if text[-1].isspace(): return '' regex = cleanup_regex[include] matches = regex.search(text) if matches: return matches.group(0) return '' # FILE mssql-cli/mssqlcli/packages/parseutils/utils.py def parse_partial_identifier(word): """Attempt to parse a (partially typed) word as an identifier word may include a schema qualification, like `schema_name.partial_name` or `schema_name.` There may also be unclosed quotation marks, like `"schema`, or `schema."partial_name` :param word: string representing a (partially complete) identifier :return: sqlparse.sql.Identifier, or None """ p = sqlparse.parse(word)[0] n_tok = len(p.tokens) if n_tok == 1 and isinstance(p.tokens[0], Identifier): return p.tokens[0] if p.token_next_by(m=(Error, '"'))[1]: # An unmatched double quote, e.g. '"foo', 'foo."', or 'foo."bar' # Close the double quote, then reparse return parse_partial_identifier(word + '"') return None Based on the information above, please complete the function: #CURRENT_FILE: mssql-cli/mssqlcli/packages/parseutils/utils.py from __future__ import print_function import re import sqlparse from sqlparse.sql import Identifier from sqlparse.tokens import Token, Error def find_prev_keyword(sql, n_skip=0): """ Find the last sql keyword in an SQL statement Returns the value of the last keyword, and the text of the query with everything after the last keyword stripped """
mssql-cli/mssqlcli/packages/parseutils/utils.py
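A sketch modeled on pgcli's parseutils, which mssql-cli vendors; the logical-operator exclusion list is part of that heritage rather than something stated in the prompt. Flatten the parsed statement, walk backwards to the last keyword (or open paren), and return it together with the query text up to and including that token.

    import sqlparse

    def find_prev_keyword(sql, n_skip=0):
        if not sql.strip():
            return None, ''
        parsed = sqlparse.parse(sql)[0]
        flattened = list(parsed.flatten())
        flattened = flattened[: len(flattened) - n_skip]
        logical_operators = ('AND', 'OR', 'NOT', 'BETWEEN')
        for t in reversed(flattened):
            if t.value == '(' or (
                    t.is_keyword and t.value.upper() not in logical_operators):
                # Use flattened.index(t) rather than parsed.token_index(t):
                # t may be nested inside a TokenList.
                idx = flattened.index(t)
                text = ''.join(tok.value for tok in flattened[: idx + 1])
                return t, text
        return None, ''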
pyramid.util.text_
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE pyramid/src/pyramid/util.py def get_callable_name(name): """ Verifies that the ``name`` is ascii and will raise a ``ConfigurationError`` if it is not. """ try: return ascii_(name) except (UnicodeEncodeError, UnicodeDecodeError): # avoid circular dependency from pyramid.exceptions import ConfigurationError msg = ( '`name="%s"` is invalid. `name` must be ascii because it is ' 'used on __name__ of the method' ) raise ConfigurationError(msg % name) # FILE pyramid/src/pyramid/util.py def ascii_(s): """ If ``s`` is an instance of ``str``, return ``s.encode('ascii')``, otherwise return ``str(s, 'ascii', 'strict')`` """ if isinstance(s, str): s = s.encode('ascii') return str(s, 'ascii', 'strict') # FILE pyramid/src/pyramid/util.py def bytes_(s, encoding='latin-1', errors='strict'): """If ``s`` is an instance of ``str``, return ``s.encode(encoding, errors)``, otherwise return ``s``""" if isinstance(s, str): return s.encode(encoding, errors) return s Based on the information above, please complete the function: #CURRENT_FILE: pyramid/src/pyramid/util.py from contextlib import contextmanager import functools from hmac import compare_digest import inspect import platform import weakref from pyramid.path import DottedNameResolver as _DottedNameResolver import __pypy__ import pyramid.decorator from pyramid.exceptions import ConfigurationError from pyramid.exceptions import ConfigurationError from pyramid.exceptions import CyclicDependencyError from pyramid.exceptions import ConfigurationError def text_(s, encoding='latin-1', errors='strict'): """If ``s`` is an instance of ``bytes``, return ``s.decode(encoding, errors)``, otherwise return ``s``"""
pyramid/src/pyramid/util.py
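Here the docstring pins the behavior down completely: the body is the mirror image of bytes_ quoted in the context, decoding instead of encoding.

    def text_(s, encoding='latin-1', errors='strict'):
        """If ``s`` is an instance of ``bytes``, return ``s.decode(encoding,
        errors)``, otherwise return ``s``"""
        if isinstance(s, bytes):
            return s.decode(encoding, errors)
        return s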
datasette.filters.where_filters
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE datasette/datasette/filters.py def search_filters(request, database, table, datasette): # ?_search= and _search_colname= async def inner(): where_clauses = [] params = {} human_descriptions = [] extra_context = {} # Figure out which fts_table to use table_metadata = datasette.table_metadata(database, table) db = datasette.get_database(database) fts_table = request.args.get("_fts_table") fts_table = fts_table or table_metadata.get("fts_table") fts_table = fts_table or await db.fts_table(table) fts_pk = request.args.get("_fts_pk", table_metadata.get("fts_pk", "rowid")) search_args = { key: request.args[key] for key in request.args if key.startswith("_search") and key != "_searchmode" } search = "" search_mode_raw = table_metadata.get("searchmode") == "raw" # Or set search mode from the querystring qs_searchmode = request.args.get("_searchmode") if qs_searchmode == "escaped": search_mode_raw = False if qs_searchmode == "raw": search_mode_raw = True extra_context["supports_search"] = bool(fts_table) if fts_table and search_args: if "_search" in search_args: # Simple ?_search=xxx search = search_args["_search"] where_clauses.append( "{fts_pk} in (select rowid from {fts_table} where {fts_table} match {match_clause})".format( fts_table=escape_sqlite(fts_table), fts_pk=escape_sqlite(fts_pk), match_clause=":search" if search_mode_raw else "escape_fts(:search)", ) ) human_descriptions.append(f'search matches "{search}"') params["search"] = search extra_context["search"] = search else: # More complex: search against specific columns for i, (key, search_text) in enumerate(search_args.items()): search_col = key.split("_search_", 1)[1] if search_col not in await db.table_columns(fts_table): raise BadRequest("Cannot search by that column") where_clauses.append( "rowid in (select rowid from {fts_table} where {search_col} match {match_clause})".format( fts_table=escape_sqlite(fts_table), search_col=escape_sqlite(search_col), match_clause=":search_{}".format(i) if search_mode_raw else "escape_fts(:search_{})".format(i), ) ) human_descriptions.append( f'search column "{search_col}" matches "{search_text}"' ) params[f"search_{i}"] = search_text extra_context["search"] = search_text return FilterArguments(where_clauses, params, human_descriptions, extra_context) return inner # FILE datasette/datasette/filters.py class Filters: def __init__(self, pairs, units=None, ureg=None): if units is None: units = {} self.pairs = pairs self.units = units self.ureg = ureg def lookups(self): """Yields (lookup, display, no_argument) pairs""" ... def human_description_en(self, extra=None): bits = [] if extra: bits.extend(extra) for column, lookup, value in self.selections(): filter = self._filters_by_key.get(lookup, None) if filter: bits.append(filter.human_clause(column, value)) # Comma separated, with an ' and ' at the end and_bits = [] commas, tail = bits[:-1], bits[-1:] if commas: and_bits.append(", ".join(commas)) if tail: and_bits.append(tail[0]) s = " and ".join(and_bits) if not s: return "" return f"where {s}" def selections(self): """Yields (column, lookup, value) tuples""" ... def has_selections(self): ... def convert_unit(self, column, value): """If the user has provided a unit in the query, convert it into the column unit, if present.""" ... def build_where_clauses(self, table): ... 
# FILE datasette/datasette/filters.py class FilterArguments: def __init__( self, where_clauses, params=None, human_descriptions=None, extra_context=None ): self.where_clauses = where_clauses self.params = params or {} self.human_descriptions = human_descriptions or [] self.extra_context = extra_context or {} # FILE datasette/datasette/filters.py class Filter: key = None display = None no_argument = False def where_clause(self, table, column, value, param_counter): raise NotImplementedError def human_clause(self, column, value): raise NotImplementedError # FILE datasette/datasette/utils/asgi.py class BadRequest(Base400): status = 400 # FILE datasette/datasette/filters.py class Filter: def where_clause(self, table, column, value, param_counter): raise NotImplementedError def human_clause(self, column, value): ... # FILE datasette/datasette/filters.py class InFilter(Filter): def split_value(self, value): ... def where_clause(self, table, column, value, param_counter): values = self.split_value(value) params = [f":p{param_counter + i}" for i in range(len(values))] sql = f"{escape_sqlite(column)} in ({', '.join(params)})" return sql, values def human_clause(self, column, value): ... # FILE datasette/datasette/filters.py class NotInFilter(InFilter): def where_clause(self, table, column, value, param_counter): values = self.split_value(value) params = [f":p{param_counter + i}" for i in range(len(values))] sql = f"{escape_sqlite(column)} not in ({', '.join(params)})" return sql, values def human_clause(self, column, value): ... # FILE datasette/datasette/filters.py class TemplatedFilter(Filter): def __init__( self, key, display, sql_template, human_template, format="{}", numeric=False, no_argument=False, ): self.key = key self.display = display self.sql_template = sql_template self.human_template = human_template self.format = format self.numeric = numeric self.no_argument = no_argument def where_clause(self, table, column, value, param_counter): converted = self.format.format(value) if self.numeric and converted.isdigit(): converted = int(converted) if self.no_argument: kwargs = {"c": column} converted = None else: kwargs = {"c": column, "p": f"p{param_counter}", "t": table} return self.sql_template.format(**kwargs), converted def human_clause(self, column, value): ... Based on the information above, please complete the function: #CURRENT_FILE: datasette/datasette/filters.py from datasette import hookimpl from datasette.views.base import DatasetteError from datasette.utils.asgi import BadRequest import json import numbers from .utils import detect_json1, escape_sqlite, path_with_removed_args def where_filters(request, database, datasette): # This one deals with ?_where=
datasette/datasette/filters.py
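A sketch mirroring the search_filters() hook shown in the context: return an async inner that folds ?_where= clauses into a FilterArguments object. Gating raw SQL fragments behind the "execute-sql" permission is an assumption about how Datasette guards them; DatasetteError, FilterArguments and path_with_removed_args all come from the module's own imports and definitions shown in the prompt.

    def where_filters(request, database, datasette):
        async def inner():
            where_clauses = []
            extra_context = {}
            if "_where" in request.args:
                # Assumption: raw SQL fragments require execute-sql.
                if not await datasette.permission_allowed(
                    request.actor, "execute-sql", resource=database, default=True
                ):
                    raise DatasetteError("_where= is not allowed", status=403)
                where_clauses.extend(request.args.getlist("_where"))
                extra_context["extra_wheres_for_ui"] = [
                    {
                        "text": text,
                        "remove_url": path_with_removed_args(
                            request, {("_where", text)}
                        ),
                    }
                    for text in request.args.getlist("_where")
                ]
            return FilterArguments(where_clauses, extra_context=extra_context)

        return inner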
datasette.utils.path_with_added_args
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function: #CURRENT_FILE: datasette/datasette/utils/__init__.py import asyncio from contextlib import contextmanager import click from collections import OrderedDict, namedtuple, Counter import base64 import hashlib import inspect import json import markupsafe import mergedeep import os import re import shlex import tempfile import typing import time import types import secrets import shutil import urllib import yaml from .shutil_backport import copytree from .sqlite import sqlite3, supports_table_xinfo def path_with_added_args(request, args, path=None):
datasette/datasette/utils/__init__.py
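A sketch of the completion: merge the new pairs into the current query string while keeping existing parameters. Treating a value of None as "drop this key" is an inferred convention, not something the prompt states.

    import urllib.parse

    def path_with_added_args(request, args, path=None):
        path = path or request.path
        if isinstance(args, dict):
            args = args.items()
        args = list(args)
        keys_to_remove = {k for k, v in args if v is None}
        current = [
            (key, value)
            for key, value in urllib.parse.parse_qsl(request.query_string)
            if key not in keys_to_remove
        ]
        current.extend((k, v) for k, v in args if v is not None)
        query_string = urllib.parse.urlencode(current)
        return path + (f"?{query_string}" if query_string else "")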
datasette.utils.path_with_replaced_args
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function: #CURRENT_FILE: datasette/datasette/utils/__init__.py import asyncio from contextlib import contextmanager import click from collections import OrderedDict, namedtuple, Counter import base64 import hashlib import inspect import json import markupsafe import mergedeep import os import re import shlex import tempfile import typing import time import types import secrets import shutil import urllib import yaml from .shutil_backport import copytree from .sqlite import sqlite3, supports_table_xinfo def path_with_replaced_args(request, args, path=None):
datasette/datasette/utils/__init__.py
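A companion sketch: same shape as the added-args helper above, except every key named in args first has all of its existing occurrences stripped, so the new values replace rather than accumulate.

    import urllib.parse

    def path_with_replaced_args(request, args, path=None):
        path = path or request.path
        if isinstance(args, dict):
            args = args.items()
        args = list(args)
        keys_to_replace = {k for k, _ in args}
        current = [
            (key, value)
            for key, value in urllib.parse.parse_qsl(request.query_string)
            if key not in keys_to_replace
        ]
        current.extend((k, v) for k, v in args if v is not None)
        query_string = urllib.parse.urlencode(current)
        return path + (f"?{query_string}" if query_string else "")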
datasette.utils.format_bytes
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function: #CURRENT_FILE: datasette/datasette/utils/__init__.py import asyncio from contextlib import contextmanager import click from collections import OrderedDict, namedtuple, Counter import base64 import hashlib import inspect import json import markupsafe import mergedeep import os import re import shlex import tempfile import typing import time import types import secrets import shutil import urllib import yaml from .shutil_backport import copytree from .sqlite import sqlite3, supports_table_xinfo def format_bytes(bytes):
datasette/datasette/utils/__init__.py
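A sketch of the completion: divide by 1024 until the value fits the unit, with whole bytes printed as an integer. The one-decimal formatting is an assumption.

    def format_bytes(bytes):
        current = float(bytes)
        for unit in ("bytes", "KB", "MB", "GB", "TB"):
            if current < 1024:
                break
            current = current / 1024
        if unit == "bytes":
            return "{} {}".format(int(current), unit)
        return "{:.1f} {}".format(current, unit)

For example, format_bytes(1048576) yields "1.0 MB" and format_bytes(500) yields "500 bytes".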
datasette.utils.actor_matches_allow
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE datasette/datasette/utils/__init__.py def display_actor(actor): for key in ("display", "name", "username", "login", "id"): if actor.get(key): return actor[key] return str(actor) # FILE datasette/datasette/utils/__init__.py class MultiParams: def __init__(self, data): # data is a dictionary of key => [list, of, values] or a list of [["key", "value"]] pairs if isinstance(data, dict): for key in data: assert isinstance( data[key], (list, tuple) ), "dictionary data should be a dictionary of key => [list]" self._data = data elif isinstance(data, list) or isinstance(data, tuple): new_data = {} for item in data: assert ( isinstance(item, (list, tuple)) and len(item) == 2 ), "list data should be a list of [key, value] pairs" key, value = item new_data.setdefault(key, []).append(value) self._data = new_data def __repr__(self): ... def __contains__(self, key): ... def __getitem__(self, key): ... def keys(self): return self._data.keys() def __iter__(self): ... def __len__(self): ... def get(self, name, default=None): """Return first value in the list, if available""" ... def getlist(self, name): """Return full list""" ... Based on the information above, please complete the function: #CURRENT_FILE: datasette/datasette/utils/__init__.py import asyncio from contextlib import contextmanager import click from collections import OrderedDict, namedtuple, Counter import base64 import hashlib import inspect import json import markupsafe import mergedeep import os import re import shlex import tempfile import typing import time import types import secrets import shutil import urllib import yaml from .shutil_backport import copytree from .sqlite import sqlite3, supports_table_xinfo def actor_matches_allow(actor, allow):
datasette/datasette/utils/__init__.py
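A sketch following Datasette's documented allow-block semantics: True/False/None short-circuit, an "unauthenticated" key admits anonymous actors, and otherwise any intersection between an allow key's values and the actor's values grants access. The "*" wildcard handling comes from those documented semantics rather than the prompt itself.

    def actor_matches_allow(actor, allow):
        if allow is True:
            return True
        if allow is False:
            return False
        if actor is None and allow and allow.get("unauthenticated") is True:
            return True
        if allow is None:
            return True
        actor = actor or {}
        for key, values in allow.items():
            if values == "*" and key in actor:
                return True
            if not isinstance(values, list):
                values = [values]
            actor_values = actor.get(key)
            if actor_values is None:
                continue
            if not isinstance(actor_values, list):
                actor_values = [actor_values]
            if set(actor_values).intersection(values):
                return True
        return False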
datasette.utils.resolve_env_secrets
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE datasette/datasette/utils/__init__.py class CustomJSONEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, sqlite3.Row): return tuple(obj) if isinstance(obj, sqlite3.Cursor): return list(obj) if isinstance(obj, bytes): # Does it encode to utf8? try: return obj.decode("utf8") except UnicodeDecodeError: return { "$base64": True, "encoded": base64.b64encode(obj).decode("latin1"), } return json.JSONEncoder.default(self, obj) # FILE datasette/datasette/utils/__init__.py class MultiParams: def __init__(self, data): # data is a dictionary of key => [list, of, values] or a list of [["key", "value"]] pairs if isinstance(data, dict): for key in data: assert isinstance( data[key], (list, tuple) ), "dictionary data should be a dictionary of key => [list]" self._data = data elif isinstance(data, list) or isinstance(data, tuple): new_data = {} for item in data: assert ( isinstance(item, (list, tuple)) and len(item) == 2 ), "list data should be a list of [key, value] pairs" key, value = item new_data.setdefault(key, []).append(value) self._data = new_data def __repr__(self): ... def __contains__(self, key): ... def __getitem__(self, key): ... def keys(self): return self._data.keys() def __iter__(self): ... def __len__(self): ... def get(self, name, default=None): """Return first value in the list, if available""" ... def getlist(self, name): """Return full list""" ... Based on the information above, please complete the function: #CURRENT_FILE: datasette/datasette/utils/__init__.py import asyncio from contextlib import contextmanager import click from collections import OrderedDict, namedtuple, Counter import base64 import hashlib import inspect import json import markupsafe import mergedeep import os import re import shlex import tempfile import typing import time import types import secrets import shutil import urllib import yaml from .shutil_backport import copytree from .sqlite import sqlite3, supports_table_xinfo def resolve_env_secrets(config, environ): """Create copy that recursively replaces {"$env": "NAME"} with values from environ"""
datasette/datasette/utils/__init__.py
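A sketch covering exactly what the docstring names: recursively copy the structure, swapping any {"$env": "NAME"} leaf for the corresponding value from environ (the real helper may support further leaf types not mentioned here).

    def resolve_env_secrets(config, environ):
        if isinstance(config, dict):
            if list(config.keys()) == ["$env"]:
                return environ.get(config["$env"])
            return {k: resolve_env_secrets(v, environ) for k, v in config.items()}
        if isinstance(config, list):
            return [resolve_env_secrets(v, environ) for v in config]
        return config

For example, resolve_env_secrets({"token": {"$env": "TOKEN"}}, {"TOKEN": "x"}) returns {"token": "x"}.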
datasette.utils.display_actor
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE datasette/datasette/utils/__init__.py class MultiParams: def __init__(self, data): # data is a dictionary of key => [list, of, values] or a list of [["key", "value"]] pairs if isinstance(data, dict): for key in data: assert isinstance( data[key], (list, tuple) ), "dictionary data should be a dictionary of key => [list]" self._data = data elif isinstance(data, list) or isinstance(data, tuple): new_data = {} for item in data: assert ( isinstance(item, (list, tuple)) and len(item) == 2 ), "list data should be a list of [key, value] pairs" key, value = item new_data.setdefault(key, []).append(value) self._data = new_data def __repr__(self): ... def __contains__(self, key): ... def __getitem__(self, key): ... def keys(self): return self._data.keys() def __iter__(self): ... def __len__(self): ... def get(self, name, default=None): """Return first value in the list, if available""" ... def getlist(self, name): """Return full list""" ... # FILE datasette/datasette/utils/__init__.py class StaticMount(click.ParamType): def convert(self, value, param, ctx): if ":" not in value: self.fail( f'"{value}" should be of format mountpoint:directory', param, ctx, ) path, dirpath = value.split(":", 1) dirpath = os.path.abspath(dirpath) if not os.path.exists(dirpath) or not os.path.isdir(dirpath): self.fail(f"{value} is not a valid directory path", param, ctx) return path, dirpath # FILE datasette/datasette/utils/__init__.py class LoadExtension(click.ParamType): def convert(self, value, param, ctx): if ":" not in value: return value path, entrypoint = value.split(":", 1) return path, entrypoint Based on the information above, please complete the function: #CURRENT_FILE: datasette/datasette/utils/__init__.py import asyncio from contextlib import contextmanager import click from collections import OrderedDict, namedtuple, Counter import base64 import hashlib import inspect import json import markupsafe import mergedeep import os import re import shlex import tempfile import typing import time import types import secrets import shutil import urllib import yaml from .shutil_backport import copytree from .sqlite import sqlite3, supports_table_xinfo def display_actor(actor):
datasette/datasette/utils/__init__.py
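No guesswork is needed for this one: the full body already appears verbatim in the context block of the actor_matches_allow entry above.

    def display_actor(actor):
        for key in ("display", "name", "username", "login", "id"):
            if actor.get(key):
                return actor[key]
        return str(actor)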
datasette.utils.initial_path_for_datasette
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE datasette/datasette/utils/__init__.py class MultiParams: def __init__(self, data): # data is a dictionary of key => [list, of, values] or a list of [["key", "value"]] pairs if isinstance(data, dict): for key in data: assert isinstance( data[key], (list, tuple) ), "dictionary data should be a dictionary of key => [list]" self._data = data elif isinstance(data, list) or isinstance(data, tuple): new_data = {} for item in data: assert ( isinstance(item, (list, tuple)) and len(item) == 2 ), "list data should be a list of [key, value] pairs" key, value = item new_data.setdefault(key, []).append(value) self._data = new_data def __repr__(self): ... def __contains__(self, key): ... def __getitem__(self, key): ... def keys(self): ... def __iter__(self): ... def __len__(self): ... def get(self, name, default=None): """Return first value in the list, if available""" try: return self._data.get(name)[0] except (KeyError, TypeError): return default def getlist(self, name): """Return full list""" ... Based on the information above, please complete the function: #CURRENT_FILE: datasette/datasette/utils/__init__.py import asyncio from contextlib import contextmanager import click from collections import OrderedDict, namedtuple, Counter import base64 import hashlib import inspect import json import markupsafe import mergedeep import os import re import shlex import tempfile import typing import time import types import secrets import shutil import urllib import yaml from .shutil_backport import copytree from .sqlite import sqlite3, supports_table_xinfo async def initial_path_for_datasette(datasette): """Return suggested path for opening this Datasette, based on number of DBs and tables"""
datasette/datasette/utils/__init__.py
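A sketch of the completion: with exactly one database, open it directly, and if that database has exactly one table, go straight to the table; otherwise fall back to the instance homepage. The datasette.urls helpers, db.table_names() and the "_internal" database exclusion are assumptions about the surrounding Datasette API.

    async def initial_path_for_datasette(datasette):
        databases = {
            name: db
            for name, db in datasette.databases.items()
            if name != "_internal"
        }
        if len(databases) == 1:
            db_name, db = next(iter(databases.items()))
            path = datasette.urls.database(db_name)
            tables = await db.table_names()
            if len(tables) == 1:
                path = datasette.urls.table(db_name, tables[0])
        else:
            path = datasette.urls.instance()
        return path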
datasette.utils.tilde_decode
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE datasette/datasette/utils/__init__.py def path_from_row_pks(row, pks, use_rowid, quote=True): """Generate an optionally tilde-encoded unique identifier for a row from its primary keys.""" if use_rowid: bits = [row["rowid"]] else: bits = [ row[pk]["value"] if isinstance(row[pk], dict) else row[pk] for pk in pks ] if quote: bits = [tilde_encode(str(bit)) for bit in bits] else: bits = [str(bit) for bit in bits] return ",".join(bits) # FILE datasette/datasette/utils/__init__.py def tilde_encode(s: str) -> str: "Returns tilde-encoded string - for example ``/foo/bar`` -> ``~2Ffoo~2Fbar``" return "".join(_tilde_encoder(char) for char in s.encode("utf-8")) # FILE datasette/datasette/utils/__init__.py class TildeEncoder(dict): def __missing__(self, b): # Handle a cache miss, store encoded string in cache and return. if b in _TILDE_ENCODING_SAFE: res = chr(b) elif b == _space: res = "+" else: res = "~{:02X}".format(b) self[b] = res return res # FILE datasette/datasette/utils/__init__.py class StaticMount(click.ParamType): def convert(self, value, param, ctx): if ":" not in value: self.fail( f'"{value}" should be of format mountpoint:directory', param, ctx, ) path, dirpath = value.split(":", 1) dirpath = os.path.abspath(dirpath) if not os.path.exists(dirpath) or not os.path.isdir(dirpath): self.fail(f"{value} is not a valid directory path", param, ctx) return path, dirpath # FILE datasette/datasette/utils/__init__.py class LoadExtension(click.ParamType): def convert(self, value, param, ctx): if ":" not in value: return value path, entrypoint = value.split(":", 1) return path, entrypoint Based on the information above, please complete the function: #CURRENT_FILE: datasette/datasette/utils/__init__.py import asyncio from contextlib import contextmanager import click from collections import OrderedDict, namedtuple, Counter import base64 import hashlib import inspect import json import markupsafe import mergedeep import os import re import shlex import tempfile import typing import time import types import secrets import shutil import urllib import yaml from .shutil_backport import copytree from .sqlite import sqlite3, supports_table_xinfo def tilde_decode(s: str) -> str: "Decodes a tilde-encoded string, so ``~2Ffoo~2Fbar`` -> ``/foo/bar``" # Avoid accidentally decoding a %2f style sequence
datasette/datasette/utils/__init__.py
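A sketch built around the hint in the trailing comment: hide any literal "%" behind a random sentinel so that unquote_plus() only decodes the "~XX" escapes, then restore it. This way "%2f" survives untouched while "~2F" decodes to "/".

    import secrets
    import urllib.parse

    def tilde_decode(s: str) -> str:
        temp = secrets.token_hex(16)
        s = s.replace("%", temp)
        decoded = urllib.parse.unquote_plus(s.replace("~", "%"))
        return decoded.replace(temp, "%")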
datasette.utils.resolve_routes
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function: #CURRENT_FILE: datasette/datasette/utils/__init__.py import asyncio from contextlib import contextmanager import click from collections import OrderedDict, namedtuple, Counter import base64 import hashlib import inspect import json import markupsafe import mergedeep import os import re import shlex import tempfile import typing import time import types import secrets import shutil import urllib import yaml from .shutil_backport import copytree from .sqlite import sqlite3, supports_table_xinfo def resolve_routes(routes, path):
datasette/datasette/utils/__init__.py
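A sketch assuming routes is a sequence of (compiled_regex, view) pairs, which is how the name suggests it is used: return the first match together with its view, else (None, None).

    def resolve_routes(routes, path):
        for regex, view in routes:
            match = regex.match(path)
            if match is not None:
                return match, view
        return None, None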
datasette.utils.truncate_url
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE datasette/datasette/utils/__init__.py def is_url(value): """Must start with http:// or https:// and contain JUST a URL""" if not isinstance(value, str): return False if not value.startswith("http://") and not value.startswith("https://"): return False # Any whitespace at all is invalid if whitespace_re.search(value): return False return True Based on the information above, please complete the function: #CURRENT_FILE: datasette/datasette/utils/__init__.py import asyncio from contextlib import contextmanager import click from collections import OrderedDict, namedtuple, Counter import base64 import hashlib import inspect import json import markupsafe import mergedeep import os import re import shlex import tempfile import typing import time import types import secrets import shutil import urllib import yaml from .shutil_backport import copytree from .sqlite import sqlite3, supports_table_xinfo def truncate_url(url, length):
datasette/datasette/utils/__init__.py
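A conservative sketch: leave short URLs (and a falsy length budget) untouched, otherwise cut the string and spend the final character on an ellipsis. The real helper may be cleverer, e.g. preserving a trailing file extension; that detail is not recoverable from the prompt.

    def truncate_url(url, length):
        if (not length) or (len(url) <= length):
            return url
        return url[: length - 1] + "…"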
kinto.core.authorization.groupfinder
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE kinto/kinto/core/authorization.py class AuthorizationPolicy: """Default authorization class, that leverages the permission backend for shareable resources. """ def permits(self, context, principals, permission): ... def _get_bound_permissions(self, object_id, permission): ... def principals_allowed_by_permission(self, context, permission): raise NotImplementedError() # FILE kinto/kinto/core/authorization.py class RouteFactory: def __init__(self, request): # Store some shortcuts. permission = request.registry.permission self._check_permission = permission.check_permission self._get_accessible_objects = permission.get_accessible_objects self.get_prefixed_principals = functools.partial(utils.prefixed_principals, request) # Store current resource and required permission. service = utils.current_service(request) is_on_resource = ( service is not None and hasattr(service, "viewset") and hasattr(service, "resource") ) self._resource = None if is_on_resource: self.resource_name = request.current_resource_name self.on_plural_endpoint = getattr(service, "type", None) == "plural" # Check if this request targets an individual object. # Its existence will affect permissions checking (cf `_find_required_permission()`). # There are cases where the permission is not directly related to the HTTP method, # For example: # - with POST on plural endpoint, with an id supplied # - with PUT on an object, which can either be creation or update is_write_on_object = not self.on_plural_endpoint and request.method.lower() in ( "put", "delete", "patch", ) is_post_on_plural = self.on_plural_endpoint and request.method.lower() == "post" if is_write_on_object or is_post_on_plural: # We instantiate the resource to determine the object targeted by the request. self._resource = resource = service.resource(request=request, context=self) if resource.object_id is not None: # Skip POST on plural without id. try: # Save a reference, to avoid refetching from storage in resource. self.current_object = resource.model.get_object(resource.object_id) except storage_exceptions.ObjectNotFoundError: pass self.permission_object_id, self.required_permission = self._find_required_permission( request, service ) # To obtain shared objects on a plural endpoint, use a match: self._object_id_match = self.get_permission_object_id(request, "*") self._settings = request.registry.settings def check_permission(self, principals, bound_perms): """Read allowed principals from settings, if not any, query the permission backend to check if view is allowed. """ if not bound_perms: bound_perms = [(self.resource_name, self.required_permission)] for _, permission in bound_perms: # With Kinto inheritance tree, we can have: `permission = "record:create"` if self.resource_name and permission.startswith(self.resource_name): setting = f"{permission.replace(':', '_')}_principals" else: setting = f"{self.resource_name}_{permission}_principals" allowed_principals = aslist(self._settings.get(setting, "")) if allowed_principals: if bool(set(allowed_principals) & set(principals)): return True return self._check_permission(principals, bound_perms) def fetch_shared_objects(self, perm, principals, get_bound_permissions): """Fetch objects that are readable or writable for the current principals. See :meth:`kinto.core.authorization.AuthorizationPolicy.permits` If no object is shared, it returns None. .. 
warning:: This sets the ``shared_ids`` attribute to the context with the return value. The attribute is then read by :class:`kinto.core.resource.Resource` """ ... def get_permission_object_id(self, request, object_id=None): """Returns the permission object id for the current request. In the nominal case, it is just the current URI without version prefix. For plural endpoint, it is the related object URI using the specified `object_id`. See :meth:`kinto.core.resource.model.SharableModel` and :meth:`kinto.core.authorization.RouteFactory.__init__` """ ... def _extract_object_id(self, object_uri): ... def _find_required_permission(self, request, service): """Find out what is the permission object id and the required permission. .. note:: This method saves an attribute ``self.current_object`` used in :class:`kinto.core.resource.Resource`. """ ... # LIB pyramid class IAuthorizationPolicy(Interface): """An object representing a Pyramid authorization policy. .. deprecated:: 2.0 Authentication policies have been removed in favor of security policies. See :ref:`upgrading_auth_20` for more information. """ def permits(context, principals, permission): """Return an instance of :class:`pyramid.security.Allowed` if any of the ``principals`` is allowed the ``permission`` in the current ``context``, else return an instance of :class:`pyramid.security.Denied`. """ def principals_allowed_by_permission(context, permission): """Return a set of principal identifiers allowed by the ``permission`` in ``context``. This behavior is optional; if you choose to not implement it you should define this method as something which raises a ``NotImplementedError``. This method will only be called when the ``pyramid.security.principals_allowed_by_permission`` API is used.""" ... # LIB pyramid class IAuthorizationPolicy(Interface): """An object representing a Pyramid authorization policy. .. deprecated:: 2.0 Authentication policies have been removed in favor of security policies. See :ref:`upgrading_auth_20` for more information. """ def permits(context, principals, permission): """Return an instance of :class:`pyramid.security.Allowed` if any of the ``principals`` is allowed the ``permission`` in the current ``context``, else return an instance of :class:`pyramid.security.Denied`. """ ... def principals_allowed_by_permission(context, permission): """Return a set of principal identifiers allowed by the ``permission`` in ``context``. This behavior is optional; if you choose to not implement it you should define this method as something which raises a ``NotImplementedError``. This method will only be called when the ``pyramid.security.principals_allowed_by_permission`` API is used.""" # FILE kinto/kinto/core/authorization.py class RouteFactory: resource_name = None on_plural_endpoint = False required_permission = None permission_object_id = None current_object = None shared_ids = None method_permissions = { "head": "read", "get": "read", "post": "create", "delete": "write", "patch": "write", } def __init__(self, request): # Store some shortcuts. permission = request.registry.permission self._check_permission = permission.check_permission self._get_accessible_objects = permission.get_accessible_objects self.get_prefixed_principals = functools.partial(utils.prefixed_principals, request) # Store current resource and required permission. 
service = utils.current_service(request) is_on_resource = ( service is not None and hasattr(service, "viewset") and hasattr(service, "resource") ) self._resource = None if is_on_resource: self.resource_name = request.current_resource_name self.on_plural_endpoint = getattr(service, "type", None) == "plural" # Check if this request targets an individual object. # Its existence will affect permissions checking (cf `_find_required_permission()`). # There are cases where the permission is not directly related to the HTTP method, # For example: # - with POST on plural endpoint, with an id supplied # - with PUT on an object, which can either be creation or update is_write_on_object = not self.on_plural_endpoint and request.method.lower() in ( "put", "delete", "patch", ) is_post_on_plural = self.on_plural_endpoint and request.method.lower() == "post" if is_write_on_object or is_post_on_plural: # We instantiate the resource to determine the object targeted by the request. self._resource = resource = service.resource(request=request, context=self) if resource.object_id is not None: # Skip POST on plural without id. try: # Save a reference, to avoid refetching from storage in resource. self.current_object = resource.model.get_object(resource.object_id) except storage_exceptions.ObjectNotFoundError: pass self.permission_object_id, self.required_permission = self._find_required_permission( request, service ) # To obtain shared objects on a plural endpoint, use a match: self._object_id_match = self.get_permission_object_id(request, "*") self._settings = request.registry.settings def check_permission(self, principals, bound_perms): """Read allowed principals from settings, if not any, query the permission backend to check if view is allowed. """ if not bound_perms: bound_perms = [(self.resource_name, self.required_permission)] for _, permission in bound_perms: # With Kinto inheritance tree, we can have: `permission = "record:create"` if self.resource_name and permission.startswith(self.resource_name): setting = f"{permission.replace(':', '_')}_principals" else: setting = f"{self.resource_name}_{permission}_principals" allowed_principals = aslist(self._settings.get(setting, "")) if allowed_principals: if bool(set(allowed_principals) & set(principals)): return True return self._check_permission(principals, bound_perms) def fetch_shared_objects(self, perm, principals, get_bound_permissions): """Fetch objects that are readable or writable for the current principals. See :meth:`kinto.core.authorization.AuthorizationPolicy.permits` If no object is shared, it returns None. .. warning:: This sets the ``shared_ids`` attribute to the context with the return value. The attribute is then read by :class:`kinto.core.resource.Resource` """ if get_bound_permissions: bound_perms = get_bound_permissions(self._object_id_match, perm) else: bound_perms = [(self._object_id_match, perm)] by_obj_id = self._get_accessible_objects(principals, bound_perms, with_children=False) ids = by_obj_id.keys() # Store for later use in ``Resource``. self.shared_ids = [self._extract_object_id(id_) for id_ in ids] return self.shared_ids def get_permission_object_id(self, request, object_id=None): """Returns the permission object id for the current request. In the nominal case, it is just the current URI without version prefix. For plural endpoint, it is the related object URI using the specified `object_id`. 
See :meth:`kinto.core.resource.model.SharableModel` and :meth:`kinto.core.authorization.RouteFactory.__init__` """ object_uri = utils.strip_uri_prefix(request.path) if self.on_plural_endpoint and object_id is not None: # With the current request on a plural endpoint, the object URI must # be found out by inspecting the "plural" service and its sibling # "object" service. (see `register_resource()`) matchdict = {**request.matchdict, "id": object_id} try: object_uri = utils.instance_uri(request, self.resource_name, **matchdict) object_uri = object_uri.replace("%2A", "*") except KeyError: # Maybe the resource has no single object endpoint. # We consider that object URIs in permissions backend will # be stored naively: object_uri = f"{object_uri}/{object_id}" return object_uri def _extract_object_id(self, object_uri): # XXX: Rewrite using kinto.core.utils.view_lookup() and matchdict['id'] return object_uri.split("/")[-1] def _find_required_permission(self, request, service): """Find out what is the permission object id and the required permission. .. note:: This method saves an attribute ``self.current_object`` used in :class:`kinto.core.resource.Resource`. """ # By default, it's a URI a and permission associated to the method. permission_object_id = self.get_permission_object_id(request) method = request.method.lower() required_permission = self.method_permissions.get(method) # For create permission, the object id is the plural endpoint. plural_path = str(service.plural_path) plural_path = plural_path.format_map(request.matchdict) # In the case of a "PUT", check if the targetted object already # exists, return "write" if it does, "create" otherwise. if request.method.lower() == "put": if self.current_object is None: # The object does not exist, the permission to create on # the related plural endpoint is required. permission_object_id = plural_path required_permission = "create" else: # For safe creations, the user needs a create permission. # See Kinto/kinto#792 if request.headers.get("If-None-Match") == "*": permission_object_id = plural_path required_permission = "create" else: required_permission = "write" # In the case of a "POST" on a plural endpoint, if an "id" was # specified, then the object is returned. The required permission # is thus "read" on this object. if request.method.lower() == "post" and self.current_object is not None: permission_object_id = self.get_permission_object_id( request, object_id=self._resource.object_id ) required_permission = "read" return (permission_object_id, required_permission) # FILE kinto/kinto/core/authorization.py class RouteFactory: def __init__(self, request): # Store some shortcuts. permission = request.registry.permission self._check_permission = permission.check_permission self._get_accessible_objects = permission.get_accessible_objects self.get_prefixed_principals = functools.partial(utils.prefixed_principals, request) # Store current resource and required permission. service = utils.current_service(request) is_on_resource = ( service is not None and hasattr(service, "viewset") and hasattr(service, "resource") ) self._resource = None if is_on_resource: self.resource_name = request.current_resource_name self.on_plural_endpoint = getattr(service, "type", None) == "plural" # Check if this request targets an individual object. # Its existence will affect permissions checking (cf `_find_required_permission()`). 
# There are cases where the permission is not directly related to the HTTP method, # For example: # - with POST on plural endpoint, with an id supplied # - with PUT on an object, which can either be creation or update is_write_on_object = not self.on_plural_endpoint and request.method.lower() in ( "put", "delete", "patch", ) is_post_on_plural = self.on_plural_endpoint and request.method.lower() == "post" if is_write_on_object or is_post_on_plural: # We instantiate the resource to determine the object targeted by the request. self._resource = resource = service.resource(request=request, context=self) if resource.object_id is not None: # Skip POST on plural without id. try: # Save a reference, to avoid refetching from storage in resource. self.current_object = resource.model.get_object(resource.object_id) except storage_exceptions.ObjectNotFoundError: pass self.permission_object_id, self.required_permission = self._find_required_permission( request, service ) # To obtain shared objects on a plural endpoint, use a match: self._object_id_match = self.get_permission_object_id(request, "*") self._settings = request.registry.settings def check_permission(self, principals, bound_perms): """Read allowed principals from settings, if not any, query the permission backend to check if view is allowed. """ ... def fetch_shared_objects(self, perm, principals, get_bound_permissions): """Fetch objects that are readable or writable for the current principals. See :meth:`kinto.core.authorization.AuthorizationPolicy.permits` If no object is shared, it returns None. .. warning:: This sets the ``shared_ids`` attribute to the context with the return value. The attribute is then read by :class:`kinto.core.resource.Resource` """ if get_bound_permissions: bound_perms = get_bound_permissions(self._object_id_match, perm) else: bound_perms = [(self._object_id_match, perm)] by_obj_id = self._get_accessible_objects(principals, bound_perms, with_children=False) ids = by_obj_id.keys() # Store for later use in ``Resource``. self.shared_ids = [self._extract_object_id(id_) for id_ in ids] return self.shared_ids def get_permission_object_id(self, request, object_id=None): """Returns the permission object id for the current request. In the nominal case, it is just the current URI without version prefix. For plural endpoint, it is the related object URI using the specified `object_id`. See :meth:`kinto.core.resource.model.SharableModel` and :meth:`kinto.core.authorization.RouteFactory.__init__` """ ... def _extract_object_id(self, object_uri): ... def _find_required_permission(self, request, service): """Find out what is the permission object id and the required permission. .. note:: This method saves an attribute ``self.current_object`` used in :class:`kinto.core.resource.Resource`. """ ... Based on the information above, please complete the function: #CURRENT_FILE: kinto/kinto/core/authorization.py import functools import logging from pyramid.authorization import Authenticated from pyramid.interfaces import IAuthorizationPolicy from pyramid.settings import aslist from zope.interface import implementer from kinto.core import utils from kinto.core.storage import exceptions as storage_exceptions def groupfinder(userid, request): """Fetch principals from permission backend for the specified `userid`. This is plugged by default using the ``multiauth.groupfinder`` setting. """
kinto/kinto/core/authorization.py
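A sketch of the completion: ask the permission backend for the user's principals, memoized per request so batch sub-requests don't query the backend repeatedly. request.prefixed_userid, request.bound_data and backend.get_user_principals() are assumptions about the kinto.core request and backend APIs.

    def groupfinder(userid, request):
        backend = getattr(request.registry, "permission", None)
        if not backend:
            # No permission backend configured: no extra principals.
            return []
        # Prefer the policy-prefixed user id when available.
        if getattr(request, "prefixed_userid", None):
            userid = request.prefixed_userid
        reify_key = userid + "_principals"
        if reify_key not in request.bound_data:
            request.bound_data[reify_key] = backend.get_user_principals(userid)
        return request.bound_data[reify_key]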
kinto.core.utils.json.dumps
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE kinto/kinto/core/utils.py def read_env(key, value): """Read the setting key from environment variables. :param key: the setting name :param value: default value if undefined in environment :returns: the value from environment, coerced to python type, or the (uncoerced) default value """ envkey = key.replace(".", "_").replace("-", "_").upper() if envkey in os.environ: return native_value(os.environ[envkey]) return value # LIB jsonpatch.py class JsonPatch(object): def __init__(self, patch, pointer_cls=JsonPointer): self.patch = patch self.pointer_cls = pointer_cls # Verify that the structure of the patch document # is correct by retrieving each patch element. # Much of the validation is done in the initializer # though some is delayed until the patch is applied. for op in self.patch: # We're only checking for basestring in the following check # for two reasons: # # - It should come from JSON, which only allows strings as # dictionary keys, so having a string here unambiguously means # someone used: {"op": ..., ...} instead of [{"op": ..., ...}]. # # - There's no possible false positive: if someone give a sequence # of mappings, this won't raise. if isinstance(op, basestring): raise InvalidJsonPatch("Document is expected to be sequence of " "operations, got a sequence of strings.") self._get_operation(op) def __str__(self): """str(self) -> self.to_string()""" ... def __bool__(self): ... def __iter__(self): ... def __hash__(self): ... def __eq__(self, other): ... def __ne__(self, other): ... def from_string(cls, patch_str, loads=None, pointer_cls=JsonPointer): """Creates JsonPatch instance from string source. :param patch_str: JSON patch as raw string. :type patch_str: str :param loads: A function of one argument that loads a serialized JSON string. :type loads: function :param pointer_cls: JSON pointer class to use. :type pointer_cls: Type[JsonPointer] :return: :class:`JsonPatch` instance. """ ... def from_diff( """Creates JsonPatch instance based on comparison of two document objects. Json patch would be created for `src` argument against `dst` one. :param src: Data source document object. :type src: dict :param dst: Data source document object. :type dst: dict :param dumps: A function of one argument that produces a serialized JSON string. :type dumps: function :param pointer_cls: JSON pointer class to use. :type pointer_cls: Type[JsonPointer] :return: :class:`JsonPatch` instance. >>> src = {'foo': 'bar', 'numbers': [1, 3, 4, 8]} >>> dst = {'baz': 'qux', 'numbers': [1, 4, 7]} >>> patch = JsonPatch.from_diff(src, dst) >>> new = patch.apply(src) >>> new == dst True """ ... def to_string(self, dumps=None): """Returns patch set as JSON string.""" json_dumper = dumps or self.json_dumper return json_dumper(self.patch) def _ops(self): ... def apply(self, obj, in_place=False): """Applies the patch to a given object. :param obj: Document object. :type obj: dict :param in_place: Tweaks the way how patch would be applied - directly to specified `obj` or to its copy. :type in_place: bool :return: Modified `obj`. """ ... def _get_operation(self, operation): ... # FILE kinto/kinto/core/utils.py def native_value(value): """Convert string value to native python values. :param str value: value to interprete. 
:returns: the value coerced to python type """ if isinstance(value, str): try: value = json.loads(value) except ValueError: return value return value # LIB jsonpatch.py class PatchOperation(object): """A single operation inside a JSON Patch.""" def __init__(self, operation, pointer_cls=JsonPointer): self.pointer_cls = pointer_cls if not operation.__contains__('path'): raise InvalidJsonPatch("Operation must have a 'path' member") if isinstance(operation['path'], self.pointer_cls): self.location = operation['path'].path self.pointer = operation['path'] else: self.location = operation['path'] try: self.pointer = self.pointer_cls(self.location) except TypeError as ex: raise InvalidJsonPatch("Invalid 'path'") self.operation = operation def apply(self, obj): """Abstract method that applies a patch operation to the specified object.""" ... def __hash__(self): ... def __eq__(self, other): ... def __ne__(self, other): ... def path(self): ... def key(self): try: return int(self.pointer.parts[-1]) except ValueError: return self.pointer.parts[-1] def key(self, value): self.pointer.parts[-1] = str(value) self.location = self.pointer.path self.operation['path'] = self.location # LIB memcache.py class Client(threading.local): """Object representing a pool of memcache servers. See L{memcache} for an overview. In all cases where a key is used, the key can be either: 1. A simple hashable type (string, integer, etc.). 2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid making this module calculate a hash value. You may prefer, for example, to keep all of a given user's objects on the same memcache server, so you could use the user's unique id as the hash value. @group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog @group Insertion: set, add, replace, set_multi @group Retrieval: get, get_multi @group Integers: incr, decr @group Removal: delete, delete_multi @sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\ set, set_multi, add, replace, get, get_multi, incr, decr, delete, delete_multi """ def __init__(self, servers, debug=0, pickleProtocol=0, pickler=pickle.Pickler, unpickler=pickle.Unpickler, compressor=zlib.compress, decompressor=zlib.decompress, pload=None, pid=None, server_max_key_length=None, server_max_value_length=None, dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT, cache_cas=False, flush_on_reconnect=0, check_keys=True): """Create a new Client object with the given list of servers. @param servers: C{servers} is passed to L{set_servers}. @param debug: whether to display error messages when a server can't be contacted. @param pickleProtocol: number to mandate protocol used by (c)Pickle. @param pickler: optional override of default Pickler to allow subclassing. @param unpickler: optional override of default Unpickler to allow subclassing. @param pload: optional persistent_load function to call on pickle loading. Useful for cPickle since subclassing isn't allowed. @param pid: optional persistent_id function to call on pickle storing. Useful for cPickle since subclassing isn't allowed. @param dead_retry: number of seconds before retrying a blacklisted server. Default to 30 s. @param socket_timeout: timeout in seconds for all calls to a server. Defaults to 3 seconds. @param cache_cas: (default False) If true, cas operations will be cached. WARNING: This cache is not expired internally, if you have a long-running process you will need to expire it manually via client.reset_cas(), or the cache can grow unlimited. 
@param server_max_key_length: (default SERVER_MAX_KEY_LENGTH) Data that is larger than this will not be sent to the server. @param server_max_value_length: (default SERVER_MAX_VALUE_LENGTH) Data that is larger than this will not be sent to the server. @param flush_on_reconnect: optional flag which prevents a scenario that can cause stale data to be read: If there's more than one memcached server and the connection to one is interrupted, keys that mapped to that server will get reassigned to another. If the first server comes back, those keys will map to it again. If it still has its data, get()s can read stale data that was overwritten on another server. This flag is off by default for backwards compatibility. @param check_keys: (default True) If True, the key is checked to ensure it is the correct length and composed of the right characters. """ super(Client, self).__init__() self.debug = debug self.dead_retry = dead_retry self.socket_timeout = socket_timeout self.flush_on_reconnect = flush_on_reconnect self.set_servers(servers) self.stats = {} self.cache_cas = cache_cas self.reset_cas() self.do_check_key = check_keys # Allow users to modify pickling/unpickling behavior self.pickleProtocol = pickleProtocol self.pickler = pickler self.unpickler = unpickler self.compressor = compressor self.decompressor = decompressor self.persistent_load = pload self.persistent_id = pid self.server_max_key_length = server_max_key_length if self.server_max_key_length is None: self.server_max_key_length = SERVER_MAX_KEY_LENGTH self.server_max_value_length = server_max_value_length if self.server_max_value_length is None: self.server_max_value_length = SERVER_MAX_VALUE_LENGTH # figure out the pickler style file = BytesIO() try: pickler = self.pickler(file, protocol=self.pickleProtocol) self.picklerIsKeyword = True except TypeError: self.picklerIsKeyword = False def _encode_key(self, key): ... def _encode_cmd(self, cmd, key, headers, noreply, *args): ... def reset_cas(self): """Reset the cas cache. This is only used if the Client() object was created with "cache_cas=True". If used, this cache does not expire internally, so it can grow unbounded if you do not clear it yourself. """ ... def set_servers(self, servers): """Set the pool of servers used by this client. @param servers: an array of servers. Servers can be passed in two forms: 1. Strings of the form C{"host:port"}, which implies a default weight of 1. 2. Tuples of the form C{("host:port", weight)}, where C{weight} is an integer weight value. """ ... def get_stats(self, stat_args=None): """Get statistics from each of the servers. @param stat_args: Additional arguments to pass to the memcache "stats" command. @return: A list of tuples ( server_identifier, stats_dictionary ). The dictionary contains a number of name/value pairs specifying the name of the status field and the string value associated with it. The values are not converted from strings. """ ... def get_slab_stats(self): ... def get_slabs(self): ... def flush_all(self): """Expire all data in memcache servers that are reachable.""" ... def debuglog(self, str): ... def _statlog(self, func): ... def forget_dead_hosts(self): """Reset every host in the pool to an "alive" state.""" ... def _init_buckets(self): ... def _get_server(self, key): ... def disconnect_all(self): ... def delete_multi(self, keys, time=None, key_prefix='', noreply=False): """Delete multiple keys in the memcache doing just one query. 
>>> notset_keys = mc.set_multi({'a1' : 'val1', 'a2' : 'val2'}) >>> mc.get_multi(['a1', 'a2']) == {'a1' : 'val1','a2' : 'val2'} 1 >>> mc.delete_multi(['key1', 'key2']) 1 >>> mc.get_multi(['key1', 'key2']) == {} 1 This method is recommended over iterated regular L{delete}s as it reduces total latency, since your app doesn't have to wait for each round-trip of L{delete} before sending the next one. @param keys: An iterable of keys to clear @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay. @param key_prefix: Optional string to prepend to each key when sending to memcache. See docs for L{get_multi} and L{set_multi}. @param noreply: optional parameter instructs the server to not send the reply. @return: 1 if no failure in communication with any memcacheds. @rtype: int """ ... def delete(self, key, time=None, noreply=False): '''Deletes a key from the memcache. @return: Nonzero on success. @param time: number of seconds any subsequent set / update commands should fail. Defaults to None for no delay. @param noreply: optional parameter instructs the server to not send the reply. @rtype: int ''' ... def touch(self, key, time=0, noreply=False): '''Updates the expiration time of a key in memcache. @return: Nonzero on success. @param time: Tells memcached the time which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. @param noreply: optional parameter instructs the server to not send the reply. @rtype: int ''' ... def _deletetouch(self, expected, cmd, key, time=0, noreply=False): ... def incr(self, key, delta=1, noreply=False): """Increment value for C{key} by C{delta} Sends a command to the server to atomically increment the value for C{key} by C{delta}, or by 1 if C{delta} is unspecified. Returns None if C{key} doesn't exist on server, otherwise it returns the new value after incrementing. Note that the value for C{key} must already exist in the memcache, and it must be the string representation of an integer. >>> mc.set("counter", "20") # returns 1, indicating success 1 >>> mc.incr("counter") 21 >>> mc.incr("counter") 22 Overflow on server is not checked. Be aware of values approaching 2**32. See L{decr}. @param delta: Integer amount to increment by (should be zero or greater). @param noreply: optional parameter instructs the server to not send the reply. @return: New value after incrementing, no None for noreply or error. @rtype: int """ ... def decr(self, key, delta=1, noreply=False): """Decrement value for C{key} by C{delta} Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and new values are capped at 0. If server value is 1, a decrement of 2 returns 0, not -1. @param delta: Integer amount to decrement by (should be zero or greater). @param noreply: optional parameter instructs the server to not send the reply. @return: New value after decrementing, or None for noreply or error. @rtype: int """ ... def _incrdecr(self, cmd, key, delta, noreply=False): ... def add(self, key, val, time=0, min_compress_len=0, noreply=False): '''Add new key with value. Like L{set}, but only stores in memcache if the key doesn't already exist. @return: Nonzero on success. @rtype: int ''' ... def append(self, key, val, time=0, min_compress_len=0, noreply=False): '''Append the value to the end of the existing key's value. 
Only stores in memcache if key already exists. Also see L{prepend}. @return: Nonzero on success. @rtype: int ''' return self._set("append", key, val, time, min_compress_len, noreply) def prepend(self, key, val, time=0, min_compress_len=0, noreply=False): '''Prepend the value to the beginning of the existing key's value. Only stores in memcache if key already exists. Also see L{append}. @return: Nonzero on success. @rtype: int ''' ... def replace(self, key, val, time=0, min_compress_len=0, noreply=False): '''Replace existing key with value. Like L{set}, but only stores in memcache if the key already exists. The opposite of L{add}. @return: Nonzero on success. @rtype: int ''' ... def set(self, key, val, time=0, min_compress_len=0, noreply=False): '''Unconditionally sets a key to a given value in the memcache. The C{key} can optionally be an tuple, with the first element being the server hash value and the second being the key. If you want to avoid making this module calculate a hash value. You may prefer, for example, to keep all of a given user's objects on the same memcache server, so you could use the user's unique id as the hash value. @return: Nonzero on success. @rtype: int @param time: Tells memcached the time which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. @param min_compress_len: The threshold length to kick in auto-compression of the value using the compressor routine. If the value being cached is a string, then the length of the string is measured, else if the value is an object, then the length of the pickle result is measured. If the resulting attempt at compression yields a larger string than the input, then it is discarded. For backwards compatibility, this parameter defaults to 0, indicating don't ever try to compress. @param noreply: optional parameter instructs the server to not send the reply. ''' ... def cas(self, key, val, time=0, min_compress_len=0, noreply=False): '''Check and set (CAS) Sets a key to a given value in the memcache if it hasn't been altered since last fetched. (See L{gets}). The C{key} can optionally be an tuple, with the first element being the server hash value and the second being the key. If you want to avoid making this module calculate a hash value. You may prefer, for example, to keep all of a given user's objects on the same memcache server, so you could use the user's unique id as the hash value. @return: Nonzero on success. @rtype: int @param time: Tells memcached the time which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. @param min_compress_len: The threshold length to kick in auto-compression of the value using the compressor routine. If the value being cached is a string, then the length of the string is measured, else if the value is an object, then the length of the pickle result is measured. If the resulting attempt at compression yields a larger string than the input, then it is discarded. For backwards compatibility, this parameter defaults to 0, indicating don't ever try to compress. @param noreply: optional parameter instructs the server to not send the reply. ''' ... 
def _map_and_prefix_keys(self, key_iterable, key_prefix):
        """Map keys to the servers they will reside on.

        Compute the mapping of server (_Host instance) -> list of keys to
        stuff onto that server, as well as the mapping of prefixed key
        -> original key.
        """
        ...

    def set_multi(self, mapping, time=0, key_prefix='',
                  min_compress_len=0, noreply=False):
        '''Sets multiple keys in the memcache doing just one query.

        >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
        >>> keys = mc.get_multi(['key1', 'key2'])
        >>> keys == {'key1': 'val1', 'key2': 'val2'}
        True

        This method is recommended over regular L{set} as it lowers
        the number of total packets flying around your network,
        reducing total latency, since your app doesn't have to wait
        for each round-trip of L{set} before sending the next one.

        @param mapping: A dict of key/value pairs to set.

        @param time: Tells memcached the time which this value should
        expire, either as a delta number of seconds, or an absolute
        unix time-since-the-epoch value. See the memcached protocol
        docs section "Storage Commands" for more info on <exptime>.
        We default to 0 == cache forever.

        @param key_prefix: Optional string to prepend to each key when
        sending to memcache. Allows you to efficiently stuff these
        keys into a pseudo-namespace in memcache:

        >>> notset_keys = mc.set_multi(
        ...     {'key1' : 'val1', 'key2' : 'val2'},
        ...     key_prefix='subspace_')
        >>> len(notset_keys) == 0
        True
        >>> keys = mc.get_multi(['subspace_key1', 'subspace_key2'])
        >>> keys == {'subspace_key1': 'val1', 'subspace_key2': 'val2'}
        True

        Causes key 'subspace_key1' and 'subspace_key2' to be set.
        Useful in conjunction with a higher-level layer which applies
        namespaces to data in memcache. In this case, the return result
        would be the list of notset original keys, prefix not applied.

        @param min_compress_len: The threshold length to kick in
        auto-compression of the value using the compressor routine. If
        the value being cached is a string, then the length of the
        string is measured, else if the value is an object, then the
        length of the pickle result is measured. If the resulting
        attempt at compression yields a larger string than the input,
        then it is discarded. For backwards compatibility, this
        parameter defaults to 0, indicating don't ever try to compress.

        @param noreply: optional parameter instructs the server to not
        send the reply.

        @return: List of keys which failed to be stored [ memcache out
        of memory, etc. ].

        @rtype: list
        '''
        ...

    def _val_to_store_info(self, val, min_compress_len):
        """Transform val to a storable representation.

        Returns a tuple of the flags, the length of the new value, and
        the new value itself.
        """
        ...

    def _set(self, cmd, key, val, time, min_compress_len=0, noreply=False):
        ...

    def _get(self, cmd, key):
        ...

    def get(self, key):
        '''Retrieves a key from the memcache.

        @return: The value or None.
        '''
        ...

    def gets(self, key):
        '''Retrieves a key from the memcache. Used in conjunction with 'cas'.

        @return: The value or None.
        '''
        ...

    def get_multi(self, keys, key_prefix=''):
        '''Retrieves multiple keys from the memcache doing just one query.

        >>> success = mc.set("foo", "bar")
        >>> success = mc.set("baz", 42)
        >>> mc.get_multi(["foo", "baz", "foobar"]) == {
        ...     "foo": "bar", "baz": 42
        ... }
        1
        >>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == []
        1

        This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict
        will just have unprefixed keys 'k1', 'k2'.

        >>> mc.get_multi(['k1', 'k2', 'nonexist'], ...
key_prefix='pfx_') == {'k1' : 1, 'k2' : 2} 1 get_multi [ and L{set_multi} ] can take str()-ables like ints / longs as keys too. Such as your db pri key fields. They're rotored through str() before being passed off to memcache, with or without the use of a key_prefix. In this mode, the key_prefix could be a table name, and the key itself a db primary key number. >>> mc.set_multi({42: 'douglass adams', ... 46: 'and 2 just ahead of me'}, ... key_prefix='numkeys_') == [] 1 >>> mc.get_multi([46, 42], key_prefix='numkeys_') == { ... 42: 'douglass adams', ... 46: 'and 2 just ahead of me' ... } 1 This method is recommended over regular L{get} as it lowers the number of total packets flying around your network, reducing total latency, since your app doesn't have to wait for each round-trip of L{get} before sending the next one. See also L{set_multi}. @param keys: An array of keys. @param key_prefix: A string to prefix each key when we communicate with memcache. Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix. @return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the returned dictionary will not have it present. ''' ... def _expect_cas_value(self, server, line=None, raise_exception=False): ... def _expectvalue(self, server, line=None, raise_exception=False): ... def _recv_value(self, server, flags, rlen): ... def check_key(self, key, key_extra_len=0): """Checks sanity of key. Fails if: Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLength). Contains control characters (Raises MemcachedKeyCharacterError). Is not a string (Raises MemcachedStringEncodingError) Is an unicode string (Raises MemcachedStringEncodingError) Is not a string (Raises MemcachedKeyError) Is None (Raises MemcachedKeyError) """ ... Based on the information above, please complete the function: #CURRENT_FILE: kinto/kinto/core/utils.py import collections.abc as collections_abc import hashlib import hmac import os import re import time from base64 import b64decode, b64encode from binascii import hexlify from enum import Enum from urllib.parse import unquote import jsonpatch import rapidjson from colander import null from cornice import cors from pyramid import httpexceptions from pyramid.authorization import Authenticated from pyramid.interfaces import IRoutesMapper from pyramid.request import Request, apply_request_extensions from pyramid.settings import aslist from pyramid.view import render_view_to_response import sqlalchemy import memcache from kinto.core import Service class json: def dumps(v, **kw):
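A plausible completion for this stub — hedged, but consistent with the identical `json.dumps` body quoted verbatim in the next entry's context — simply delegates to rapidjson (already imported at the top of the stub file) after defaulting `bytes_mode`:

    class json:
        def dumps(v, **kw):
            # Sketch: same wrapper as the definition shown in the next
            # entry's context; setdefault only applies when the caller
            # did not pass bytes_mode explicitly.
            kw.setdefault("bytes_mode", rapidjson.BM_NONE)
            return rapidjson.dumps(v, **kw)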
kinto/kinto/core/utils.py
kinto.core.utils.json.loads
You are a Python programmer. Here is all the context you may find useful to complete the function: # LIB jsonpatch.py class PatchOperation(object): """A single operation inside a JSON Patch.""" def __init__(self, operation, pointer_cls=JsonPointer): self.pointer_cls = pointer_cls if not operation.__contains__('path'): raise InvalidJsonPatch("Operation must have a 'path' member") if isinstance(operation['path'], self.pointer_cls): self.location = operation['path'].path self.pointer = operation['path'] else: self.location = operation['path'] try: self.pointer = self.pointer_cls(self.location) except TypeError as ex: raise InvalidJsonPatch("Invalid 'path'") self.operation = operation def apply(self, obj): """Abstract method that applies a patch operation to the specified object.""" ... def __hash__(self): ... def __eq__(self, other): ... def __ne__(self, other): ... def path(self): ... def key(self): try: return int(self.pointer.parts[-1]) except ValueError: return self.pointer.parts[-1] def key(self, value): self.pointer.parts[-1] = str(value) self.location = self.pointer.path self.operation['path'] = self.location # LIB memcache.py class Client(threading.local): """Object representing a pool of memcache servers. See L{memcache} for an overview. In all cases where a key is used, the key can be either: 1. A simple hashable type (string, integer, etc.). 2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid making this module calculate a hash value. You may prefer, for example, to keep all of a given user's objects on the same memcache server, so you could use the user's unique id as the hash value. @group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog @group Insertion: set, add, replace, set_multi @group Retrieval: get, get_multi @group Integers: incr, decr @group Removal: delete, delete_multi @sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\ set, set_multi, add, replace, get, get_multi, incr, decr, delete, delete_multi """ def __init__(self, servers, debug=0, pickleProtocol=0, pickler=pickle.Pickler, unpickler=pickle.Unpickler, compressor=zlib.compress, decompressor=zlib.decompress, pload=None, pid=None, server_max_key_length=None, server_max_value_length=None, dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT, cache_cas=False, flush_on_reconnect=0, check_keys=True): """Create a new Client object with the given list of servers. @param servers: C{servers} is passed to L{set_servers}. @param debug: whether to display error messages when a server can't be contacted. @param pickleProtocol: number to mandate protocol used by (c)Pickle. @param pickler: optional override of default Pickler to allow subclassing. @param unpickler: optional override of default Unpickler to allow subclassing. @param pload: optional persistent_load function to call on pickle loading. Useful for cPickle since subclassing isn't allowed. @param pid: optional persistent_id function to call on pickle storing. Useful for cPickle since subclassing isn't allowed. @param dead_retry: number of seconds before retrying a blacklisted server. Default to 30 s. @param socket_timeout: timeout in seconds for all calls to a server. Defaults to 3 seconds. @param cache_cas: (default False) If true, cas operations will be cached. WARNING: This cache is not expired internally, if you have a long-running process you will need to expire it manually via client.reset_cas(), or the cache can grow unlimited. 
@param server_max_key_length: (default SERVER_MAX_KEY_LENGTH) Data that is larger than this will not be sent to the server. @param server_max_value_length: (default SERVER_MAX_VALUE_LENGTH) Data that is larger than this will not be sent to the server. @param flush_on_reconnect: optional flag which prevents a scenario that can cause stale data to be read: If there's more than one memcached server and the connection to one is interrupted, keys that mapped to that server will get reassigned to another. If the first server comes back, those keys will map to it again. If it still has its data, get()s can read stale data that was overwritten on another server. This flag is off by default for backwards compatibility. @param check_keys: (default True) If True, the key is checked to ensure it is the correct length and composed of the right characters. """ super(Client, self).__init__() self.debug = debug self.dead_retry = dead_retry self.socket_timeout = socket_timeout self.flush_on_reconnect = flush_on_reconnect self.set_servers(servers) self.stats = {} self.cache_cas = cache_cas self.reset_cas() self.do_check_key = check_keys # Allow users to modify pickling/unpickling behavior self.pickleProtocol = pickleProtocol self.pickler = pickler self.unpickler = unpickler self.compressor = compressor self.decompressor = decompressor self.persistent_load = pload self.persistent_id = pid self.server_max_key_length = server_max_key_length if self.server_max_key_length is None: self.server_max_key_length = SERVER_MAX_KEY_LENGTH self.server_max_value_length = server_max_value_length if self.server_max_value_length is None: self.server_max_value_length = SERVER_MAX_VALUE_LENGTH # figure out the pickler style file = BytesIO() try: pickler = self.pickler(file, protocol=self.pickleProtocol) self.picklerIsKeyword = True except TypeError: self.picklerIsKeyword = False def _encode_key(self, key): ... def _encode_cmd(self, cmd, key, headers, noreply, *args): ... def reset_cas(self): """Reset the cas cache. This is only used if the Client() object was created with "cache_cas=True". If used, this cache does not expire internally, so it can grow unbounded if you do not clear it yourself. """ ... def set_servers(self, servers): """Set the pool of servers used by this client. @param servers: an array of servers. Servers can be passed in two forms: 1. Strings of the form C{"host:port"}, which implies a default weight of 1. 2. Tuples of the form C{("host:port", weight)}, where C{weight} is an integer weight value. """ ... def get_stats(self, stat_args=None): """Get statistics from each of the servers. @param stat_args: Additional arguments to pass to the memcache "stats" command. @return: A list of tuples ( server_identifier, stats_dictionary ). The dictionary contains a number of name/value pairs specifying the name of the status field and the string value associated with it. The values are not converted from strings. """ ... def get_slab_stats(self): ... def get_slabs(self): ... def flush_all(self): """Expire all data in memcache servers that are reachable.""" ... def debuglog(self, str): ... def _statlog(self, func): ... def forget_dead_hosts(self): """Reset every host in the pool to an "alive" state.""" ... def _init_buckets(self): ... def _get_server(self, key): ... def disconnect_all(self): ... def delete_multi(self, keys, time=None, key_prefix='', noreply=False): """Delete multiple keys in the memcache doing just one query. 
>>> notset_keys = mc.set_multi({'a1' : 'val1', 'a2' : 'val2'}) >>> mc.get_multi(['a1', 'a2']) == {'a1' : 'val1','a2' : 'val2'} 1 >>> mc.delete_multi(['key1', 'key2']) 1 >>> mc.get_multi(['key1', 'key2']) == {} 1 This method is recommended over iterated regular L{delete}s as it reduces total latency, since your app doesn't have to wait for each round-trip of L{delete} before sending the next one. @param keys: An iterable of keys to clear @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay. @param key_prefix: Optional string to prepend to each key when sending to memcache. See docs for L{get_multi} and L{set_multi}. @param noreply: optional parameter instructs the server to not send the reply. @return: 1 if no failure in communication with any memcacheds. @rtype: int """ ... def delete(self, key, time=None, noreply=False): '''Deletes a key from the memcache. @return: Nonzero on success. @param time: number of seconds any subsequent set / update commands should fail. Defaults to None for no delay. @param noreply: optional parameter instructs the server to not send the reply. @rtype: int ''' ... def touch(self, key, time=0, noreply=False): '''Updates the expiration time of a key in memcache. @return: Nonzero on success. @param time: Tells memcached the time which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. @param noreply: optional parameter instructs the server to not send the reply. @rtype: int ''' ... def _deletetouch(self, expected, cmd, key, time=0, noreply=False): ... def incr(self, key, delta=1, noreply=False): """Increment value for C{key} by C{delta} Sends a command to the server to atomically increment the value for C{key} by C{delta}, or by 1 if C{delta} is unspecified. Returns None if C{key} doesn't exist on server, otherwise it returns the new value after incrementing. Note that the value for C{key} must already exist in the memcache, and it must be the string representation of an integer. >>> mc.set("counter", "20") # returns 1, indicating success 1 >>> mc.incr("counter") 21 >>> mc.incr("counter") 22 Overflow on server is not checked. Be aware of values approaching 2**32. See L{decr}. @param delta: Integer amount to increment by (should be zero or greater). @param noreply: optional parameter instructs the server to not send the reply. @return: New value after incrementing, no None for noreply or error. @rtype: int """ ... def decr(self, key, delta=1, noreply=False): """Decrement value for C{key} by C{delta} Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and new values are capped at 0. If server value is 1, a decrement of 2 returns 0, not -1. @param delta: Integer amount to decrement by (should be zero or greater). @param noreply: optional parameter instructs the server to not send the reply. @return: New value after decrementing, or None for noreply or error. @rtype: int """ ... def _incrdecr(self, cmd, key, delta, noreply=False): ... def add(self, key, val, time=0, min_compress_len=0, noreply=False): '''Add new key with value. Like L{set}, but only stores in memcache if the key doesn't already exist. @return: Nonzero on success. @rtype: int ''' ... def append(self, key, val, time=0, min_compress_len=0, noreply=False): '''Append the value to the end of the existing key's value. 
Only stores in memcache if key already exists. Also see L{prepend}. @return: Nonzero on success. @rtype: int ''' ... def prepend(self, key, val, time=0, min_compress_len=0, noreply=False): '''Prepend the value to the beginning of the existing key's value. Only stores in memcache if key already exists. Also see L{append}. @return: Nonzero on success. @rtype: int ''' ... def replace(self, key, val, time=0, min_compress_len=0, noreply=False): '''Replace existing key with value. Like L{set}, but only stores in memcache if the key already exists. The opposite of L{add}. @return: Nonzero on success. @rtype: int ''' ... def set(self, key, val, time=0, min_compress_len=0, noreply=False): '''Unconditionally sets a key to a given value in the memcache. The C{key} can optionally be an tuple, with the first element being the server hash value and the second being the key. If you want to avoid making this module calculate a hash value. You may prefer, for example, to keep all of a given user's objects on the same memcache server, so you could use the user's unique id as the hash value. @return: Nonzero on success. @rtype: int @param time: Tells memcached the time which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. @param min_compress_len: The threshold length to kick in auto-compression of the value using the compressor routine. If the value being cached is a string, then the length of the string is measured, else if the value is an object, then the length of the pickle result is measured. If the resulting attempt at compression yields a larger string than the input, then it is discarded. For backwards compatibility, this parameter defaults to 0, indicating don't ever try to compress. @param noreply: optional parameter instructs the server to not send the reply. ''' return self._set("set", key, val, time, min_compress_len, noreply) def cas(self, key, val, time=0, min_compress_len=0, noreply=False): '''Check and set (CAS) Sets a key to a given value in the memcache if it hasn't been altered since last fetched. (See L{gets}). The C{key} can optionally be an tuple, with the first element being the server hash value and the second being the key. If you want to avoid making this module calculate a hash value. You may prefer, for example, to keep all of a given user's objects on the same memcache server, so you could use the user's unique id as the hash value. @return: Nonzero on success. @rtype: int @param time: Tells memcached the time which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. @param min_compress_len: The threshold length to kick in auto-compression of the value using the compressor routine. If the value being cached is a string, then the length of the string is measured, else if the value is an object, then the length of the pickle result is measured. If the resulting attempt at compression yields a larger string than the input, then it is discarded. For backwards compatibility, this parameter defaults to 0, indicating don't ever try to compress. @param noreply: optional parameter instructs the server to not send the reply. ''' ... 
def _map_and_prefix_keys(self, key_iterable, key_prefix):
        """Map keys to the servers they will reside on.

        Compute the mapping of server (_Host instance) -> list of keys to
        stuff onto that server, as well as the mapping of prefixed key
        -> original key.
        """
        ...

    def set_multi(self, mapping, time=0, key_prefix='',
                  min_compress_len=0, noreply=False):
        '''Sets multiple keys in the memcache doing just one query.

        >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
        >>> keys = mc.get_multi(['key1', 'key2'])
        >>> keys == {'key1': 'val1', 'key2': 'val2'}
        True

        This method is recommended over regular L{set} as it lowers
        the number of total packets flying around your network,
        reducing total latency, since your app doesn't have to wait
        for each round-trip of L{set} before sending the next one.

        @param mapping: A dict of key/value pairs to set.

        @param time: Tells memcached the time which this value should
        expire, either as a delta number of seconds, or an absolute
        unix time-since-the-epoch value. See the memcached protocol
        docs section "Storage Commands" for more info on <exptime>.
        We default to 0 == cache forever.

        @param key_prefix: Optional string to prepend to each key when
        sending to memcache. Allows you to efficiently stuff these
        keys into a pseudo-namespace in memcache:

        >>> notset_keys = mc.set_multi(
        ...     {'key1' : 'val1', 'key2' : 'val2'},
        ...     key_prefix='subspace_')
        >>> len(notset_keys) == 0
        True
        >>> keys = mc.get_multi(['subspace_key1', 'subspace_key2'])
        >>> keys == {'subspace_key1': 'val1', 'subspace_key2': 'val2'}
        True

        Causes key 'subspace_key1' and 'subspace_key2' to be set.
        Useful in conjunction with a higher-level layer which applies
        namespaces to data in memcache. In this case, the return result
        would be the list of notset original keys, prefix not applied.

        @param min_compress_len: The threshold length to kick in
        auto-compression of the value using the compressor routine. If
        the value being cached is a string, then the length of the
        string is measured, else if the value is an object, then the
        length of the pickle result is measured. If the resulting
        attempt at compression yields a larger string than the input,
        then it is discarded. For backwards compatibility, this
        parameter defaults to 0, indicating don't ever try to compress.

        @param noreply: optional parameter instructs the server to not
        send the reply.

        @return: List of keys which failed to be stored [ memcache out
        of memory, etc. ].

        @rtype: list
        '''
        ...

    def _val_to_store_info(self, val, min_compress_len):
        """Transform val to a storable representation.

        Returns a tuple of the flags, the length of the new value, and
        the new value itself.
        """
        ...

    def _set(self, cmd, key, val, time, min_compress_len=0, noreply=False):
        ...

    def _get(self, cmd, key):
        ...

    def get(self, key):
        '''Retrieves a key from the memcache.

        @return: The value or None.
        '''
        ...

    def gets(self, key):
        '''Retrieves a key from the memcache. Used in conjunction with 'cas'.

        @return: The value or None.
        '''
        ...

    def get_multi(self, keys, key_prefix=''):
        '''Retrieves multiple keys from the memcache doing just one query.

        >>> success = mc.set("foo", "bar")
        >>> success = mc.set("baz", 42)
        >>> mc.get_multi(["foo", "baz", "foobar"]) == {
        ...     "foo": "bar", "baz": 42
        ... }
        1
        >>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == []
        1

        This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict
        will just have unprefixed keys 'k1', 'k2'.

        >>> mc.get_multi(['k1', 'k2', 'nonexist'], ...
key_prefix='pfx_') == {'k1' : 1, 'k2' : 2} 1 get_multi [ and L{set_multi} ] can take str()-ables like ints / longs as keys too. Such as your db pri key fields. They're rotored through str() before being passed off to memcache, with or without the use of a key_prefix. In this mode, the key_prefix could be a table name, and the key itself a db primary key number. >>> mc.set_multi({42: 'douglass adams', ... 46: 'and 2 just ahead of me'}, ... key_prefix='numkeys_') == [] 1 >>> mc.get_multi([46, 42], key_prefix='numkeys_') == { ... 42: 'douglass adams', ... 46: 'and 2 just ahead of me' ... } 1 This method is recommended over regular L{get} as it lowers the number of total packets flying around your network, reducing total latency, since your app doesn't have to wait for each round-trip of L{get} before sending the next one. See also L{set_multi}. @param keys: An array of keys. @param key_prefix: A string to prefix each key when we communicate with memcache. Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix. @return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the returned dictionary will not have it present. ''' ... def _expect_cas_value(self, server, line=None, raise_exception=False): ... def _expectvalue(self, server, line=None, raise_exception=False): ... def _recv_value(self, server, flags, rlen): ... def check_key(self, key, key_extra_len=0): """Checks sanity of key. Fails if: Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLength). Contains control characters (Raises MemcachedKeyCharacterError). Is not a string (Raises MemcachedStringEncodingError) Is an unicode string (Raises MemcachedStringEncodingError) Is not a string (Raises MemcachedKeyError) Is None (Raises MemcachedKeyError) """ ... Based on the information above, please complete the function: #CURRENT_FILE: kinto/kinto/core/utils.py import collections.abc as collections_abc import hashlib import hmac import os import re import time from base64 import b64decode, b64encode from binascii import hexlify from enum import Enum from urllib.parse import unquote import jsonpatch import rapidjson from colander import null from cornice import cors from pyramid import httpexceptions from pyramid.authorization import Authenticated from pyramid.interfaces import IRoutesMapper from pyramid.request import Request, apply_request_extensions from pyramid.settings import aslist from pyramid.view import render_view_to_response import sqlalchemy import memcache from kinto.core import Service class json: def dumps(v, **kw): kw.setdefault("bytes_mode", rapidjson.BM_NONE) return rapidjson.dumps(v, **kw) def load(v, **kw): kw.setdefault("number_mode", rapidjson.NM_NATIVE) return rapidjson.load(v, **kw) def loads(v, **kw):
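Hedging only lightly here: the class body in the stub already shows `load` defaulting `number_mode` to `rapidjson.NM_NATIVE`, so the symmetric completion for `loads` parses from a string value instead of a file-like object:

    def loads(v, **kw):
        # Sketch mirroring the load() wrapper shown directly above;
        # rapidjson.loads takes the JSON document as a str/bytes value
        # rather than a stream.
        kw.setdefault("number_mode", rapidjson.NM_NATIVE)
        return rapidjson.loads(v, **kw)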
kinto/kinto/core/utils.py
kinto.core.utils.hmac_digest
You are a Python programmer. Here is all the context you may find useful to complete the function: # LIB jsonpatch.py class PatchOperation(object): """A single operation inside a JSON Patch.""" def __init__(self, operation, pointer_cls=JsonPointer): self.pointer_cls = pointer_cls if not operation.__contains__('path'): raise InvalidJsonPatch("Operation must have a 'path' member") if isinstance(operation['path'], self.pointer_cls): self.location = operation['path'].path self.pointer = operation['path'] else: self.location = operation['path'] try: self.pointer = self.pointer_cls(self.location) except TypeError as ex: raise InvalidJsonPatch("Invalid 'path'") self.operation = operation def apply(self, obj): """Abstract method that applies a patch operation to the specified object.""" ... def __hash__(self): ... def __eq__(self, other): ... def __ne__(self, other): ... def path(self): ... def key(self): try: return int(self.pointer.parts[-1]) except ValueError: return self.pointer.parts[-1] def key(self, value): self.pointer.parts[-1] = str(value) self.location = self.pointer.path self.operation['path'] = self.location Based on the information above, please complete the function: #CURRENT_FILE: kinto/kinto/core/utils.py import collections.abc as collections_abc import hashlib import hmac import os import re import time from base64 import b64decode, b64encode from binascii import hexlify from enum import Enum from urllib.parse import unquote import jsonpatch import rapidjson from colander import null from cornice import cors from pyramid import httpexceptions from pyramid.authorization import Authenticated from pyramid.interfaces import IRoutesMapper from pyramid.request import Request, apply_request_extensions from pyramid.settings import aslist from pyramid.view import render_view_to_response import sqlalchemy import memcache from kinto.core import Service def hmac_digest(secret, message, encoding="utf-8"): """Return hex digest of a message HMAC using secret"""
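A minimal sketch of the completion, assuming SHA-256 as the digest algorithm — the docstring promises only a "hex digest of a message HMAC", so the hash choice is an assumption, not something the prompt pins down. `hmac` and `hashlib` are already imported at the top of the stub file, and `message` is assumed to be text:

    def hmac_digest(secret, message, encoding="utf-8"):
        """Return hex digest of a message HMAC using secret"""
        # Assumption: SHA-256; accept either str or bytes secrets.
        if isinstance(secret, str):
            secret = secret.encode(encoding)
        return hmac.new(secret, message.encode(encoding), hashlib.sha256).hexdigest()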
kinto/kinto/core/utils.py
kinto.core.utils.current_service
You are a Python programmer. Here is all the context you may find useful to complete the function:

# FILE kinto/kinto/core/utils.py
def build_request(original, dict_obj):
    """
    Transform a dict object into a :class:`pyramid.request.Request` object.

    It sets a ``parent`` attribute on the resulting request assigned with
    the `original` request specified.

    :param original: the original request.
    :param dict_obj: a dict object with the sub-request specifications.
    """
    api_prefix = "/{}".format(original.upath_info.split("/")[1])
    path = dict_obj["path"]
    if not path.startswith(api_prefix):
        path = api_prefix + path
    path = path.encode("utf-8")
    method = dict_obj.get("method") or "GET"
    headers = dict(original.headers)
    headers.update(**dict_obj.get("headers") or {})
    # Body can have different length, do not use original header.
    headers.pop("Content-Length", None)
    payload = dict_obj.get("body") or ""
    # Payload is always a dict (from ``BatchRequestSchema.body``).
    # Send it as JSON for subrequests.
    if isinstance(payload, dict):
        headers["Content-Type"] = "application/json; charset=utf-8"
        payload = json.dumps(payload)
    request = Request.blank(
        path=path.decode("latin-1"), headers=headers, POST=payload, method=method
    )
    request.registry = original.registry
    apply_request_extensions(request)
    # This is used to distinguish subrequests from direct incoming requests.
    # See :func:`kinto.core.initialization.setup_logging()`
    request.parent = original
    return request

# LIB pyramid
class IRoutesMapper(Interface):
    """Interface representing a Routes ``Mapper`` object"""
    def get_routes():
        """Return a sequence of Route objects registered in the mapper.
        Static routes will not be returned in this sequence."""
        ...
    def has_routes():
        """Returns ``True`` if any route has been registered."""
        ...
    def get_route(name):
        """Returns an ``IRoute`` object if a route with the name ``name``
        was registered, otherwise return ``None``."""
        ...
    def connect(name, pattern, factory=None, predicates=(),
                pregenerator=None, static=True):
        """Add a new route."""
        ...
    def generate(name, kw):
        """Generate a URL using the route named ``name`` with the
        keywords implied by kw"""
        ...
    def __call__(request):
        """Return a dictionary containing matching information for
        the request; the ``route`` key of this dictionary will either
        be a Route object or ``None`` if no route matched; the
        ``match`` key will be the matchdict or ``None`` if no route
        matched. Static routes will not be considered for matching."""

# FILE kinto/kinto/core/utils.py
def current_resource_name(request):
    """Return the name used when the kinto.core resource was registered
    along its viewset.

    :returns: the resource identifier.
    :rtype: str
    """
    service = current_service(request)
    resource_name = service.viewset.get_name(service.resource)
    return resource_name

# LIB pyramid
class IRoutesMapper(Interface):
    """Interface representing a Routes ``Mapper`` object"""
    def get_routes():
        """Return a sequence of Route objects registered in the mapper.
Static routes will not be returned in this sequence.""" def has_routes(): """Returns ``True`` if any route has been registered.""" def get_route(name): """Returns an ``IRoute`` object if a route with the name ``name`` was registered, otherwise return ``None``.""" def connect( name, pattern, factory=None, predicates=(), pregenerator=None, static=True, ): """Add a new route.""" def generate(name, kw): """Generate a URL using the route named ``name`` with the keywords implied by kw""" def __call__(request): """Return a dictionary containing matching information for the request; the ``route`` key of this dictionary will either be a Route object or ``None`` if no route matched; the ``match`` key will be the matchdict or ``None`` if no route matched. Static routes will not be considered for matching.""" # LIB pyramid class Request( BaseRequest, URLMethodsMixin, CallbackMethodsMixin, InstancePropertyMixin, LocalizerRequestMixin, SecurityAPIMixin, AuthenticationAPIMixin, ViewMethodsMixin, ): """ A subclass of the :term:`WebOb` Request class. An instance of this class is created by the :term:`router` and is provided to a view callable (and to other subsystems) as the ``request`` argument. The documentation below (save for the ``add_response_callback`` and ``add_finished_callback`` methods, which are defined in this subclass itself, and the attributes ``context``, ``registry``, ``root``, ``subpath``, ``traversed``, ``view_name``, ``virtual_root`` , and ``virtual_root_path``, each of which is added to the request by the :term:`router` at request ingress time) are autogenerated from the WebOb source code used when this documentation was generated. Due to technical constraints, we can't yet display the WebOb version number from which this documentation is autogenerated, but it will be the 'prevailing WebOb version' at the time of the release of this :app:`Pyramid` version. See https://webob.org/ for further information. """ exception = None exc_info = None matchdict = None matched_route = None request_iface = IRequest ResponseClass = Response @reify def tmpl_context(self): # docs-deprecated template context for Pylons-like apps; do not # remove. return TemplateContext() @reify def session(self): """Obtain the :term:`session` object associated with this request. If a :term:`session factory` has not been registered during application configuration, a :class:`pyramid.exceptions.ConfigurationError` will be raised""" factory = self.registry.queryUtility(ISessionFactory) if factory is None: raise AttributeError( 'No session factory registered ' '(see the Sessions chapter of the Pyramid documentation)' ) return factory(self) @reify def response(self): """This attribute is actually a "reified" property which returns an instance of the :class:`pyramid.response.Response`. class. The response object returned does not exist until this attribute is accessed. Subsequent accesses will return the same Response object. The ``request.response`` API is used by renderers. A render obtains the response object it will return from a view that uses that renderer by accessing ``request.response``. Therefore, it's possible to use the ``request.response`` API to set up a response object with "the right" attributes (e.g. by calling ``request.response.set_cookie()``) within a view that uses a renderer. 
Mutations to this response object will be preserved in the response sent to the client.""" response_factory = _get_response_factory(self.registry) return response_factory(self) def is_response(self, ob): """Return ``True`` if the object passed as ``ob`` is a valid response object, ``False`` otherwise.""" if ob.__class__ is Response: return True registry = self.registry adapted = registry.queryAdapterOrSelf(ob, IResponse) if adapted is None: return False return adapted is ob # FILE kinto/kinto/core/utils.py def view_lookup(request, uri): """ A convenience method for view_lookup_registry when you have a request. :param request: the current request (used to obtain registry). :param uri: a plural or object endpoint URI. :rtype: tuple :returns: the resource name and the associated matchdict. """ return view_lookup_registry(request.registry, uri) # FILE kinto/kinto/core/utils.py def view_lookup_registry(registry, uri): """ Look-up the specified `uri` and return the associated resource name along the match dict. :param registry: the application's registry. :param uri: a plural or object endpoint URI. :rtype: tuple :returns: the resource name and the associated matchdict. """ api_prefix = f"/{registry.route_prefix}" path = api_prefix + uri q = registry.queryUtility routes_mapper = q(IRoutesMapper) fakerequest = Request.blank(path=path) info = routes_mapper(fakerequest) matchdict, route = info["match"], info["route"] if route is None: raise ValueError("URI has no route") resource_name = route.name.replace("-object", "").replace("-plural", "") return resource_name, matchdict # LIB jsonpatch.py class PatchOperation(object): """A single operation inside a JSON Patch.""" def __init__(self, operation, pointer_cls=JsonPointer): self.pointer_cls = pointer_cls if not operation.__contains__('path'): raise InvalidJsonPatch("Operation must have a 'path' member") if isinstance(operation['path'], self.pointer_cls): self.location = operation['path'].path self.pointer = operation['path'] else: self.location = operation['path'] try: self.pointer = self.pointer_cls(self.location) except TypeError as ex: raise InvalidJsonPatch("Invalid 'path'") self.operation = operation def apply(self, obj): """Abstract method that applies a patch operation to the specified object.""" ... def __hash__(self): ... def __eq__(self, other): ... def __ne__(self, other): ... def path(self): return '/'.join(self.pointer.parts[:-1]) def key(self): ... def key(self, value): ... # LIB memcache.py class Client(threading.local): """Object representing a pool of memcache servers. See L{memcache} for an overview. In all cases where a key is used, the key can be either: 1. A simple hashable type (string, integer, etc.). 2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid making this module calculate a hash value. You may prefer, for example, to keep all of a given user's objects on the same memcache server, so you could use the user's unique id as the hash value. 
@group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog @group Insertion: set, add, replace, set_multi @group Retrieval: get, get_multi @group Integers: incr, decr @group Removal: delete, delete_multi @sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\ set, set_multi, add, replace, get, get_multi, incr, decr, delete, delete_multi """ def __init__(self, servers, debug=0, pickleProtocol=0, pickler=pickle.Pickler, unpickler=pickle.Unpickler, compressor=zlib.compress, decompressor=zlib.decompress, pload=None, pid=None, server_max_key_length=None, server_max_value_length=None, dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT, cache_cas=False, flush_on_reconnect=0, check_keys=True): """Create a new Client object with the given list of servers. @param servers: C{servers} is passed to L{set_servers}. @param debug: whether to display error messages when a server can't be contacted. @param pickleProtocol: number to mandate protocol used by (c)Pickle. @param pickler: optional override of default Pickler to allow subclassing. @param unpickler: optional override of default Unpickler to allow subclassing. @param pload: optional persistent_load function to call on pickle loading. Useful for cPickle since subclassing isn't allowed. @param pid: optional persistent_id function to call on pickle storing. Useful for cPickle since subclassing isn't allowed. @param dead_retry: number of seconds before retrying a blacklisted server. Default to 30 s. @param socket_timeout: timeout in seconds for all calls to a server. Defaults to 3 seconds. @param cache_cas: (default False) If true, cas operations will be cached. WARNING: This cache is not expired internally, if you have a long-running process you will need to expire it manually via client.reset_cas(), or the cache can grow unlimited. @param server_max_key_length: (default SERVER_MAX_KEY_LENGTH) Data that is larger than this will not be sent to the server. @param server_max_value_length: (default SERVER_MAX_VALUE_LENGTH) Data that is larger than this will not be sent to the server. @param flush_on_reconnect: optional flag which prevents a scenario that can cause stale data to be read: If there's more than one memcached server and the connection to one is interrupted, keys that mapped to that server will get reassigned to another. If the first server comes back, those keys will map to it again. If it still has its data, get()s can read stale data that was overwritten on another server. This flag is off by default for backwards compatibility. @param check_keys: (default True) If True, the key is checked to ensure it is the correct length and composed of the right characters. 
""" super(Client, self).__init__() self.debug = debug self.dead_retry = dead_retry self.socket_timeout = socket_timeout self.flush_on_reconnect = flush_on_reconnect self.set_servers(servers) self.stats = {} self.cache_cas = cache_cas self.reset_cas() self.do_check_key = check_keys # Allow users to modify pickling/unpickling behavior self.pickleProtocol = pickleProtocol self.pickler = pickler self.unpickler = unpickler self.compressor = compressor self.decompressor = decompressor self.persistent_load = pload self.persistent_id = pid self.server_max_key_length = server_max_key_length if self.server_max_key_length is None: self.server_max_key_length = SERVER_MAX_KEY_LENGTH self.server_max_value_length = server_max_value_length if self.server_max_value_length is None: self.server_max_value_length = SERVER_MAX_VALUE_LENGTH # figure out the pickler style file = BytesIO() try: pickler = self.pickler(file, protocol=self.pickleProtocol) self.picklerIsKeyword = True except TypeError: self.picklerIsKeyword = False def _encode_key(self, key): ... def _encode_cmd(self, cmd, key, headers, noreply, *args): ... def reset_cas(self): """Reset the cas cache. This is only used if the Client() object was created with "cache_cas=True". If used, this cache does not expire internally, so it can grow unbounded if you do not clear it yourself. """ ... def set_servers(self, servers): """Set the pool of servers used by this client. @param servers: an array of servers. Servers can be passed in two forms: 1. Strings of the form C{"host:port"}, which implies a default weight of 1. 2. Tuples of the form C{("host:port", weight)}, where C{weight} is an integer weight value. """ ... def get_stats(self, stat_args=None): """Get statistics from each of the servers. @param stat_args: Additional arguments to pass to the memcache "stats" command. @return: A list of tuples ( server_identifier, stats_dictionary ). The dictionary contains a number of name/value pairs specifying the name of the status field and the string value associated with it. The values are not converted from strings. """ ... def get_slab_stats(self): ... def get_slabs(self): ... def flush_all(self): """Expire all data in memcache servers that are reachable.""" ... def debuglog(self, str): ... def _statlog(self, func): ... def forget_dead_hosts(self): """Reset every host in the pool to an "alive" state.""" ... def _init_buckets(self): ... def _get_server(self, key): ... def disconnect_all(self): ... def delete_multi(self, keys, time=None, key_prefix='', noreply=False): """Delete multiple keys in the memcache doing just one query. >>> notset_keys = mc.set_multi({'a1' : 'val1', 'a2' : 'val2'}) >>> mc.get_multi(['a1', 'a2']) == {'a1' : 'val1','a2' : 'val2'} 1 >>> mc.delete_multi(['key1', 'key2']) 1 >>> mc.get_multi(['key1', 'key2']) == {} 1 This method is recommended over iterated regular L{delete}s as it reduces total latency, since your app doesn't have to wait for each round-trip of L{delete} before sending the next one. @param keys: An iterable of keys to clear @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay. @param key_prefix: Optional string to prepend to each key when sending to memcache. See docs for L{get_multi} and L{set_multi}. @param noreply: optional parameter instructs the server to not send the reply. @return: 1 if no failure in communication with any memcacheds. @rtype: int """ ... def delete(self, key, time=None, noreply=False): '''Deletes a key from the memcache. 
@return: Nonzero on success. @param time: number of seconds any subsequent set / update commands should fail. Defaults to None for no delay. @param noreply: optional parameter instructs the server to not send the reply. @rtype: int ''' ... def touch(self, key, time=0, noreply=False): '''Updates the expiration time of a key in memcache. @return: Nonzero on success. @param time: Tells memcached the time which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. @param noreply: optional parameter instructs the server to not send the reply. @rtype: int ''' ... def _deletetouch(self, expected, cmd, key, time=0, noreply=False): ... def incr(self, key, delta=1, noreply=False): """Increment value for C{key} by C{delta} Sends a command to the server to atomically increment the value for C{key} by C{delta}, or by 1 if C{delta} is unspecified. Returns None if C{key} doesn't exist on server, otherwise it returns the new value after incrementing. Note that the value for C{key} must already exist in the memcache, and it must be the string representation of an integer. >>> mc.set("counter", "20") # returns 1, indicating success 1 >>> mc.incr("counter") 21 >>> mc.incr("counter") 22 Overflow on server is not checked. Be aware of values approaching 2**32. See L{decr}. @param delta: Integer amount to increment by (should be zero or greater). @param noreply: optional parameter instructs the server to not send the reply. @return: New value after incrementing, no None for noreply or error. @rtype: int """ ... def decr(self, key, delta=1, noreply=False): """Decrement value for C{key} by C{delta} Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and new values are capped at 0. If server value is 1, a decrement of 2 returns 0, not -1. @param delta: Integer amount to decrement by (should be zero or greater). @param noreply: optional parameter instructs the server to not send the reply. @return: New value after decrementing, or None for noreply or error. @rtype: int """ ... def _incrdecr(self, cmd, key, delta, noreply=False): ... def add(self, key, val, time=0, min_compress_len=0, noreply=False): '''Add new key with value. Like L{set}, but only stores in memcache if the key doesn't already exist. @return: Nonzero on success. @rtype: int ''' ... def append(self, key, val, time=0, min_compress_len=0, noreply=False): '''Append the value to the end of the existing key's value. Only stores in memcache if key already exists. Also see L{prepend}. @return: Nonzero on success. @rtype: int ''' ... def prepend(self, key, val, time=0, min_compress_len=0, noreply=False): '''Prepend the value to the beginning of the existing key's value. Only stores in memcache if key already exists. Also see L{append}. @return: Nonzero on success. @rtype: int ''' ... def replace(self, key, val, time=0, min_compress_len=0, noreply=False): '''Replace existing key with value. Like L{set}, but only stores in memcache if the key already exists. The opposite of L{add}. @return: Nonzero on success. @rtype: int ''' ... def set(self, key, val, time=0, min_compress_len=0, noreply=False): '''Unconditionally sets a key to a given value in the memcache. The C{key} can optionally be an tuple, with the first element being the server hash value and the second being the key. If you want to avoid making this module calculate a hash value. 
You may prefer, for example, to keep all of a given user's objects on the
        same memcache server, so you could use the user's unique id as
        the hash value.

        @return: Nonzero on success.
        @rtype: int

        @param time: Tells memcached the time which this value should
        expire, either as a delta number of seconds, or an absolute
        unix time-since-the-epoch value. See the memcached protocol
        docs section "Storage Commands" for more info on <exptime>.
        We default to 0 == cache forever.

        @param min_compress_len: The threshold length to kick in
        auto-compression of the value using the compressor routine. If
        the value being cached is a string, then the length of the
        string is measured, else if the value is an object, then the
        length of the pickle result is measured. If the resulting
        attempt at compression yields a larger string than the input,
        then it is discarded. For backwards compatibility, this
        parameter defaults to 0, indicating don't ever try to compress.

        @param noreply: optional parameter instructs the server to not
        send the reply.
        '''
        ...

    def cas(self, key, val, time=0, min_compress_len=0, noreply=False):
        '''Check and set (CAS)

        Sets a key to a given value in the memcache if it hasn't been
        altered since last fetched. (See L{gets}).

        The C{key} can optionally be an tuple, with the first element
        being the server hash value and the second being the key. If you
        want to avoid making this module calculate a hash value. You may
        prefer, for example, to keep all of a given user's objects on the
        same memcache server, so you could use the user's unique id as
        the hash value.

        @return: Nonzero on success.
        @rtype: int

        @param time: Tells memcached the time which this value should
        expire, either as a delta number of seconds, or an absolute
        unix time-since-the-epoch value. See the memcached protocol
        docs section "Storage Commands" for more info on <exptime>.
        We default to 0 == cache forever.

        @param min_compress_len: The threshold length to kick in
        auto-compression of the value using the compressor routine. If
        the value being cached is a string, then the length of the
        string is measured, else if the value is an object, then the
        length of the pickle result is measured. If the resulting
        attempt at compression yields a larger string than the input,
        then it is discarded. For backwards compatibility, this
        parameter defaults to 0, indicating don't ever try to compress.

        @param noreply: optional parameter instructs the server to not
        send the reply.
        '''
        ...

    def _map_and_prefix_keys(self, key_iterable, key_prefix):
        """Map keys to the servers they will reside on.

        Compute the mapping of server (_Host instance) -> list of keys to
        stuff onto that server, as well as the mapping of prefixed key
        -> original key.
        """
        ...

    def set_multi(self, mapping, time=0, key_prefix='',
                  min_compress_len=0, noreply=False):
        '''Sets multiple keys in the memcache doing just one query.

        >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
        >>> keys = mc.get_multi(['key1', 'key2'])
        >>> keys == {'key1': 'val1', 'key2': 'val2'}
        True

        This method is recommended over regular L{set} as it lowers
        the number of total packets flying around your network,
        reducing total latency, since your app doesn't have to wait
        for each round-trip of L{set} before sending the next one.

        @param mapping: A dict of key/value pairs to set.

        @param time: Tells memcached the time which this value should
        expire, either as a delta number of seconds, or an absolute
        unix time-since-the-epoch value. See the memcached protocol
        docs section "Storage Commands" for more info on <exptime>.
We default to 0 == cache forever. @param key_prefix: Optional string to prepend to each key when sending to memcache. Allows you to efficiently stuff these keys into a pseudo-namespace in memcache: >>> notset_keys = mc.set_multi( ... {'key1' : 'val1', 'key2' : 'val2'}, ... key_prefix='subspace_') >>> len(notset_keys) == 0 True >>> keys = mc.get_multi(['subspace_key1', 'subspace_key2']) >>> keys == {'subspace_key1': 'val1', 'subspace_key2': 'val2'} True Causes key 'subspace_key1' and 'subspace_key2' to be set. Useful in conjunction with a higher-level layer which applies namespaces to data in memcache. In this case, the return result would be the list of notset original keys, prefix not applied. @param min_compress_len: The threshold length to kick in auto-compression of the value using the compressor routine. If the value being cached is a string, then the length of the string is measured, else if the value is an object, then the length of the pickle result is measured. If the resulting attempt at compression yields a larger string than the input, then it is discarded. For backwards compatibility, this parameter defaults to 0, indicating don't ever try to compress. @param noreply: optional parameter instructs the server to not send the reply. @return: List of keys which failed to be stored [ memcache out of memory, etc. ]. @rtype: list ''' ... def _val_to_store_info(self, val, min_compress_len): """Transform val to a storable representation. Returns a tuple of the flags, the length of the new value, and the new value itself. """ ... def _set(self, cmd, key, val, time, min_compress_len=0, noreply=False): ... def _get(self, cmd, key): ... def get(self, key): '''Retrieves a key from the memcache. @return: The value or None. ''' return self._get('get', key) def gets(self, key): '''Retrieves a key from the memcache. Used in conjunction with 'cas'. @return: The value or None. ''' ... def get_multi(self, keys, key_prefix=''): '''Retrieves multiple keys from the memcache doing just one query. >>> success = mc.set("foo", "bar") >>> success = mc.set("baz", 42) >>> mc.get_multi(["foo", "baz", "foobar"]) == { ... "foo": "bar", "baz": 42 ... } 1 >>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == [] 1 This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'. >>> mc.get_multi(['k1', 'k2', 'nonexist'], ... key_prefix='pfx_') == {'k1' : 1, 'k2' : 2} 1 get_multi [ and L{set_multi} ] can take str()-ables like ints / longs as keys too. Such as your db pri key fields. They're rotored through str() before being passed off to memcache, with or without the use of a key_prefix. In this mode, the key_prefix could be a table name, and the key itself a db primary key number. >>> mc.set_multi({42: 'douglass adams', ... 46: 'and 2 just ahead of me'}, ... key_prefix='numkeys_') == [] 1 >>> mc.get_multi([46, 42], key_prefix='numkeys_') == { ... 42: 'douglass adams', ... 46: 'and 2 just ahead of me' ... } 1 This method is recommended over regular L{get} as it lowers the number of total packets flying around your network, reducing total latency, since your app doesn't have to wait for each round-trip of L{get} before sending the next one. See also L{set_multi}. @param keys: An array of keys. @param key_prefix: A string to prefix each key when we communicate with memcache. Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix. @return: A dictionary of key/value pairs that were available. 
If key_prefix was provided, the keys in the returned dictionary will not have it present. ''' ... def _expect_cas_value(self, server, line=None, raise_exception=False): ... def _expectvalue(self, server, line=None, raise_exception=False): ... def _recv_value(self, server, flags, rlen): ... def check_key(self, key, key_extra_len=0): """Checks sanity of key. Fails if: Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLength). Contains control characters (Raises MemcachedKeyCharacterError). Is not a string (Raises MemcachedStringEncodingError) Is an unicode string (Raises MemcachedStringEncodingError) Is not a string (Raises MemcachedKeyError) Is None (Raises MemcachedKeyError) """ ... Based on the information above, please complete the function: #CURRENT_FILE: kinto/kinto/core/utils.py import collections.abc as collections_abc import hashlib import hmac import os import re import time from base64 import b64decode, b64encode from binascii import hexlify from enum import Enum from urllib.parse import unquote import jsonpatch import rapidjson from colander import null from cornice import cors from pyramid import httpexceptions from pyramid.authorization import Authenticated from pyramid.interfaces import IRoutesMapper from pyramid.request import Request, apply_request_extensions from pyramid.settings import aslist from pyramid.view import render_view_to_response import sqlalchemy import memcache from kinto.core import Service def current_service(request): """Return the Cornice service matching the specified request. :returns: the service or None if unmatched. :rtype: cornice.Service """
kinto/kinto/core/utils.py
kinto.core.utils.prefixed_principals
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE kinto/kinto/core/utils.py def prefixed_userid(request): """In Kinto users ids are prefixed with the policy name that is contained in Pyramid Multiauth. If a custom authn policy is used, without authn_type, this method returns the user id without prefix. """ # If pyramid_multiauth is used, a ``authn_type`` is set on request # when a policy succesfully authenticates a user. # (see :func:`kinto.core.initialization.setup_authentication`) authn_type = getattr(request, "authn_type", None) if authn_type is not None: return f"{authn_type}:{request.selected_userid}" # LIB memcache.py class Client(threading.local): """Object representing a pool of memcache servers. See L{memcache} for an overview. In all cases where a key is used, the key can be either: 1. A simple hashable type (string, integer, etc.). 2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid making this module calculate a hash value. You may prefer, for example, to keep all of a given user's objects on the same memcache server, so you could use the user's unique id as the hash value. @group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog @group Insertion: set, add, replace, set_multi @group Retrieval: get, get_multi @group Integers: incr, decr @group Removal: delete, delete_multi @sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\ set, set_multi, add, replace, get, get_multi, incr, decr, delete, delete_multi """ def __init__(self, servers, debug=0, pickleProtocol=0, pickler=pickle.Pickler, unpickler=pickle.Unpickler, compressor=zlib.compress, decompressor=zlib.decompress, pload=None, pid=None, server_max_key_length=None, server_max_value_length=None, dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT, cache_cas=False, flush_on_reconnect=0, check_keys=True): """Create a new Client object with the given list of servers. @param servers: C{servers} is passed to L{set_servers}. @param debug: whether to display error messages when a server can't be contacted. @param pickleProtocol: number to mandate protocol used by (c)Pickle. @param pickler: optional override of default Pickler to allow subclassing. @param unpickler: optional override of default Unpickler to allow subclassing. @param pload: optional persistent_load function to call on pickle loading. Useful for cPickle since subclassing isn't allowed. @param pid: optional persistent_id function to call on pickle storing. Useful for cPickle since subclassing isn't allowed. @param dead_retry: number of seconds before retrying a blacklisted server. Default to 30 s. @param socket_timeout: timeout in seconds for all calls to a server. Defaults to 3 seconds. @param cache_cas: (default False) If true, cas operations will be cached. WARNING: This cache is not expired internally, if you have a long-running process you will need to expire it manually via client.reset_cas(), or the cache can grow unlimited. @param server_max_key_length: (default SERVER_MAX_KEY_LENGTH) Data that is larger than this will not be sent to the server. @param server_max_value_length: (default SERVER_MAX_VALUE_LENGTH) Data that is larger than this will not be sent to the server. @param flush_on_reconnect: optional flag which prevents a scenario that can cause stale data to be read: If there's more than one memcached server and the connection to one is interrupted, keys that mapped to that server will get reassigned to another. 
If the first server comes back, those keys will map to it again. If it still has its data, get()s can read stale data that was overwritten on another server. This flag is off by default for backwards compatibility. @param check_keys: (default True) If True, the key is checked to ensure it is the correct length and composed of the right characters. """ super(Client, self).__init__() self.debug = debug self.dead_retry = dead_retry self.socket_timeout = socket_timeout self.flush_on_reconnect = flush_on_reconnect self.set_servers(servers) self.stats = {} self.cache_cas = cache_cas self.reset_cas() self.do_check_key = check_keys # Allow users to modify pickling/unpickling behavior self.pickleProtocol = pickleProtocol self.pickler = pickler self.unpickler = unpickler self.compressor = compressor self.decompressor = decompressor self.persistent_load = pload self.persistent_id = pid self.server_max_key_length = server_max_key_length if self.server_max_key_length is None: self.server_max_key_length = SERVER_MAX_KEY_LENGTH self.server_max_value_length = server_max_value_length if self.server_max_value_length is None: self.server_max_value_length = SERVER_MAX_VALUE_LENGTH # figure out the pickler style file = BytesIO() try: pickler = self.pickler(file, protocol=self.pickleProtocol) self.picklerIsKeyword = True except TypeError: self.picklerIsKeyword = False def _encode_key(self, key): ... def _encode_cmd(self, cmd, key, headers, noreply, *args): ... def reset_cas(self): """Reset the cas cache. This is only used if the Client() object was created with "cache_cas=True". If used, this cache does not expire internally, so it can grow unbounded if you do not clear it yourself. """ ... def set_servers(self, servers): """Set the pool of servers used by this client. @param servers: an array of servers. Servers can be passed in two forms: 1. Strings of the form C{"host:port"}, which implies a default weight of 1. 2. Tuples of the form C{("host:port", weight)}, where C{weight} is an integer weight value. """ ... def get_stats(self, stat_args=None): """Get statistics from each of the servers. @param stat_args: Additional arguments to pass to the memcache "stats" command. @return: A list of tuples ( server_identifier, stats_dictionary ). The dictionary contains a number of name/value pairs specifying the name of the status field and the string value associated with it. The values are not converted from strings. """ ... def get_slab_stats(self): ... def get_slabs(self): ... def flush_all(self): """Expire all data in memcache servers that are reachable.""" ... def debuglog(self, str): ... def _statlog(self, func): ... def forget_dead_hosts(self): """Reset every host in the pool to an "alive" state.""" ... def _init_buckets(self): ... def _get_server(self, key): ... def disconnect_all(self): ... def delete_multi(self, keys, time=None, key_prefix='', noreply=False): """Delete multiple keys in the memcache doing just one query. >>> notset_keys = mc.set_multi({'a1' : 'val1', 'a2' : 'val2'}) >>> mc.get_multi(['a1', 'a2']) == {'a1' : 'val1','a2' : 'val2'} 1 >>> mc.delete_multi(['key1', 'key2']) 1 >>> mc.get_multi(['key1', 'key2']) == {} 1 This method is recommended over iterated regular L{delete}s as it reduces total latency, since your app doesn't have to wait for each round-trip of L{delete} before sending the next one. @param keys: An iterable of keys to clear @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay. 
@param key_prefix: Optional string to prepend to each key when sending to memcache. See docs for L{get_multi} and L{set_multi}. @param noreply: optional parameter instructs the server to not send the reply. @return: 1 if no failure in communication with any memcacheds. @rtype: int """ ... def delete(self, key, time=None, noreply=False): '''Deletes a key from the memcache. @return: Nonzero on success. @param time: number of seconds any subsequent set / update commands should fail. Defaults to None for no delay. @param noreply: optional parameter instructs the server to not send the reply. @rtype: int ''' ... def touch(self, key, time=0, noreply=False): '''Updates the expiration time of a key in memcache. @return: Nonzero on success. @param time: Tells memcached the time which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. @param noreply: optional parameter instructs the server to not send the reply. @rtype: int ''' ... def _deletetouch(self, expected, cmd, key, time=0, noreply=False): ... def incr(self, key, delta=1, noreply=False): """Increment value for C{key} by C{delta} Sends a command to the server to atomically increment the value for C{key} by C{delta}, or by 1 if C{delta} is unspecified. Returns None if C{key} doesn't exist on server, otherwise it returns the new value after incrementing. Note that the value for C{key} must already exist in the memcache, and it must be the string representation of an integer. >>> mc.set("counter", "20") # returns 1, indicating success 1 >>> mc.incr("counter") 21 >>> mc.incr("counter") 22 Overflow on server is not checked. Be aware of values approaching 2**32. See L{decr}. @param delta: Integer amount to increment by (should be zero or greater). @param noreply: optional parameter instructs the server to not send the reply. @return: New value after incrementing, no None for noreply or error. @rtype: int """ ... def decr(self, key, delta=1, noreply=False): """Decrement value for C{key} by C{delta} Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and new values are capped at 0. If server value is 1, a decrement of 2 returns 0, not -1. @param delta: Integer amount to decrement by (should be zero or greater). @param noreply: optional parameter instructs the server to not send the reply. @return: New value after decrementing, or None for noreply or error. @rtype: int """ ... def _incrdecr(self, cmd, key, delta, noreply=False): ... def add(self, key, val, time=0, min_compress_len=0, noreply=False): '''Add new key with value. Like L{set}, but only stores in memcache if the key doesn't already exist. @return: Nonzero on success. @rtype: int ''' ... def append(self, key, val, time=0, min_compress_len=0, noreply=False): '''Append the value to the end of the existing key's value. Only stores in memcache if key already exists. Also see L{prepend}. @return: Nonzero on success. @rtype: int ''' return self._set("append", key, val, time, min_compress_len, noreply) def prepend(self, key, val, time=0, min_compress_len=0, noreply=False): '''Prepend the value to the beginning of the existing key's value. Only stores in memcache if key already exists. Also see L{append}. @return: Nonzero on success. @rtype: int ''' ... def replace(self, key, val, time=0, min_compress_len=0, noreply=False): '''Replace existing key with value. 
Like L{set}, but only stores in memcache if the key already exists. The opposite of L{add}. @return: Nonzero on success. @rtype: int ''' ... def set(self, key, val, time=0, min_compress_len=0, noreply=False): '''Unconditionally sets a key to a given value in the memcache. The C{key} can optionally be an tuple, with the first element being the server hash value and the second being the key. If you want to avoid making this module calculate a hash value. You may prefer, for example, to keep all of a given user's objects on the same memcache server, so you could use the user's unique id as the hash value. @return: Nonzero on success. @rtype: int @param time: Tells memcached the time which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. @param min_compress_len: The threshold length to kick in auto-compression of the value using the compressor routine. If the value being cached is a string, then the length of the string is measured, else if the value is an object, then the length of the pickle result is measured. If the resulting attempt at compression yields a larger string than the input, then it is discarded. For backwards compatibility, this parameter defaults to 0, indicating don't ever try to compress. @param noreply: optional parameter instructs the server to not send the reply. ''' ... def cas(self, key, val, time=0, min_compress_len=0, noreply=False): '''Check and set (CAS) Sets a key to a given value in the memcache if it hasn't been altered since last fetched. (See L{gets}). The C{key} can optionally be an tuple, with the first element being the server hash value and the second being the key. If you want to avoid making this module calculate a hash value. You may prefer, for example, to keep all of a given user's objects on the same memcache server, so you could use the user's unique id as the hash value. @return: Nonzero on success. @rtype: int @param time: Tells memcached the time which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. @param min_compress_len: The threshold length to kick in auto-compression of the value using the compressor routine. If the value being cached is a string, then the length of the string is measured, else if the value is an object, then the length of the pickle result is measured. If the resulting attempt at compression yields a larger string than the input, then it is discarded. For backwards compatibility, this parameter defaults to 0, indicating don't ever try to compress. @param noreply: optional parameter instructs the server to not send the reply. ''' ... def _map_and_prefix_keys(self, key_iterable, key_prefix): """Map keys to the servers they will reside on. Compute the mapping of server (_Host instance) -> list of keys to stuff onto that server, as well as the mapping of prefixed key -> original key. """ ... def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0, noreply=False): '''Sets multiple keys in the memcache doing just one query.
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}) >>> keys = mc.get_multi(['key1', 'key2']) >>> keys == {'key1': 'val1', 'key2': 'val2'} True This method is recommended over regular L{set} as it lowers the number of total packets flying around your network, reducing total latency, since your app doesn't have to wait for each round-trip of L{set} before sending the next one. @param mapping: A dict of key/value pairs to set. @param time: Tells memcached the time which this value should expire, either as a delta number of seconds, or an absolute unix time-since-the-epoch value. See the memcached protocol docs section "Storage Commands" for more info on <exptime>. We default to 0 == cache forever. @param key_prefix: Optional string to prepend to each key when sending to memcache. Allows you to efficiently stuff these keys into a pseudo-namespace in memcache: >>> notset_keys = mc.set_multi( ... {'key1' : 'val1', 'key2' : 'val2'}, ... key_prefix='subspace_') >>> len(notset_keys) == 0 True >>> keys = mc.get_multi(['subspace_key1', 'subspace_key2']) >>> keys == {'subspace_key1': 'val1', 'subspace_key2': 'val2'} True Causes key 'subspace_key1' and 'subspace_key2' to be set. Useful in conjunction with a higher-level layer which applies namespaces to data in memcache. In this case, the return result would be the list of notset original keys, prefix not applied. @param min_compress_len: The threshold length to kick in auto-compression of the value using the compressor routine. If the value being cached is a string, then the length of the string is measured, else if the value is an object, then the length of the pickle result is measured. If the resulting attempt at compression yields a larger string than the input, then it is discarded. For backwards compatibility, this parameter defaults to 0, indicating don't ever try to compress. @param noreply: optional parameter instructs the server to not send the reply. @return: List of keys which failed to be stored [ memcache out of memory, etc. ]. @rtype: list ''' ... def _val_to_store_info(self, val, min_compress_len): """Transform val to a storable representation. Returns a tuple of the flags, the length of the new value, and the new value itself. """ ... def _set(self, cmd, key, val, time, min_compress_len=0, noreply=False): ... def _get(self, cmd, key): ... def get(self, key): '''Retrieves a key from the memcache. @return: The value or None. ''' ... def gets(self, key): '''Retrieves a key from the memcache. Used in conjunction with 'cas'. @return: The value or None. ''' ... def get_multi(self, keys, key_prefix=''): '''Retrieves multiple keys from the memcache doing just one query. >>> success = mc.set("foo", "bar") >>> success = mc.set("baz", 42) >>> mc.get_multi(["foo", "baz", "foobar"]) == { ... "foo": "bar", "baz": 42 ... } 1 >>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == [] 1 This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'. >>> mc.get_multi(['k1', 'k2', 'nonexist'], ... key_prefix='pfx_') == {'k1' : 1, 'k2' : 2} 1 get_multi [ and L{set_multi} ] can take str()-ables like ints / longs as keys too. Such as your db pri key fields. They're rotored through str() before being passed off to memcache, with or without the use of a key_prefix. In this mode, the key_prefix could be a table name, and the key itself a db primary key number. >>> mc.set_multi({42: 'douglass adams', ... 46: 'and 2 just ahead of me'}, ... 
key_prefix='numkeys_') == [] 1 >>> mc.get_multi([46, 42], key_prefix='numkeys_') == { ... 42: 'douglass adams', ... 46: 'and 2 just ahead of me' ... } 1 This method is recommended over regular L{get} as it lowers the number of total packets flying around your network, reducing total latency, since your app doesn't have to wait for each round-trip of L{get} before sending the next one. See also L{set_multi}. @param keys: An array of keys. @param key_prefix: A string to prefix each key when we communicate with memcache. Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix. @return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the returned dictionary will not have it present. ''' ... def _expect_cas_value(self, server, line=None, raise_exception=False): ... def _expectvalue(self, server, line=None, raise_exception=False): ... def _recv_value(self, server, flags, rlen): ... def check_key(self, key, key_extra_len=0): """Checks sanity of key. Fails if: Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLength). Contains control characters (Raises MemcachedKeyCharacterError). Is not a string (Raises MemcachedStringEncodingError) Is an unicode string (Raises MemcachedStringEncodingError) Is not a string (Raises MemcachedKeyError) Is None (Raises MemcachedKeyError) """ ...
Based on the information above, please complete the function: #CURRENT_FILE: kinto/kinto/core/utils.py import collections.abc as collections_abc import hashlib import hmac import os import re import time from base64 import b64decode, b64encode from binascii import hexlify from enum import Enum from urllib.parse import unquote import jsonpatch import rapidjson from colander import null from cornice import cors from pyramid import httpexceptions from pyramid.authorization import Authenticated from pyramid.interfaces import IRoutesMapper from pyramid.request import Request, apply_request_extensions from pyramid.settings import aslist from pyramid.view import render_view_to_response import sqlalchemy import memcache from kinto.core import Service def prefixed_principals(request): """ :returns: the list principals with prefixed user id. """
kinto/kinto/core/utils.py
kinto.plugins.accounts.views.on_account_created
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE kinto/kinto/plugins/accounts/views/__init__.py def process_object(self, new, old=None): new = super(Account, self).process_object(new, old) if "data" in self.request.json and "password" in self.request.json["data"]: new["password"] = hash_password(new["password"]) # Do not let accounts be created without usernames. if self.model.id_field not in new: error_details = {"name": "data.id", "description": "Accounts must have an ID."} raise_invalid(self.request, **error_details) # Account validation requires that the record ID is an email address. # TODO: this might be better suited for a schema. Do we have a way to # dynamically change the schema according to the settings? if self.context.validation_enabled and old is None: email_regexp = self.context.validation_email_regexp # Account validation requires that the record ID is an email address. user_email = new[self.model.id_field] if not email_regexp.match(user_email): error_details = { "name": "data.id", "description": f"Account validation is enabled, and user id should match {email_regexp}", } raise_invalid(self.request, **error_details) activation_key = str(uuid.uuid4()) new["validated"] = False # Store the activation key in the cache to be used in the `validate` endpoint. cache_validation_key(activation_key, new["id"], self.request.registry) # Administrators can reach other accounts and anonymous have no # selected_userid. So do not try to enforce. if self.context.is_administrator or self.context.is_anonymous: return new # Otherwise, we force the id to match the authenticated username. if new[self.model.id_field] != self.request.selected_userid: error_details = { "name": "data.id", "description": "Username and account ID do not match.", } raise_invalid(self.request, **error_details) return new # FILE kinto/kinto/plugins/accounts/views/__init__.py def on_account_changed(event): request = event.request for obj in event.impacted_objects: # Extract username and password from current user username = obj["old"]["id"] # Delete cache delete_cached_account(username, request.registry) # FILE kinto/kinto/core/events.py class ResourceChanged(_ResourceEvent): """Triggered when a resource is being changed.""" def __init__(self, payload, impacted_objects, request): super().__init__(payload, request) self.impacted_objects = impacted_objects # FILE kinto/kinto/plugins/accounts/views/__init__.py class Account(resource.Resource): schema = AccountSchema def __init__(self, request, context): settings = request.registry.settings # Store if current user is administrator (before accessing get_parent_id()) allowed_from_settings = settings.get("account_write_principals", []) context.is_administrator = ( len(set(aslist(allowed_from_settings)) & set(request.prefixed_principals)) > 0 ) # Shortcut to check if current is anonymous (before get_parent_id()). context.is_anonymous = Authenticated not in request.effective_principals # Is the "accounts validation" setting set? context.validation_enabled = settings.get("account_validation", False) # Account validation requires the user id to be an email.
validation_email_regexp = settings.get( "account_validation.email_regexp", DEFAULT_EMAIL_REGEXP ) context.validation_email_regexp = re.compile(validation_email_regexp) super().__init__(request, context) # Overwrite the current principal set by Resource. if self.model.current_principal == Everyone or context.is_administrator: # Creation is anonymous, but author with write perm is this: self.model.current_principal = f"{ACCOUNT_POLICY_NAME}:{self.model.parent_id}" @reify def id_generator(self): # This generator is used for ID validation. return AccountIdGenerator() def get_parent_id(self, request): # The whole challenge here is that we want to isolate what # authenticated users can list, but give access to everything to # administrators. # Plus when anonymous create accounts, we have to set their parent id # to the same value they would obtain when authenticated. if self.context.is_administrator: if self.context.on_plural_endpoint: # Accounts created by admin should have userid as parent. if request.method.lower() == "post": return _extract_posted_body_id(request) else: # Admin see all accounts. return "*" else: # No pattern matching for admin on single record. return request.matchdict["id"] if not self.context.is_anonymous: # Authenticated users see their own account only. return request.selected_userid # Anonymous creation with PUT. if "id" in request.matchdict: return request.matchdict["id"] return _extract_posted_body_id(request) def process_object(self, new, old=None): new = super(Account, self).process_object(new, old) if "data" in self.request.json and "password" in self.request.json["data"]: new["password"] = hash_password(new["password"]) # Do not let accounts be created without usernames. if self.model.id_field not in new: error_details = {"name": "data.id", "description": "Accounts must have an ID."} raise_invalid(self.request, **error_details) # Account validation requires that the record ID is an email address. # TODO: this might be better suited for a schema. Do we have a way to # dynamically change the schema according to the settings? if self.context.validation_enabled and old is None: email_regexp = self.context.validation_email_regexp # Account validation requires that the record ID is an email address. user_email = new[self.model.id_field] if not email_regexp.match(user_email): error_details = { "name": "data.id", "description": f"Account validation is enabled, and user id should match {email_regexp}", } raise_invalid(self.request, **error_details) activation_key = str(uuid.uuid4()) new["validated"] = False # Store the activation key in the cache to be used in the `validate` endpoint. cache_validation_key(activation_key, new["id"], self.request.registry) # Administrators can reach other accounts and anonymous have no # selected_userid. So do not try to enforce. if self.context.is_administrator or self.context.is_anonymous: return new # Otherwise, we force the id to match the authenticated username. if new[self.model.id_field] != self.request.selected_userid: error_details = { "name": "data.id", "description": "Username and account ID do not match.", } raise_invalid(self.request, **error_details) return new # FILE kinto/kinto/plugins/accounts/utils.py def cache_validation_key(activation_key, username, registry): """Store a validation_key in the cache.""" settings = registry.settings hmac_secret = settings["userid_hmac_secret"] cache_key = utils.hmac_digest(hmac_secret, ACCOUNT_VALIDATION_CACHE_KEY.format(username)) # Store an activation key for 7 days by default. 
cache_ttl = int( settings.get( "account_validation.validation_key_cache_ttl_seconds", DEFAULT_VALIDATION_KEY_CACHE_TTL_SECONDS, ) ) cache = registry.cache cache_result = cache.set(cache_key, activation_key, ttl=cache_ttl) return cache_result # FILE kinto/kinto/plugins/accounts/utils.py def hash_password(password): # Store password safely in database as str # (bcrypt.hashpw returns base64 bytes). pwd_str = password.encode(encoding="utf-8") hashed = bcrypt.hashpw(pwd_str, bcrypt.gensalt()) return hashed.decode(encoding="utf-8") Based on the information above, please complete the function: #CURRENT_FILE: kinto/kinto/plugins/accounts/views/__init__.py import re import uuid import colander from pyramid import httpexceptions from pyramid.authorization import Authenticated, Everyone from pyramid.decorator import reify from pyramid.events import subscriber from pyramid.settings import aslist from kinto.core import resource from kinto.core.errors import http_error, raise_invalid from kinto.core.events import ACTIONS, ResourceChanged from kinto.views import NameGenerator from ..mails import Emailer from ..utils import ACCOUNT_POLICY_NAME, cache_validation_key, delete_cached_account, get_cached_validation_key, hash_password def on_account_created(event):
kinto/kinto/plugins/accounts/views/__init__.py
kinto.plugins.accounts.utils.hash_password
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function: #CURRENT_FILE: kinto/kinto/plugins/accounts/utils.py import bcrypt from kinto.core import utils def hash_password(password): # Store password safely in database as str # (bcrypt.hashpw returns base64 bytes).
kinto/kinto/plugins/accounts/utils.py
kinto.views.admin.get_parent_uri
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function: #CURRENT_FILE: kinto/kinto/views/admin.py import collections import itertools import colander from kinto.authorization import RouteFactory from kinto.core import resource from kinto.core import utils as core_utils from kinto.core.events import ACTIONS, notify_resource_event from kinto.core.resource import viewset from kinto.core.storage import Filter def get_parent_uri(object_uri): """Get the parent URI for an object_uri. In order to be generic over any kind of resource hierarchy, we do this by string manipulation on the URI instead of trying to parse the URI, identify the parent resource, and generate a new URI. """
kinto/kinto/views/admin.py
alembic.script.write_hooks.register
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function: #CURRENT_FILE: alembic/alembic/script/write_hooks.py from __future__ import annotations import shlex import subprocess import sys from typing import Any from typing import Callable from typing import Dict from typing import List from typing import Mapping from typing import Optional from typing import Union from .. import util from ..util import compat from .base import _split_on_space_comma def register(name: str) -> Callable: """A function decorator that will register that function as a write hook. See the documentation linked below for an example. .. seealso:: :ref:`post_write_hooks_custom` """
alembic/alembic/script/write_hooks.py
mongo_connector.namespace_config.match_replace_regex
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE mongo-doc-manager/mongo_connector/namespace_config.py def from_namespaces(namespaces): regexes = set() strings = set() for ns in namespaces: if "*" in ns: regexes.add(namespace_to_regex(ns)) else: strings.add(ns) return RegexSet(regexes, strings) # FILE mongo-doc-manager/mongo_connector/namespace_config.py def namespace_to_regex(namespace): """Create a RegexObject from a wildcard namespace.""" db_name, coll_name = namespace.split(".", 1) # A database name cannot contain a '.' character db_regex = re.escape(db_name).replace(r"\*", "([^.]*)") # But a collection name can. coll_regex = re.escape(coll_name).replace(r"\*", "(.*)") return re.compile(r"\A" + db_regex + r"\." + coll_regex + r"\Z") # FILE mongo-doc-manager/mongo_connector/namespace_config.py class NamespaceConfig(object): """Manages included and excluded namespaces. """ def __init__( self, namespace_set=None, ex_namespace_set=None, gridfs_set=None, dest_mapping=None, namespace_options=None, include_fields=None, exclude_fields=None, ): # A mapping from non-wildcard source namespaces to a MappedNamespace # containing the non-wildcard target name. self._plain = {} # A mapping from non-wildcard target namespaces to their # corresponding non-wildcard source namespace. Namespaces have a # one-to-one relationship with the target system, meaning multiple # source namespaces cannot be merged into a single namespace in the # target. self._reverse_plain = {} # A mapping from non-wildcard source database names to the set of # non-wildcard target database names. self._plain_db = {} # A list of (re.RegexObject, MappedNamespace) tuples. regex_map maps # wildcard source namespaces to a MappedNamespace containing the # wildcard target name. When a namespace is matched, an entry is # created in `self.plain` for faster subsequent lookups. self._regex_map = [] # Fields to include or exclude from all namespaces self._include_fields = validate_include_fields(include_fields) self._exclude_fields = validate_exclude_fields(exclude_fields) # Add each included namespace. Namespaces have a one-to-one # relationship to the target system, meaning multiple source # namespaces cannot be merged into a single namespace in the target. ex_namespace_set, namespaces = validate_namespace_options( namespace_set=namespace_set, ex_namespace_set=ex_namespace_set, gridfs_set=gridfs_set, dest_mapping=dest_mapping, namespace_options=namespace_options, include_fields=include_fields, exclude_fields=exclude_fields, ) # The set of, possibly wildcard, namespaces to exclude. 
self._ex_namespace_set = RegexSet.from_namespaces(ex_namespace_set) for namespace in namespaces: self._register_namespace_and_command(namespace) def _register_namespace_and_command(self, namespace): """Add a Namespace and the corresponding command namespace.""" self._add_namespace(namespace) # Add the namespace for commands on this database cmd_name = namespace.source_name.split(".", 1)[0] + ".$cmd" dest_cmd_name = namespace.dest_name.split(".", 1)[0] + ".$cmd" self._add_namespace(Namespace(dest_name=dest_cmd_name, source_name=cmd_name)) def _add_namespace(self, namespace): """Add an included and possibly renamed Namespace.""" src_name = namespace.source_name if "*" in src_name: self._regex_map.append((namespace_to_regex(src_name), namespace)) else: self._add_plain_namespace(namespace) def _add_plain_namespace(self, namespace): """Add an included and possibly renamed non-wildcard Namespace.""" src_name = namespace.source_name target_name = namespace.dest_name src_names = self._reverse_plain.setdefault(target_name, set()) src_names.add(src_name) if len(src_names) > 1: # Another source namespace is already mapped to this target existing_src = (src_names - set([src_name])).pop() raise errors.InvalidConfiguration( "Multiple namespaces cannot be combined into one target " "namespace. Trying to map '%s' to '%s' but there already " "exists a mapping from '%s' to '%s'" % (src_name, target_name, existing_src, target_name) ) self._plain[src_name] = namespace src_db, _ = src_name.split(".", 1) target_db, _ = target_name.split(".", 1) self._plain_db.setdefault(src_db, set()).add(target_db) def lookup(self, plain_src_ns): """Given a plain source namespace, return the corresponding Namespace object, or None if it is not included. """ # Ignore the namespace if it is excluded. if plain_src_ns in self._ex_namespace_set: return None # Include all namespaces if there are no included namespaces. if not self._regex_map and not self._plain: return Namespace( dest_name=plain_src_ns, source_name=plain_src_ns, include_fields=self._include_fields, exclude_fields=self._exclude_fields, ) # First, search for the namespace in the plain namespaces. try: return self._plain[plain_src_ns] except KeyError: # Search for the namespace in the wildcard namespaces. for regex, namespace in self._regex_map: new_name = match_replace_regex(regex, plain_src_ns, namespace.dest_name) if not new_name: continue # Save the new target Namespace in the plain namespaces so # future lookups are fast. new_namespace = namespace.with_options( dest_name=new_name, source_name=plain_src_ns ) self._add_plain_namespace(new_namespace) return new_namespace # Save the not included namespace to the excluded namespaces so # that future lookups of the same namespace are fast. self._ex_namespace_set.add(plain_src_ns) return None def map_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace, or None if it is not included. """ namespace = self.lookup(plain_src_ns) if namespace: return namespace.dest_name return None def gridfs_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace if this namespace is a gridfs collection. """ namespace = self.lookup(plain_src_ns) if namespace and namespace.gridfs: return namespace.dest_name return None def unmap_namespace(self, plain_target_ns): """Given a plain target namespace, return the corresponding source namespace. """ # Return the same namespace if there are no included namespaces. 
if not self._regex_map and not self._plain: return plain_target_ns src_name_set = self._reverse_plain.get(plain_target_ns) if src_name_set: # Return the first (and only) item in the set for src_name in src_name_set: return src_name # The target namespace could also exist in the wildcard namespaces for _, namespace in self._regex_map: original_name = match_replace_regex( namespace_to_regex(namespace.dest_name), plain_target_ns, namespace.source_name, ) if original_name: return original_name return None def map_db(self, plain_src_db): """Given a plain source database, return the list of target databases. Individual collections in a database can be mapped to different target databases, so map_db can return multiple databases. This function must return all target database names so we make the following restrictions on wildcards: 1) A wildcard appearing in the source database name must also appear in the target database name, eg "db*.col" => "new_db_*.new_col". 2) A wildcard appearing in the source collection name must also appear in the target collection name, eg "db.col*" => "new_db.new_col*". This is used by the CommandHelper for the dropDatabase command. """ if not self._regex_map and not self._plain: return [plain_src_db] # Lookup this namespace to seed the plain_db dictionary self.lookup(plain_src_db + ".$cmd") return list(self._plain_db.get(plain_src_db, set())) def projection(self, plain_src_name): """Return the projection for the given source namespace.""" mapped = self.lookup(plain_src_name) if not mapped: return None fields = mapped.include_fields or mapped.exclude_fields if fields: include = 1 if mapped.include_fields else 0 return dict((field, include) for field in fields) return None def get_included_databases(self): """Return the databases we want to include, or empty list for all. """ databases = set() databases.update(self._plain_db.keys()) for _, namespace in self._regex_map: database_name, _ = namespace.source_name.split(".", 1) if "*" in database_name: return [] databases.add(database_name) return list(databases) # FILE mongo-doc-manager/mongo_connector/namespace_config.py class NamespaceConfig(object): """Manages included and excluded namespaces. """ def __init__( self, namespace_set=None, ex_namespace_set=None, gridfs_set=None, dest_mapping=None, namespace_options=None, include_fields=None, exclude_fields=None, ): # A mapping from non-wildcard source namespaces to a MappedNamespace # containing the non-wildcard target name. self._plain = {} # A mapping from non-wildcard target namespaces to their # corresponding non-wildcard source namespace. Namespaces have a # one-to-one relationship with the target system, meaning multiple # source namespaces cannot be merged into a single namespace in the # target. self._reverse_plain = {} # A mapping from non-wildcard source database names to the set of # non-wildcard target database names. self._plain_db = {} # A list of (re.RegexObject, MappedNamespace) tuples. regex_map maps # wildcard source namespaces to a MappedNamespace containing the # wildcard target name. When a namespace is matched, an entry is # created in `self.plain` for faster subsequent lookups. self._regex_map = [] # Fields to include or exclude from all namespaces self._include_fields = validate_include_fields(include_fields) self._exclude_fields = validate_exclude_fields(exclude_fields) # Add each included namespace. 
Namespaces have a one-to-one # relationship to the target system, meaning multiple source # namespaces cannot be merged into a single namespace in the target. ex_namespace_set, namespaces = validate_namespace_options( namespace_set=namespace_set, ex_namespace_set=ex_namespace_set, gridfs_set=gridfs_set, dest_mapping=dest_mapping, namespace_options=namespace_options, include_fields=include_fields, exclude_fields=exclude_fields, ) # The set of, possibly wildcard, namespaces to exclude. self._ex_namespace_set = RegexSet.from_namespaces(ex_namespace_set) for namespace in namespaces: self._register_namespace_and_command(namespace) def _register_namespace_and_command(self, namespace): """Add a Namespace and the corresponding command namespace.""" ... def _add_namespace(self, namespace): """Add an included and possibly renamed Namespace.""" ... def _add_plain_namespace(self, namespace): """Add an included and possibly renamed non-wildcard Namespace.""" ... def lookup(self, plain_src_ns): """Given a plain source namespace, return the corresponding Namespace object, or None if it is not included. """ ... def map_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace, or None if it is not included. """ ... def gridfs_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace if this namespace is a gridfs collection. """ ... def unmap_namespace(self, plain_target_ns): """Given a plain target namespace, return the corresponding source namespace. """ ... def map_db(self, plain_src_db): """Given a plain source database, return the list of target databases. Individual collections in a database can be mapped to different target databases, so map_db can return multiple databases. This function must return all target database names so we make the following restrictions on wildcards: 1) A wildcard appearing in the source database name must also appear in the target database name, eg "db*.col" => "new_db_*.new_col". 2) A wildcard appearing in the source collection name must also appear in the target collection name, eg "db.col*" => "new_db.new_col*". This is used by the CommandHelper for the dropDatabase command. """ ... def projection(self, plain_src_name): """Return the projection for the given source namespace.""" ... def get_included_databases(self): """Return the databases we want to include, or empty list for all. """ ... # FILE mongo-doc-manager/mongo_connector/namespace_config.py def _validate_namespaces(namespaces): """Validate wildcards and renaming in namespaces. Target namespaces should have the same number of wildcards as the source. No target namespaces overlap exactly with each other. Logs a warning when wildcard namespaces have a chance of overlapping. """ for source, namespace in namespaces.items(): target = namespace.dest_name _validate_namespace(source) _validate_namespace(target) if source.count("*") > 1 or target.count("*") > 1: raise errors.InvalidConfiguration( "The namespace mapping from '%s' to '%s' cannot contain more " "than one '*' character." % (source, target) ) if source.count("*") != target.count("*"): raise errors.InvalidConfiguration( "The namespace mapping from '%s' to '%s' must contain the " "same number of '*' characters." 
% (source, target) ) if "*" not in source: continue # Make sure that wildcards are not moved from database name to # collection name or vice versa, eg "db*.foo" => "db.foo_*" if ( wildcard_in_db(source) and not wildcard_in_db(target) or (not wildcard_in_db(source) and wildcard_in_db(target)) ): raise errors.InvalidConfiguration( "The namespace mapping from '%s' to '%s' is invalid. A '*' " "that appears in the source database name must also appear " "in the target database name. A '*' that appears in the " "source collection name must also appear in the target " "collection name" % (source, target) ) for source1, source2 in combinations(namespaces.keys(), 2): if wildcards_overlap(source1, source2): LOG.warning( 'Namespaces "%s" and "%s" may match the ' "same source namespace.", source1, source2, ) target1 = namespaces[source1].dest_name target2 = namespaces[source2].dest_name if target1 == target2: raise errors.InvalidConfiguration( "Multiple namespaces cannot be combined into one target " "namespace. Trying to map '%s' to '%s' but '%s' already " "corresponds to '%s' in the target system." % (source2, target2, source1, target1) ) if wildcards_overlap(target1, target2): LOG.warning( "Multiple namespaces cannot be combined into one target " "namespace. Mapping from '%s' to '%s' might overlap " "with mapping from '%s' to '%s'." % (source2, target2, source1, target1) ) # FILE mongo-doc-manager/mongo_connector/namespace_config.py class NamespaceConfig(object): """Manages included and excluded namespaces. """ def __init__( self, namespace_set=None, ex_namespace_set=None, gridfs_set=None, dest_mapping=None, namespace_options=None, include_fields=None, exclude_fields=None, ): # A mapping from non-wildcard source namespaces to a MappedNamespace # containing the non-wildcard target name. self._plain = {} # A mapping from non-wildcard target namespaces to their # corresponding non-wildcard source namespace. Namespaces have a # one-to-one relationship with the target system, meaning multiple # source namespaces cannot be merged into a single namespace in the # target. self._reverse_plain = {} # A mapping from non-wildcard source database names to the set of # non-wildcard target database names. self._plain_db = {} # A list of (re.RegexObject, MappedNamespace) tuples. regex_map maps # wildcard source namespaces to a MappedNamespace containing the # wildcard target name. When a namespace is matched, an entry is # created in `self.plain` for faster subsequent lookups. self._regex_map = [] # Fields to include or exclude from all namespaces self._include_fields = validate_include_fields(include_fields) self._exclude_fields = validate_exclude_fields(exclude_fields) # Add each included namespace. Namespaces have a one-to-one # relationship to the target system, meaning multiple source # namespaces cannot be merged into a single namespace in the target. ex_namespace_set, namespaces = validate_namespace_options( namespace_set=namespace_set, ex_namespace_set=ex_namespace_set, gridfs_set=gridfs_set, dest_mapping=dest_mapping, namespace_options=namespace_options, include_fields=include_fields, exclude_fields=exclude_fields, ) # The set of, possibly wildcard, namespaces to exclude. 
self._ex_namespace_set = RegexSet.from_namespaces(ex_namespace_set) for namespace in namespaces: self._register_namespace_and_command(namespace) def _register_namespace_and_command(self, namespace): """Add a Namespace and the corresponding command namespace.""" self._add_namespace(namespace) # Add the namespace for commands on this database cmd_name = namespace.source_name.split(".", 1)[0] + ".$cmd" dest_cmd_name = namespace.dest_name.split(".", 1)[0] + ".$cmd" self._add_namespace(Namespace(dest_name=dest_cmd_name, source_name=cmd_name)) def _add_namespace(self, namespace): """Add an included and possibly renamed Namespace.""" ... def _add_plain_namespace(self, namespace): """Add an included and possibly renamed non-wildcard Namespace.""" ... def lookup(self, plain_src_ns): """Given a plain source namespace, return the corresponding Namespace object, or None if it is not included. """ ... def map_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace, or None if it is not included. """ ... def gridfs_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace if this namespace is a gridfs collection. """ ... def unmap_namespace(self, plain_target_ns): """Given a plain target namespace, return the corresponding source namespace. """ ... def map_db(self, plain_src_db): """Given a plain source database, return the list of target databases. Individual collections in a database can be mapped to different target databases, so map_db can return multiple databases. This function must return all target database names so we make the following restrictions on wildcards: 1) A wildcard appearing in the source database name must also appear in the target database name, eg "db*.col" => "new_db_*.new_col". 2) A wildcard appearing in the source collection name must also appear in the target collection name, eg "db.col*" => "new_db.new_col*". This is used by the CommandHelper for the dropDatabase command. """ ... def projection(self, plain_src_name): """Return the projection for the given source namespace.""" ... def get_included_databases(self): """Return the databases we want to include, or empty list for all. """ ... # FILE mongo-doc-manager/mongo_connector/namespace_config.py def validate_namespace_options( namespace_set=None, ex_namespace_set=None, gridfs_set=None, dest_mapping=None, namespace_options=None, include_fields=None, exclude_fields=None, ): ex_namespace_set, namespaces = _merge_namespace_options( namespace_set=namespace_set, ex_namespace_set=ex_namespace_set, gridfs_set=gridfs_set, dest_mapping=dest_mapping, namespace_options=namespace_options, include_fields=include_fields, exclude_fields=exclude_fields, ) for excluded_name in ex_namespace_set: _validate_namespace(excluded_name) if excluded_name in namespaces: raise errors.InvalidConfiguration( "Cannot include namespace '%s', it is already excluded." 
% (excluded_name,) ) for namespace in namespaces.values(): if namespace.include_fields and namespace.exclude_fields: raise errors.InvalidConfiguration( "Cannot mix include fields and exclude fields in " "namespace mapping for: '%s'" % (namespace.source_name,) ) if namespace.gridfs and namespace.dest_name != namespace.source_name: raise errors.InvalidConfiguration( "GridFS namespaces cannot be renamed: '%s'" % (namespace.source_name,) ) _validate_namespaces(namespaces) return ex_namespace_set, namespaces.values() # FILE mongo-doc-manager/mongo_connector/namespace_config.py class NamespaceConfig(object): """Manages included and excluded namespaces. """ def __init__( self, namespace_set=None, ex_namespace_set=None, gridfs_set=None, dest_mapping=None, namespace_options=None, include_fields=None, exclude_fields=None, ): # A mapping from non-wildcard source namespaces to a MappedNamespace # containing the non-wildcard target name. self._plain = {} # A mapping from non-wildcard target namespaces to their # corresponding non-wildcard source namespace. Namespaces have a # one-to-one relationship with the target system, meaning multiple # source namespaces cannot be merged into a single namespace in the # target. self._reverse_plain = {} # A mapping from non-wildcard source database names to the set of # non-wildcard target database names. self._plain_db = {} # A list of (re.RegexObject, MappedNamespace) tuples. regex_map maps # wildcard source namespaces to a MappedNamespace containing the # wildcard target name. When a namespace is matched, an entry is # created in `self.plain` for faster subsequent lookups. self._regex_map = [] # Fields to include or exclude from all namespaces self._include_fields = validate_include_fields(include_fields) self._exclude_fields = validate_exclude_fields(exclude_fields) # Add each included namespace. Namespaces have a one-to-one # relationship to the target system, meaning multiple source # namespaces cannot be merged into a single namespace in the target. ex_namespace_set, namespaces = validate_namespace_options( namespace_set=namespace_set, ex_namespace_set=ex_namespace_set, gridfs_set=gridfs_set, dest_mapping=dest_mapping, namespace_options=namespace_options, include_fields=include_fields, exclude_fields=exclude_fields, ) # The set of, possibly wildcard, namespaces to exclude. self._ex_namespace_set = RegexSet.from_namespaces(ex_namespace_set) for namespace in namespaces: self._register_namespace_and_command(namespace) def _register_namespace_and_command(self, namespace): """Add a Namespace and the corresponding command namespace.""" ... def _add_namespace(self, namespace): """Add an included and possibly renamed Namespace.""" ... def _add_plain_namespace(self, namespace): """Add an included and possibly renamed non-wildcard Namespace.""" ... def lookup(self, plain_src_ns): """Given a plain source namespace, return the corresponding Namespace object, or None if it is not included. """ ... def map_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace, or None if it is not included. """ ... def gridfs_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace if this namespace is a gridfs collection. """ namespace = self.lookup(plain_src_ns) if namespace and namespace.gridfs: return namespace.dest_name return None def unmap_namespace(self, plain_target_ns): """Given a plain target namespace, return the corresponding source namespace. """ ... 
def map_db(self, plain_src_db): """Given a plain source database, return the list of target databases. Individual collections in a database can be mapped to different target databases, so map_db can return multiple databases. This function must return all target database names so we make the following restrictions on wildcards: 1) A wildcard appearing in the source database name must also appear in the target database name, eg "db*.col" => "new_db_*.new_col". 2) A wildcard appearing in the source collection name must also appear in the target collection name, eg "db.col*" => "new_db.new_col*". This is used by the CommandHelper for the dropDatabase command. """ ... def projection(self, plain_src_name): """Return the projection for the given source namespace.""" ... def get_included_databases(self): """Return the databases we want to include, or empty list for all. """ ... # FILE mongo-doc-manager/mongo_connector/namespace_config.py class NamespaceConfig(object): """Manages included and excluded namespaces. """ def __init__( self, namespace_set=None, ex_namespace_set=None, gridfs_set=None, dest_mapping=None, namespace_options=None, include_fields=None, exclude_fields=None, ): # A mapping from non-wildcard source namespaces to a MappedNamespace # containing the non-wildcard target name. self._plain = {} # A mapping from non-wildcard target namespaces to their # corresponding non-wildcard source namespace. Namespaces have a # one-to-one relationship with the target system, meaning multiple # source namespaces cannot be merged into a single namespace in the # target. self._reverse_plain = {} # A mapping from non-wildcard source database names to the set of # non-wildcard target database names. self._plain_db = {} # A list of (re.RegexObject, MappedNamespace) tuples. regex_map maps # wildcard source namespaces to a MappedNamespace containing the # wildcard target name. When a namespace is matched, an entry is # created in `self.plain` for faster subsequent lookups. self._regex_map = [] # Fields to include or exclude from all namespaces self._include_fields = validate_include_fields(include_fields) self._exclude_fields = validate_exclude_fields(exclude_fields) # Add each included namespace. Namespaces have a one-to-one # relationship to the target system, meaning multiple source # namespaces cannot be merged into a single namespace in the target. ex_namespace_set, namespaces = validate_namespace_options( namespace_set=namespace_set, ex_namespace_set=ex_namespace_set, gridfs_set=gridfs_set, dest_mapping=dest_mapping, namespace_options=namespace_options, include_fields=include_fields, exclude_fields=exclude_fields, ) # The set of, possibly wildcard, namespaces to exclude. self._ex_namespace_set = RegexSet.from_namespaces(ex_namespace_set) for namespace in namespaces: self._register_namespace_and_command(namespace) def _register_namespace_and_command(self, namespace): """Add a Namespace and the corresponding command namespace.""" ... def _add_namespace(self, namespace): """Add an included and possibly renamed Namespace.""" ... def _add_plain_namespace(self, namespace): """Add an included and possibly renamed non-wildcard Namespace.""" ... def lookup(self, plain_src_ns): """Given a plain source namespace, return the corresponding Namespace object, or None if it is not included. """ # Ignore the namespace if it is excluded. if plain_src_ns in self._ex_namespace_set: return None # Include all namespaces if there are no included namespaces. 
if not self._regex_map and not self._plain: return Namespace( dest_name=plain_src_ns, source_name=plain_src_ns, include_fields=self._include_fields, exclude_fields=self._exclude_fields, ) # First, search for the namespace in the plain namespaces. try: return self._plain[plain_src_ns] except KeyError: # Search for the namespace in the wildcard namespaces. for regex, namespace in self._regex_map: new_name = match_replace_regex(regex, plain_src_ns, namespace.dest_name) if not new_name: continue # Save the new target Namespace in the plain namespaces so # future lookups are fast. new_namespace = namespace.with_options( dest_name=new_name, source_name=plain_src_ns ) self._add_plain_namespace(new_namespace) return new_namespace # Save the not included namespace to the excluded namespaces so # that future lookups of the same namespace are fast. self._ex_namespace_set.add(plain_src_ns) return None def map_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace, or None if it is not included. """ ... def gridfs_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace if this namespace is a gridfs collection. """ ... def unmap_namespace(self, plain_target_ns): """Given a plain target namespace, return the corresponding source namespace. """ ... def map_db(self, plain_src_db): """Given a plain source database, return the list of target databases. Individual collections in a database can be mapped to different target databases, so map_db can return multiple databases. This function must return all target database names so we make the following restrictions on wildcards: 1) A wildcard appearing in the source database name must also appear in the target database name, eg "db*.col" => "new_db_*.new_col". 2) A wildcard appearing in the source collection name must also appear in the target collection name, eg "db.col*" => "new_db.new_col*". This is used by the CommandHelper for the dropDatabase command. """ ... def projection(self, plain_src_name): """Return the projection for the given source namespace.""" ... def get_included_databases(self): """Return the databases we want to include, or empty list for all. """ ... # FILE mongo-doc-manager/mongo_connector/namespace_config.py class NamespaceConfig(object): """Manages included and excluded namespaces. """ def __init__( self, namespace_set=None, ex_namespace_set=None, gridfs_set=None, dest_mapping=None, namespace_options=None, include_fields=None, exclude_fields=None, ): # A mapping from non-wildcard source namespaces to a MappedNamespace # containing the non-wildcard target name. self._plain = {} # A mapping from non-wildcard target namespaces to their # corresponding non-wildcard source namespace. Namespaces have a # one-to-one relationship with the target system, meaning multiple # source namespaces cannot be merged into a single namespace in the # target. self._reverse_plain = {} # A mapping from non-wildcard source database names to the set of # non-wildcard target database names. self._plain_db = {} # A list of (re.RegexObject, MappedNamespace) tuples. regex_map maps # wildcard source namespaces to a MappedNamespace containing the # wildcard target name. When a namespace is matched, an entry is # created in `self.plain` for faster subsequent lookups. 
self._regex_map = [] # Fields to include or exclude from all namespaces self._include_fields = validate_include_fields(include_fields) self._exclude_fields = validate_exclude_fields(exclude_fields) # Add each included namespace. Namespaces have a one-to-one # relationship to the target system, meaning multiple source # namespaces cannot be merged into a single namespace in the target. ex_namespace_set, namespaces = validate_namespace_options( namespace_set=namespace_set, ex_namespace_set=ex_namespace_set, gridfs_set=gridfs_set, dest_mapping=dest_mapping, namespace_options=namespace_options, include_fields=include_fields, exclude_fields=exclude_fields, ) # The set of, possibly wildcard, namespaces to exclude. self._ex_namespace_set = RegexSet.from_namespaces(ex_namespace_set) for namespace in namespaces: self._register_namespace_and_command(namespace) def _register_namespace_and_command(self, namespace): """Add a Namespace and the corresponding command namespace.""" ... def _add_namespace(self, namespace): """Add an included and possibly renamed Namespace.""" ... def _add_plain_namespace(self, namespace): """Add an included and possibly renamed non-wildcard Namespace.""" src_name = namespace.source_name target_name = namespace.dest_name src_names = self._reverse_plain.setdefault(target_name, set()) src_names.add(src_name) if len(src_names) > 1: # Another source namespace is already mapped to this target existing_src = (src_names - set([src_name])).pop() raise errors.InvalidConfiguration( "Multiple namespaces cannot be combined into one target " "namespace. Trying to map '%s' to '%s' but there already " "exists a mapping from '%s' to '%s'" % (src_name, target_name, existing_src, target_name) ) self._plain[src_name] = namespace src_db, _ = src_name.split(".", 1) target_db, _ = target_name.split(".", 1) self._plain_db.setdefault(src_db, set()).add(target_db) def lookup(self, plain_src_ns): """Given a plain source namespace, return the corresponding Namespace object, or None if it is not included. """ ... def map_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace, or None if it is not included. """ ... def gridfs_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace if this namespace is a gridfs collection. """ ... def unmap_namespace(self, plain_target_ns): """Given a plain target namespace, return the corresponding source namespace. """ ... def map_db(self, plain_src_db): """Given a plain source database, return the list of target databases. Individual collections in a database can be mapped to different target databases, so map_db can return multiple databases. This function must return all target database names so we make the following restrictions on wildcards: 1) A wildcard appearing in the source database name must also appear in the target database name, eg "db*.col" => "new_db_*.new_col". 2) A wildcard appearing in the source collection name must also appear in the target collection name, eg "db.col*" => "new_db.new_col*". This is used by the CommandHelper for the dropDatabase command. """ ... def projection(self, plain_src_name): """Return the projection for the given source namespace.""" ... def get_included_databases(self): """Return the databases we want to include, or empty list for all. """ ... # FILE mongo-doc-manager/mongo_connector/namespace_config.py class NamespaceConfig(object): """Manages included and excluded namespaces. 
""" def __init__( self, namespace_set=None, ex_namespace_set=None, gridfs_set=None, dest_mapping=None, namespace_options=None, include_fields=None, exclude_fields=None, ): # A mapping from non-wildcard source namespaces to a MappedNamespace # containing the non-wildcard target name. self._plain = {} # A mapping from non-wildcard target namespaces to their # corresponding non-wildcard source namespace. Namespaces have a # one-to-one relationship with the target system, meaning multiple # source namespaces cannot be merged into a single namespace in the # target. self._reverse_plain = {} # A mapping from non-wildcard source database names to the set of # non-wildcard target database names. self._plain_db = {} # A list of (re.RegexObject, MappedNamespace) tuples. regex_map maps # wildcard source namespaces to a MappedNamespace containing the # wildcard target name. When a namespace is matched, an entry is # created in `self.plain` for faster subsequent lookups. self._regex_map = [] # Fields to include or exclude from all namespaces self._include_fields = validate_include_fields(include_fields) self._exclude_fields = validate_exclude_fields(exclude_fields) # Add each included namespace. Namespaces have a one-to-one # relationship to the target system, meaning multiple source # namespaces cannot be merged into a single namespace in the target. ex_namespace_set, namespaces = validate_namespace_options( namespace_set=namespace_set, ex_namespace_set=ex_namespace_set, gridfs_set=gridfs_set, dest_mapping=dest_mapping, namespace_options=namespace_options, include_fields=include_fields, exclude_fields=exclude_fields, ) # The set of, possibly wildcard, namespaces to exclude. self._ex_namespace_set = RegexSet.from_namespaces(ex_namespace_set) for namespace in namespaces: self._register_namespace_and_command(namespace) def _register_namespace_and_command(self, namespace): """Add a Namespace and the corresponding command namespace.""" ... def _add_namespace(self, namespace): """Add an included and possibly renamed Namespace.""" ... def _add_plain_namespace(self, namespace): """Add an included and possibly renamed non-wildcard Namespace.""" ... def lookup(self, plain_src_ns): """Given a plain source namespace, return the corresponding Namespace object, or None if it is not included. """ ... def map_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace, or None if it is not included. """ namespace = self.lookup(plain_src_ns) if namespace: return namespace.dest_name return None def gridfs_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace if this namespace is a gridfs collection. """ ... def unmap_namespace(self, plain_target_ns): """Given a plain target namespace, return the corresponding source namespace. """ ... def map_db(self, plain_src_db): """Given a plain source database, return the list of target databases. Individual collections in a database can be mapped to different target databases, so map_db can return multiple databases. This function must return all target database names so we make the following restrictions on wildcards: 1) A wildcard appearing in the source database name must also appear in the target database name, eg "db*.col" => "new_db_*.new_col". 2) A wildcard appearing in the source collection name must also appear in the target collection name, eg "db.col*" => "new_db.new_col*". This is used by the CommandHelper for the dropDatabase command. """ ... 
def projection(self, plain_src_name): """Return the projection for the given source namespace.""" ... def get_included_databases(self): """Return the databases we want to include, or empty list for all. """ ... # FILE mongo-doc-manager/mongo_connector/namespace_config.py class NamespaceConfig(object): """Manages included and excluded namespaces. """ def __init__( self, namespace_set=None, ex_namespace_set=None, gridfs_set=None, dest_mapping=None, namespace_options=None, include_fields=None, exclude_fields=None, ): # A mapping from non-wildcard source namespaces to a MappedNamespace # containing the non-wildcard target name. self._plain = {} # A mapping from non-wildcard target namespaces to their # corresponding non-wildcard source namespace. Namespaces have a # one-to-one relationship with the target system, meaning multiple # source namespaces cannot be merged into a single namespace in the # target. self._reverse_plain = {} # A mapping from non-wildcard source database names to the set of # non-wildcard target database names. self._plain_db = {} # A list of (re.RegexObject, MappedNamespace) tuples. regex_map maps # wildcard source namespaces to a MappedNamespace containing the # wildcard target name. When a namespace is matched, an entry is # created in `self.plain` for faster subsequent lookups. self._regex_map = [] # Fields to include or exclude from all namespaces self._include_fields = validate_include_fields(include_fields) self._exclude_fields = validate_exclude_fields(exclude_fields) # Add each included namespace. Namespaces have a one-to-one # relationship to the target system, meaning multiple source # namespaces cannot be merged into a single namespace in the target. ex_namespace_set, namespaces = validate_namespace_options( namespace_set=namespace_set, ex_namespace_set=ex_namespace_set, gridfs_set=gridfs_set, dest_mapping=dest_mapping, namespace_options=namespace_options, include_fields=include_fields, exclude_fields=exclude_fields, ) # The set of, possibly wildcard, namespaces to exclude. self._ex_namespace_set = RegexSet.from_namespaces(ex_namespace_set) for namespace in namespaces: self._register_namespace_and_command(namespace) def _register_namespace_and_command(self, namespace): """Add a Namespace and the corresponding command namespace.""" ... def _add_namespace(self, namespace): """Add an included and possibly renamed Namespace.""" ... def _add_plain_namespace(self, namespace): """Add an included and possibly renamed non-wildcard Namespace.""" ... def lookup(self, plain_src_ns): """Given a plain source namespace, return the corresponding Namespace object, or None if it is not included. """ ... def map_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace, or None if it is not included. """ ... def gridfs_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace if this namespace is a gridfs collection. """ ... def unmap_namespace(self, plain_target_ns): """Given a plain target namespace, return the corresponding source namespace. """ # Return the same namespace if there are no included namespaces. 
if not self._regex_map and not self._plain: return plain_target_ns src_name_set = self._reverse_plain.get(plain_target_ns) if src_name_set: # Return the first (and only) item in the set for src_name in src_name_set: return src_name # The target namespace could also exist in the wildcard namespaces for _, namespace in self._regex_map: original_name = match_replace_regex( namespace_to_regex(namespace.dest_name), plain_target_ns, namespace.source_name, ) if original_name: return original_name return None def map_db(self, plain_src_db): """Given a plain source database, return the list of target databases. Individual collections in a database can be mapped to different target databases, so map_db can return multiple databases. This function must return all target database names so we make the following restrictions on wildcards: 1) A wildcard appearing in the source database name must also appear in the target database name, eg "db*.col" => "new_db_*.new_col". 2) A wildcard appearing in the source collection name must also appear in the target collection name, eg "db.col*" => "new_db.new_col*". This is used by the CommandHelper for the dropDatabase command. """ ... def projection(self, plain_src_name): """Return the projection for the given source namespace.""" ... def get_included_databases(self): """Return the databases we want to include, or empty list for all. """ ... # FILE mongo-doc-manager/mongo_connector/namespace_config.py class NamespaceConfig(object): """Manages included and excluded namespaces. """ def __init__( self, namespace_set=None, ex_namespace_set=None, gridfs_set=None, dest_mapping=None, namespace_options=None, include_fields=None, exclude_fields=None, ): # A mapping from non-wildcard source namespaces to a MappedNamespace # containing the non-wildcard target name. self._plain = {} # A mapping from non-wildcard target namespaces to their # corresponding non-wildcard source namespace. Namespaces have a # one-to-one relationship with the target system, meaning multiple # source namespaces cannot be merged into a single namespace in the # target. self._reverse_plain = {} # A mapping from non-wildcard source database names to the set of # non-wildcard target database names. self._plain_db = {} # A list of (re.RegexObject, MappedNamespace) tuples. regex_map maps # wildcard source namespaces to a MappedNamespace containing the # wildcard target name. When a namespace is matched, an entry is # created in `self.plain` for faster subsequent lookups. self._regex_map = [] # Fields to include or exclude from all namespaces self._include_fields = validate_include_fields(include_fields) self._exclude_fields = validate_exclude_fields(exclude_fields) # Add each included namespace. Namespaces have a one-to-one # relationship to the target system, meaning multiple source # namespaces cannot be merged into a single namespace in the target. ex_namespace_set, namespaces = validate_namespace_options( namespace_set=namespace_set, ex_namespace_set=ex_namespace_set, gridfs_set=gridfs_set, dest_mapping=dest_mapping, namespace_options=namespace_options, include_fields=include_fields, exclude_fields=exclude_fields, ) # The set of, possibly wildcard, namespaces to exclude. self._ex_namespace_set = RegexSet.from_namespaces(ex_namespace_set) for namespace in namespaces: self._register_namespace_and_command(namespace) def _register_namespace_and_command(self, namespace): """Add a Namespace and the corresponding command namespace.""" ... 
def _add_namespace(self, namespace): """Add an included and possibly renamed Namespace.""" src_name = namespace.source_name if "*" in src_name: self._regex_map.append((namespace_to_regex(src_name), namespace)) else: self._add_plain_namespace(namespace) def _add_plain_namespace(self, namespace): """Add an included and possibly renamed non-wildcard Namespace.""" ... def lookup(self, plain_src_ns): """Given a plain source namespace, return the corresponding Namespace object, or None if it is not included. """ ... def map_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace, or None if it is not included. """ ... def gridfs_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace if this namespace is a gridfs collection. """ ... def unmap_namespace(self, plain_target_ns): """Given a plain target namespace, return the corresponding source namespace. """ ... def map_db(self, plain_src_db): """Given a plain source database, return the list of target databases. Individual collections in a database can be mapped to different target databases, so map_db can return multiple databases. This function must return all target database names so we make the following restrictions on wildcards: 1) A wildcard appearing in the source database name must also appear in the target database name, eg "db*.col" => "new_db_*.new_col". 2) A wildcard appearing in the source collection name must also appear in the target collection name, eg "db.col*" => "new_db.new_col*". This is used by the CommandHelper for the dropDatabase command. """ ... def projection(self, plain_src_name): """Return the projection for the given source namespace.""" ... def get_included_databases(self): """Return the databases we want to include, or empty list for all. """ ... Based on the information above, please complete the function: #CURRENT_FILE: mongo-doc-manager/mongo_connector/namespace_config.py import logging import re from collections import namedtuple, MutableSet from itertools import combinations from mongo_connector import errors def match_replace_regex(regex, src_namespace, dest_namespace): """Return the new mapped namespace if the src_namespace matches the regex."""
mongo-doc-manager/mongo_connector/namespace_config.py
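A completion sketch for match_replace_regex. This one can be grounded directly: the next record's retrieval context includes the finished function, so the body below mirrors it, splicing the text captured by the single '*' wildcard into the destination pattern and returning None when there is no match:

    def match_replace_regex(regex, src_namespace, dest_namespace):
        """Return the new mapped namespace if the src_namespace matches
        the regex."""
        match = regex.match(src_namespace)
        if match:
            # group(1) holds the text captured by the one '*' wildcard;
            # substitute it into the destination name.
            return dest_namespace.replace("*", match.group(1))
        return None

Note that the prompt's `from collections import namedtuple, MutableSet` import only works on Python 3.9 and earlier; since Python 3.10 the ABC must be imported from collections.abc.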
mongo_connector.namespace_config.namespace_to_regex
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE mongo-doc-manager/mongo_connector/namespace_config.py def lookup(self, plain_src_ns): """Given a plain source namespace, return the corresponding Namespace object, or None if it is not included. """ # Ignore the namespace if it is excluded. if plain_src_ns in self._ex_namespace_set: return None # Include all namespaces if there are no included namespaces. if not self._regex_map and not self._plain: return Namespace( dest_name=plain_src_ns, source_name=plain_src_ns, include_fields=self._include_fields, exclude_fields=self._exclude_fields, ) # First, search for the namespace in the plain namespaces. try: return self._plain[plain_src_ns] except KeyError: # Search for the namespace in the wildcard namespaces. for regex, namespace in self._regex_map: new_name = match_replace_regex(regex, plain_src_ns, namespace.dest_name) if not new_name: continue # Save the new target Namespace in the plain namespaces so # future lookups are fast. new_namespace = namespace.with_options( dest_name=new_name, source_name=plain_src_ns ) self._add_plain_namespace(new_namespace) return new_namespace # Save the not included namespace to the excluded namespaces so # that future lookups of the same namespace are fast. self._ex_namespace_set.add(plain_src_ns) return None # FILE mongo-doc-manager/mongo_connector/namespace_config.py def match_replace_regex(regex, src_namespace, dest_namespace): """Return the new mapped namespace if the src_namespace matches the regex.""" match = regex.match(src_namespace) if match: return dest_namespace.replace("*", match.group(1)) return None # FILE mongo-doc-manager/mongo_connector/namespace_config.py class NamespaceConfig(object): """Manages included and excluded namespaces. """ def __init__( self, namespace_set=None, ex_namespace_set=None, gridfs_set=None, dest_mapping=None, namespace_options=None, include_fields=None, exclude_fields=None, ): # A mapping from non-wildcard source namespaces to a MappedNamespace # containing the non-wildcard target name. self._plain = {} # A mapping from non-wildcard target namespaces to their # corresponding non-wildcard source namespace. Namespaces have a # one-to-one relationship with the target system, meaning multiple # source namespaces cannot be merged into a single namespace in the # target. self._reverse_plain = {} # A mapping from non-wildcard source database names to the set of # non-wildcard target database names. self._plain_db = {} # A list of (re.RegexObject, MappedNamespace) tuples. regex_map maps # wildcard source namespaces to a MappedNamespace containing the # wildcard target name. When a namespace is matched, an entry is # created in `self.plain` for faster subsequent lookups. self._regex_map = [] # Fields to include or exclude from all namespaces self._include_fields = validate_include_fields(include_fields) self._exclude_fields = validate_exclude_fields(exclude_fields) # Add each included namespace. Namespaces have a one-to-one # relationship to the target system, meaning multiple source # namespaces cannot be merged into a single namespace in the target. ex_namespace_set, namespaces = validate_namespace_options( namespace_set=namespace_set, ex_namespace_set=ex_namespace_set, gridfs_set=gridfs_set, dest_mapping=dest_mapping, namespace_options=namespace_options, include_fields=include_fields, exclude_fields=exclude_fields, ) # The set of, possibly wildcard, namespaces to exclude. 
self._ex_namespace_set = RegexSet.from_namespaces(ex_namespace_set) for namespace in namespaces: self._register_namespace_and_command(namespace) def _register_namespace_and_command(self, namespace): """Add a Namespace and the corresponding command namespace.""" ... def _add_namespace(self, namespace): """Add an included and possibly renamed Namespace.""" src_name = namespace.source_name if "*" in src_name: self._regex_map.append((namespace_to_regex(src_name), namespace)) else: self._add_plain_namespace(namespace) def _add_plain_namespace(self, namespace): """Add an included and possibly renamed non-wildcard Namespace.""" ... def lookup(self, plain_src_ns): """Given a plain source namespace, return the corresponding Namespace object, or None if it is not included. """ ... def map_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace, or None if it is not included. """ ... def gridfs_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace if this namespace is a gridfs collection. """ ... def unmap_namespace(self, plain_target_ns): """Given a plain target namespace, return the corresponding source namespace. """ ... def map_db(self, plain_src_db): """Given a plain source database, return the list of target databases. Individual collections in a database can be mapped to different target databases, so map_db can return multiple databases. This function must return all target database names so we make the following restrictions on wildcards: 1) A wildcard appearing in the source database name must also appear in the target database name, eg "db*.col" => "new_db_*.new_col". 2) A wildcard appearing in the source collection name must also appear in the target collection name, eg "db.col*" => "new_db.new_col*". This is used by the CommandHelper for the dropDatabase command. """ ... def projection(self, plain_src_name): """Return the projection for the given source namespace.""" ... def get_included_databases(self): """Return the databases we want to include, or empty list for all. """ ... # FILE mongo-doc-manager/mongo_connector/namespace_config.py class RegexSet(MutableSet): """Set that stores both plain strings and RegexObjects. Membership query results are cached so that repeated lookups of the same string are fast. """ def __init__(self, regexes, strings): self._regexes = set(regexes) self._plain = set(strings) self._not_found_cache = set() def __contains__(self, item): ... def __iter__(self): ... def __len__(self): ... def add(self, string): ... def discard(self, string): ... def from_namespaces(namespaces): regexes = set() strings = set() for ns in namespaces: if "*" in ns: regexes.add(namespace_to_regex(ns)) else: strings.add(ns) return RegexSet(regexes, strings) Based on the information above, please complete the function: #CURRENT_FILE: mongo-doc-manager/mongo_connector/namespace_config.py import logging import re from collections import namedtuple, MutableSet from itertools import combinations from mongo_connector import errors def namespace_to_regex(namespace): """Create a RegexObject from a wildcard namespace."""
mongo-doc-manager/mongo_connector/namespace_config.py
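A hedged sketch for namespace_to_regex: the exact pattern is not shown anywhere in this record, but match_replace_regex reads the wildcard text back via match.group(1), and validation elsewhere in the file permits at most one '*' per namespace, so the one wildcard must become capturing group 1 while everything else matches literally. A minimal version under those assumptions:

    import re

    def namespace_to_regex(namespace):
        """Create a RegexObject from a wildcard namespace."""
        # Escape regex metacharacters such as '.', then turn the single
        # allowed '*' wildcard into capturing group 1 so that
        # match_replace_regex can splice the matched text back in.
        pattern = re.escape(namespace).replace("\\*", "(.*)")
        return re.compile("\\A" + pattern + "\\Z")

The real implementation may additionally keep a database-name wildcard from matching across the '.' separator (for example by using ([^.]*) for the database part); that refinement cannot be derived from this record alone.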
mongo_connector.util.long_to_bson_ts
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE mongo-doc-manager/mongo_connector/util.py def bson_ts_to_long(timestamp): """Convert BSON timestamp into integer. Conversion rule is based from the specs (http://bsonspec.org/#/specification). """ return (timestamp.time << 32) + timestamp.inc # LIB bson class Timestamp: """MongoDB internal timestamps used in the opLog.""" __slots__ = ("__time", "__inc") __getstate__ = _getstate_slots __setstate__ = _setstate_slots _type_marker = 17 def __init__(self, time: Union[datetime.datetime, int], inc: int) -> None: """Create a new :class:`Timestamp`. This class is only for use with the MongoDB opLog. If you need to store a regular timestamp, please use a :class:`~datetime.datetime`. Raises :class:`TypeError` if `time` is not an instance of :class: `int` or :class:`~datetime.datetime`, or `inc` is not an instance of :class:`int`. Raises :class:`ValueError` if `time` or `inc` is not in [0, 2**32). :param time: time in seconds since epoch UTC, or a naive UTC :class:`~datetime.datetime`, or an aware :class:`~datetime.datetime` :param inc: the incrementing counter """ if isinstance(time, datetime.datetime): offset = time.utcoffset() if offset is not None: time = time - offset time = int(calendar.timegm(time.timetuple())) if not isinstance(time, int): raise TypeError(f"time must be an instance of int, not {type(time)}") if not isinstance(inc, int): raise TypeError(f"inc must be an instance of int, not {type(inc)}") if not 0 <= time < UPPERBOUND: raise ValueError("time must be contained in [0, 2**32)") if not 0 <= inc < UPPERBOUND: raise ValueError("inc must be contained in [0, 2**32)") self.__time = time self.__inc = inc @property def time(self) -> int: """Get the time portion of this :class:`Timestamp`.""" return self.__time @property def inc(self) -> int: """Get the inc portion of this :class:`Timestamp`.""" return self.__inc def __eq__(self, other: Any) -> bool: if isinstance(other, Timestamp): return self.__time == other.time and self.__inc == other.inc else: return NotImplemented def __hash__(self) -> int: return hash(self.time) ^ hash(self.inc) def __ne__(self, other: Any) -> bool: return not self == other def __lt__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) < (other.time, other.inc) return NotImplemented def __le__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) <= (other.time, other.inc) return NotImplemented def __gt__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) > (other.time, other.inc) return NotImplemented def __ge__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) >= (other.time, other.inc) return NotImplemented def __repr__(self) -> str: return f"Timestamp({self.__time}, {self.__inc})" def as_datetime(self) -> datetime.datetime: """Return a :class:`~datetime.datetime` instance corresponding to the time portion of this :class:`Timestamp`. The returned datetime's timezone is UTC. """ return datetime.datetime.fromtimestamp(self.__time, utc) # LIB bson class Timestamp: """MongoDB internal timestamps used in the opLog.""" def __init__(self, time: Union[datetime.datetime, int], inc: int) -> None: """Create a new :class:`Timestamp`. This class is only for use with the MongoDB opLog. If you need to store a regular timestamp, please use a :class:`~datetime.datetime`. 
Raises :class:`TypeError` if `time` is not an instance of :class: `int` or :class:`~datetime.datetime`, or `inc` is not an instance of :class:`int`. Raises :class:`ValueError` if `time` or `inc` is not in [0, 2**32). :param time: time in seconds since epoch UTC, or a naive UTC :class:`~datetime.datetime`, or an aware :class:`~datetime.datetime` :param inc: the incrementing counter """ if isinstance(time, datetime.datetime): offset = time.utcoffset() if offset is not None: time = time - offset time = int(calendar.timegm(time.timetuple())) if not isinstance(time, int): raise TypeError(f"time must be an instance of int, not {type(time)}") if not isinstance(inc, int): raise TypeError(f"inc must be an instance of int, not {type(inc)}") if not 0 <= time < UPPERBOUND: raise ValueError("time must be contained in [0, 2**32)") if not 0 <= inc < UPPERBOUND: raise ValueError("inc must be contained in [0, 2**32)") self.__time = time self.__inc = inc def time(self) -> int: """Get the time portion of this :class:`Timestamp`.""" return self.__time def inc(self) -> int: """Get the inc portion of this :class:`Timestamp`.""" ... def __eq__(self, other: Any) -> bool: ... def __hash__(self) -> int: ... def __ne__(self, other: Any) -> bool: ... def __lt__(self, other: Any) -> bool: ... def __le__(self, other: Any) -> bool: ... def __gt__(self, other: Any) -> bool: ... def __ge__(self, other: Any) -> bool: ... def __repr__(self) -> str: ... def as_datetime(self) -> datetime.datetime: """Return a :class:`~datetime.datetime` instance corresponding to the time portion of this :class:`Timestamp`. The returned datetime's timezone is UTC. """ ... # LIB bson class Timestamp: """MongoDB internal timestamps used in the opLog.""" def __init__(self, time: Union[datetime.datetime, int], inc: int) -> None: """Create a new :class:`Timestamp`. This class is only for use with the MongoDB opLog. If you need to store a regular timestamp, please use a :class:`~datetime.datetime`. Raises :class:`TypeError` if `time` is not an instance of :class: `int` or :class:`~datetime.datetime`, or `inc` is not an instance of :class:`int`. Raises :class:`ValueError` if `time` or `inc` is not in [0, 2**32). :param time: time in seconds since epoch UTC, or a naive UTC :class:`~datetime.datetime`, or an aware :class:`~datetime.datetime` :param inc: the incrementing counter """ if isinstance(time, datetime.datetime): offset = time.utcoffset() if offset is not None: time = time - offset time = int(calendar.timegm(time.timetuple())) if not isinstance(time, int): raise TypeError(f"time must be an instance of int, not {type(time)}") if not isinstance(inc, int): raise TypeError(f"inc must be an instance of int, not {type(inc)}") if not 0 <= time < UPPERBOUND: raise ValueError("time must be contained in [0, 2**32)") if not 0 <= inc < UPPERBOUND: raise ValueError("inc must be contained in [0, 2**32)") self.__time = time self.__inc = inc def time(self) -> int: """Get the time portion of this :class:`Timestamp`.""" ... def inc(self) -> int: """Get the inc portion of this :class:`Timestamp`.""" return self.__inc def __eq__(self, other: Any) -> bool: ... def __hash__(self) -> int: ... def __ne__(self, other: Any) -> bool: ... def __lt__(self, other: Any) -> bool: ... def __le__(self, other: Any) -> bool: ... def __gt__(self, other: Any) -> bool: ... def __ge__(self, other: Any) -> bool: ... def __repr__(self) -> str: ... 
def as_datetime(self) -> datetime.datetime: """Return a :class:`~datetime.datetime` instance corresponding to the time portion of this :class:`Timestamp`. The returned datetime's timezone is UTC. """ ... Based on the information above, please complete the function: #CURRENT_FILE: mongo-doc-manager/mongo_connector/util.py import logging import sys import time from bson.timestamp import Timestamp from pymongo import errors def long_to_bson_ts(val): """Convert integer into BSON timestamp. """
mongo-doc-manager/mongo_connector/util.py
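long_to_bson_ts follows directly from the bson_ts_to_long helper quoted in the prompt, which packs a Timestamp as (timestamp.time << 32) + timestamp.inc: the inverse takes the high 32 bits as the seconds and the low 32 bits as the increment, both of which land in the [0, 2**32) range that Timestamp requires.

    from bson.timestamp import Timestamp

    def long_to_bson_ts(val):
        """Convert integer into BSON timestamp."""
        # Inverse of bson_ts_to_long: high 32 bits are the time in
        # seconds, low 32 bits are the incrementing counter.
        return Timestamp(val >> 32, val & 0xFFFFFFFF)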
mongo_connector.doc_managers.formatters.DocumentFlattener.format_document
You are a Python programmer. Here is all the context you may find useful to complete the function: # LIB bson def loads(s: Union[str, bytes, bytearray], *args: Any, **kwargs: Any) -> Any: """Helper function that wraps :func:`json.loads`. Automatically passes the object_hook for BSON type conversion. Raises ``TypeError``, ``ValueError``, ``KeyError``, or :exc:`~bson.errors.InvalidId` on invalid MongoDB Extended JSON. :param json_options: A :class:`JSONOptions` instance used to modify the decoding of MongoDB Extended JSON types. Defaults to :const:`DEFAULT_JSON_OPTIONS`. .. versionchanged:: 4.0 Now loads :class:`datetime.datetime` instances as naive by default. To load timezone aware instances utilize the `json_options` parameter. See :ref:`tz_aware_default_change` for an example. .. versionchanged:: 3.5 Parses Relaxed and Canonical Extended JSON as well as PyMongo's legacy format. Now raises ``TypeError`` or ``ValueError`` when parsing JSON type wrappers with values of the wrong type or any extra keys. .. versionchanged:: 3.4 Accepts optional parameter `json_options`. See :class:`JSONOptions`. """ json_options = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS) # Execution time optimization if json_options.document_class is dict if json_options.document_class is dict: kwargs["object_hook"] = lambda obj: object_hook(obj, json_options) else: kwargs["object_pairs_hook"] = lambda pairs: object_pairs_hook(pairs, json_options) return json.loads(s, *args, **kwargs) # LIB bson def dumps(obj: Any, *args: Any, **kwargs: Any) -> str: """Helper function that wraps :func:`json.dumps`. Recursive function that handles all BSON types including :class:`~bson.binary.Binary` and :class:`~bson.code.Code`. :param json_options: A :class:`JSONOptions` instance used to modify the encoding of MongoDB Extended JSON types. Defaults to :const:`DEFAULT_JSON_OPTIONS`. .. versionchanged:: 4.0 Now outputs MongoDB Relaxed Extended JSON by default (using :const:`DEFAULT_JSON_OPTIONS`). .. versionchanged:: 3.4 Accepts optional parameter `json_options`. See :class:`JSONOptions`. """ json_options = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS) return json.dumps(_json_convert(obj, json_options), *args, **kwargs) # LIB bson def object_hook(dct: Mapping[str, Any], json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: match = None for k in dct: if k in _PARSERS_SET: match = k break if match: return _PARSERS[match](dct, json_options) return dct Based on the information above, please complete the function: #CURRENT_FILE: mongo-doc-manager/mongo_connector/doc_managers/formatters.py import base64 import datetime import re from uuid import UUID from math import isnan, isinf import logging import bson import bson.json_util from bson.regex import Regex class DocumentFlattener(DefaultDocumentFormatter): """Formatter that completely flattens documents and unwinds arrays: An example: {"a": 2, "b": { "c": { "d": 5 } }, "e": [6, 7, 8] } becomes: {"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8} """ def transform_element(self, key, value): if isinstance(value, list): for li, lv in enumerate(value): for inner_k, inner_v in self.transform_element("%s.%s" % (key, li), lv): yield inner_k, inner_v elif isinstance(value, dict): formatted = self.format_document(value) for doc_key in formatted: yield "%s.%s" % (key, doc_key), formatted[doc_key] else: # We assume that transform_value will return a 'flat' value, # not a list or dict yield key, self.transform_value(value) def format_document(self, document):
mongo-doc-manager/mongo_connector/doc_managers/formatters.py
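A sketch for DocumentFlattener.format_document, assuming (as the class docstring and transform_element suggest) that the method only needs to drive transform_element over the top-level items and collect the resulting pairs into a dict. transform_element already recurses into nested dicts by calling format_document and into lists by calling itself, so the two methods are mutually recursive:

    def format_document(self, document):
        def flatten(doc):
            for key, value in doc.items():
                # Each top-level item expands into one or more
                # ("dotted.key", scalar) pairs.
                for new_key, new_value in self.transform_element(key, value):
                    yield new_key, new_value
        return dict(flatten(document))

Applied to the docstring's example, this yields {"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8}, assuming the base class's transform_value passes plain scalars through unchanged.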
bplustree.memory.open_file_in_dir
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function: #CURRENT_FILE: bplustree/bplustree/memory.py import enum import io from logging import getLogger import os import platform from typing import Union, Tuple, Optional import cachetools import rwlock from .node import Node from .const import ENDIAN, PAGE_REFERENCE_BYTES, OTHERS_BYTES, TreeConf, FRAME_TYPE_BYTES def open_file_in_dir(path: str) -> Tuple[io.FileIO, Optional[int]]: """Open a file and its directory. The file is opened in binary mode and created if it does not exist. Both file descriptors must be closed after use to prevent them from leaking. On Windows, the directory is not opened, as it is useless. """
bplustree/bplustree/memory.py
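A hedged sketch for open_file_in_dir, built only from its docstring and from how write_to_file and fsync_file_and_dir use the returned pair elsewhere in this document: open the file unbuffered in binary mode (creating it if needed), open the containing directory so metadata can be fsynced on POSIX, and return None for the directory on Windows, where directories cannot be opened this way. Details such as error handling are guesses, not bplustree's actual code:

    import io
    import os
    import platform
    from typing import Optional, Tuple

    def open_file_in_dir(path: str) -> Tuple[io.FileIO, Optional[int]]:
        """Open a file and its directory (sketch of the behaviour the
        docstring describes)."""
        # Fall back to '.' so a bare filename still resolves to a directory.
        directory = os.path.dirname(path) or '.'
        # 'x+b' creates the file, 'r+b' opens an existing one;
        # buffering=0 returns a raw io.FileIO object.
        mode = 'r+b' if os.path.exists(path) else 'x+b'
        file_fd = open(path, mode=mode, buffering=0)

        if platform.system() == 'Windows':
            # Directories cannot be opened on Windows, and fsyncing a
            # directory is not needed there to persist metadata.
            dir_fd = None
        else:
            dir_fd = os.open(directory, os.O_RDONLY)

        return file_fd, dir_fd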
bplustree.memory.FileMemory.read_transaction
You are a Python programmer. Here is all the context you may find useful to complete the function: # FILE bplustree/bplustree/memory.py class WAL: def __init__(self, filename: str, page_size: int): self.filename = filename + '-wal' self._fd, self._dir_fd = open_file_in_dir(self.filename) self._page_size = page_size self._committed_pages = dict() self._not_committed_pages = dict() self._fd.seek(0, io.SEEK_END) if self._fd.tell() == 0: self._create_header() self.needs_recovery = False else: logger.warning('Found an existing WAL file, ' 'the B+Tree was not closed properly') self.needs_recovery = True self._load_wal() def checkpoint(self): """Transfer the modified data back to the tree and close the WAL.""" ... def _create_header(self): ... def _load_wal(self): ... def _load_next_frame(self): ... def _index_frame(self, frame_type: FrameType, page: int, page_start: int): if frame_type is FrameType.PAGE: self._not_committed_pages[page] = page_start elif frame_type is FrameType.COMMIT: self._committed_pages.update(self._not_committed_pages) self._not_committed_pages = dict() elif frame_type is FrameType.ROLLBACK: self._not_committed_pages = dict() else: assert False def _add_frame(self, frame_type: FrameType, page: Optional[int]=None, ... def get_page(self, page: int) -> Optional[bytes]: ... def set_page(self, page: int, page_data: bytes): ... def commit(self): ... def rollback(self): ... def __repr__(self): ... # FILE bplustree/bplustree/memory.py def read_from_file(file_fd: io.FileIO, start: int, stop: int) -> bytes: length = stop - start assert length >= 0 file_fd.seek(start) data = bytes() while file_fd.tell() < stop: read_data = file_fd.read(stop - file_fd.tell()) if read_data == b'': raise ReachedEndOfFile('Read until the end of file') data += read_data assert len(data) == length return data # FILE bplustree/bplustree/memory.py class WAL: def __init__(self, filename: str, page_size: int): self.filename = filename + '-wal' self._fd, self._dir_fd = open_file_in_dir(self.filename) self._page_size = page_size self._committed_pages = dict() self._not_committed_pages = dict() self._fd.seek(0, io.SEEK_END) if self._fd.tell() == 0: self._create_header() self.needs_recovery = False else: logger.warning('Found an existing WAL file, ' 'the B+Tree was not closed properly') self.needs_recovery = True self._load_wal() def checkpoint(self): """Transfer the modified data back to the tree and close the WAL.""" ... def _create_header(self): ... def _load_wal(self): ... def _load_next_frame(self): start = self._fd.tell() stop = start + self.FRAME_HEADER_LENGTH data = read_from_file(self._fd, start, stop) frame_type = int.from_bytes(data[0:FRAME_TYPE_BYTES], ENDIAN) page = int.from_bytes( data[FRAME_TYPE_BYTES:FRAME_TYPE_BYTES+PAGE_REFERENCE_BYTES], ENDIAN ) frame_type = FrameType(frame_type) if frame_type is FrameType.PAGE: self._fd.seek(stop + self._page_size) self._index_frame(frame_type, page, stop) def _index_frame(self, frame_type: FrameType, page: int, page_start: int): ... def _add_frame(self, frame_type: FrameType, page: Optional[int]=None, ... def get_page(self, page: int) -> Optional[bytes]: ... def set_page(self, page: int, page_data: bytes): ... def commit(self): ... def rollback(self): ... def __repr__(self): ... 
# FILE bplustree/bplustree/memory.py class WAL: def __init__(self, filename: str, page_size: int): self.filename = filename + '-wal' self._fd, self._dir_fd = open_file_in_dir(self.filename) self._page_size = page_size self._committed_pages = dict() self._not_committed_pages = dict() self._fd.seek(0, io.SEEK_END) if self._fd.tell() == 0: self._create_header() self.needs_recovery = False else: logger.warning('Found an existing WAL file, ' 'the B+Tree was not closed properly') self.needs_recovery = True self._load_wal() def checkpoint(self): """Transfer the modified data back to the tree and close the WAL.""" ... def _create_header(self): data = self._page_size.to_bytes(OTHERS_BYTES, ENDIAN) self._fd.seek(0) write_to_file(self._fd, self._dir_fd, data, True) def _load_wal(self): ... def _load_next_frame(self): ... def _index_frame(self, frame_type: FrameType, page: int, page_start: int): ... def _add_frame(self, frame_type: FrameType, page: Optional[int]=None, ... def get_page(self, page: int) -> Optional[bytes]: ... def set_page(self, page: int, page_data: bytes): ... def commit(self): ... def rollback(self): ... def __repr__(self): ... # FILE bplustree/bplustree/memory.py def fsync_file_and_dir(file_fileno: int, dir_fileno: Optional[int]): os.fsync(file_fileno) if dir_fileno is not None: os.fsync(dir_fileno) # FILE bplustree/bplustree/memory.py def write_to_file(file_fd: io.FileIO, dir_fileno: Optional[int], data: bytes, fsync: bool=True): length_to_write = len(data) written = 0 while written < length_to_write: written = file_fd.write(data[written:]) if fsync: fsync_file_and_dir(file_fd.fileno(), dir_fileno) # FILE bplustree/bplustree/node.py class Node(metaclass=abc.ABCMeta): def __init__(self, tree_conf: TreeConf, data: Optional[bytes]=None, page: int=None, parent: 'Node'=None, next_page: int=None): self._tree_conf = tree_conf self.entries = list() self.page = page self.parent = parent self.next_page = next_page if data: self.load(data) def load(self, data: bytes): assert len(data) == self._tree_conf.page_size end_used_page_length = NODE_TYPE_BYTES + USED_PAGE_LENGTH_BYTES used_page_length = int.from_bytes( data[NODE_TYPE_BYTES:end_used_page_length], ENDIAN ) end_header = end_used_page_length + PAGE_REFERENCE_BYTES self.next_page = int.from_bytes( data[end_used_page_length:end_header], ENDIAN ) if self.next_page == 0: self.next_page = None entry_length = self._entry_class(self._tree_conf).length for start_offset in range(end_header, used_page_length, entry_length): entry_data = data[start_offset:start_offset+entry_length] entry = self._entry_class(self._tree_conf, data=entry_data) self.entries.append(entry) def dump(self) -> bytearray: ... def can_add_entry(self) -> bool: ... def can_delete_entry(self) -> bool: ... def smallest_key(self): ... def smallest_entry(self): ... def biggest_key(self): ... def biggest_entry(self): ... def num_children(self) -> int: """Number of entries or other nodes connected to the node.""" ... def pop_smallest(self) -> Entry: """Remove and return the smallest entry.""" ... def insert_entry(self, entry: Entry): ... def insert_entry_at_the_end(self, entry: Entry): """Insert an entry at the end of the entry list. This is an optimized version of `insert_entry` when it is known that the key to insert is bigger than any other entries. """ ... def remove_entry(self, key): ... def get_entry(self, key) -> Entry: ... def _find_entry_index(self, key) -> int: ... def split_entries(self) -> list: """Split the entries in half. 
Keep the lower part in the node and return the upper one. """ ... def from_page_data(cls, tree_conf: TreeConf, data: bytes, ... def __repr__(self): ... def __eq__(self, other): ... # FILE bplustree/bplustree/memory.py class WAL: def __init__(self, filename: str, page_size: int): self.filename = filename + '-wal' self._fd, self._dir_fd = open_file_in_dir(self.filename) self._page_size = page_size self._committed_pages = dict() self._not_committed_pages = dict() self._fd.seek(0, io.SEEK_END) if self._fd.tell() == 0: self._create_header() self.needs_recovery = False else: logger.warning('Found an existing WAL file, ' 'the B+Tree was not closed properly') self.needs_recovery = True self._load_wal() def checkpoint(self): """Transfer the modified data back to the tree and close the WAL.""" ... def _create_header(self): ... def _load_wal(self): ... def _load_next_frame(self): ... def _index_frame(self, frame_type: FrameType, page: int, page_start: int): ... def _add_frame(self, frame_type: FrameType, page: Optional[int]=None, page_data: Optional[bytes]=None): if frame_type is FrameType.PAGE and (not page or not page_data): raise ValueError('PAGE frame without page data') if page_data and len(page_data) != self._page_size: raise ValueError('Page data is different from page size') if not page: page = 0 if frame_type is not FrameType.PAGE: page_data = b'' data = ( frame_type.value.to_bytes(FRAME_TYPE_BYTES, ENDIAN) + page.to_bytes(PAGE_REFERENCE_BYTES, ENDIAN) + page_data ) self._fd.seek(0, io.SEEK_END) write_to_file(self._fd, self._dir_fd, data, fsync=frame_type != FrameType.PAGE) self._index_frame(frame_type, page, self._fd.tell() - self._page_size) def get_page(self, page: int) -> Optional[bytes]: ... def set_page(self, page: int, page_data: bytes): ... def commit(self): ... def rollback(self): ... def __repr__(self): ... # FILE bplustree/bplustree/memory.py class WAL: def __init__(self, filename: str, page_size: int): self.filename = filename + '-wal' self._fd, self._dir_fd = open_file_in_dir(self.filename) self._page_size = page_size self._committed_pages = dict() self._not_committed_pages = dict() self._fd.seek(0, io.SEEK_END) if self._fd.tell() == 0: self._create_header() self.needs_recovery = False else: logger.warning('Found an existing WAL file, ' 'the B+Tree was not closed properly') self.needs_recovery = True self._load_wal() def checkpoint(self): """Transfer the modified data back to the tree and close the WAL.""" ... def _create_header(self): ... def _load_wal(self): self._fd.seek(0) header_data = read_from_file(self._fd, 0, OTHERS_BYTES) assert int.from_bytes(header_data, ENDIAN) == self._page_size while True: try: self._load_next_frame() except ReachedEndOfFile: break if self._not_committed_pages: logger.warning('WAL has uncommitted data, discarding it') self._not_committed_pages = dict() def _load_next_frame(self): ... def _index_frame(self, frame_type: FrameType, page: int, page_start: int): ... def _add_frame(self, frame_type: FrameType, page: Optional[int]=None, ... def get_page(self, page: int) -> Optional[bytes]: ... def set_page(self, page: int, page_data: bytes): ... def commit(self): ... def rollback(self): ... def __repr__(self): ... 
# FILE bplustree/bplustree/node.py class Node(metaclass=abc.ABCMeta): def __init__(self, tree_conf: TreeConf, data: Optional[bytes]=None, page: int=None, parent: 'Node'=None, next_page: int=None): self._tree_conf = tree_conf self.entries = list() self.page = page self.parent = parent self.next_page = next_page if data: self.load(data) def load(self, data: bytes): ... def dump(self) -> bytearray: ... def can_add_entry(self) -> bool: ... def can_delete_entry(self) -> bool: ... def smallest_key(self): ... def smallest_entry(self): ... def biggest_key(self): ... def biggest_entry(self): ... def num_children(self) -> int: """Number of entries or other nodes connected to the node.""" ... def pop_smallest(self) -> Entry: """Remove and return the smallest entry.""" ... def insert_entry(self, entry: Entry): ... def insert_entry_at_the_end(self, entry: Entry): """Insert an entry at the end of the entry list. This is an optimized version of `insert_entry` when it is known that the key to insert is bigger than any other entries. """ ... def remove_entry(self, key): ... def get_entry(self, key) -> Entry: ... def _find_entry_index(self, key) -> int: ... def split_entries(self) -> list: """Split the entries in half. Keep the lower part in the node and return the upper one. """ ... def from_page_data(cls, tree_conf: TreeConf, data: bytes, ... def __repr__(self): ... def __eq__(self, other): ... # FILE bplustree/bplustree/memory.py class WAL: def __init__(self, filename: str, page_size: int): self.filename = filename + '-wal' self._fd, self._dir_fd = open_file_in_dir(self.filename) self._page_size = page_size self._committed_pages = dict() self._not_committed_pages = dict() self._fd.seek(0, io.SEEK_END) if self._fd.tell() == 0: self._create_header() self.needs_recovery = False else: logger.warning('Found an existing WAL file, ' 'the B+Tree was not closed properly') self.needs_recovery = True self._load_wal() def checkpoint(self): """Transfer the modified data back to the tree and close the WAL.""" ... def _create_header(self): ... def _load_wal(self): ... def _load_next_frame(self): ... def _index_frame(self, frame_type: FrameType, page: int, page_start: int): ... def _add_frame(self, frame_type: FrameType, page: Optional[int]=None, ... def get_page(self, page: int) -> Optional[bytes]: ... def set_page(self, page: int, page_data: bytes): ... def commit(self): ... def rollback(self): ... def __repr__(self): ... 
# FILE bplustree/bplustree/node.py class Node(metaclass=abc.ABCMeta): __slots__ = ['_tree_conf', 'entries', 'page', 'parent', 'next_page'] # Attributes to redefine in inherited classes _node_type_int = 0 max_children = 0 min_children = 0 _entry_class = None def __init__(self, tree_conf: TreeConf, data: Optional[bytes]=None, page: int=None, parent: 'Node'=None, next_page: int=None): self._tree_conf = tree_conf self.entries = list() self.page = page self.parent = parent self.next_page = next_page if data: self.load(data) def load(self, data: bytes): assert len(data) == self._tree_conf.page_size end_used_page_length = NODE_TYPE_BYTES + USED_PAGE_LENGTH_BYTES used_page_length = int.from_bytes( data[NODE_TYPE_BYTES:end_used_page_length], ENDIAN ) end_header = end_used_page_length + PAGE_REFERENCE_BYTES self.next_page = int.from_bytes( data[end_used_page_length:end_header], ENDIAN ) if self.next_page == 0: self.next_page = None entry_length = self._entry_class(self._tree_conf).length for start_offset in range(end_header, used_page_length, entry_length): entry_data = data[start_offset:start_offset+entry_length] entry = self._entry_class(self._tree_conf, data=entry_data) self.entries.append(entry) def dump(self) -> bytearray: data = bytearray() for record in self.entries: data.extend(record.dump()) used_page_length = len(data) + 4 assert 0 <= used_page_length < self._tree_conf.page_size next_page = 0 if self.next_page is None else self.next_page header = ( self._node_type_int.to_bytes(1, ENDIAN) + used_page_length.to_bytes(3, ENDIAN) + next_page.to_bytes(PAGE_REFERENCE_BYTES, ENDIAN) ) data = bytearray(header) + data padding = self._tree_conf.page_size - len(data) assert padding >= 0 data.extend(bytearray(padding)) assert len(data) == self._tree_conf.page_size return data @property def can_add_entry(self) -> bool: return self.num_children < self.max_children @property def can_delete_entry(self) -> bool: return self.num_children > self.min_children @property def smallest_key(self): return self.smallest_entry.key @property def smallest_entry(self): return self.entries[0] @property def biggest_key(self): return self.biggest_entry.key @property def biggest_entry(self): return self.entries[-1] @property @abc.abstractmethod def num_children(self) -> int: """Number of entries or other nodes connected to the node.""" def pop_smallest(self) -> Entry: """Remove and return the smallest entry.""" return self.entries.pop(0) def insert_entry(self, entry: Entry): bisect.insort(self.entries, entry) def insert_entry_at_the_end(self, entry: Entry): """Insert an entry at the end of the entry list. This is an optimized version of `insert_entry` when it is known that the key to insert is bigger than any other entries. """ self.entries.append(entry) def remove_entry(self, key): self.entries.pop(self._find_entry_index(key)) def get_entry(self, key) -> Entry: return self.entries[self._find_entry_index(key)] def _find_entry_index(self, key) -> int: entry = self._entry_class( self._tree_conf, key=key # Hack to compare and order ) i = bisect.bisect_left(self.entries, entry) if i != len(self.entries) and self.entries[i] == entry: return i raise ValueError('No entry for key {}'.format(key)) def split_entries(self) -> list: """Split the entries in half. Keep the lower part in the node and return the upper one. 
""" len_entries = len(self.entries) rv = self.entries[len_entries//2:] self.entries = self.entries[:len_entries//2] assert len(self.entries) + len(rv) == len_entries return rv @classmethod def from_page_data(cls, tree_conf: TreeConf, data: bytes, page: int=None) -> 'Node': node_type_byte = data[0:NODE_TYPE_BYTES] node_type_int = int.from_bytes(node_type_byte, ENDIAN) if node_type_int == 1: return LonelyRootNode(tree_conf, data, page) elif node_type_int == 2: return RootNode(tree_conf, data, page) elif node_type_int == 3: return InternalNode(tree_conf, data, page) elif node_type_int == 4: return LeafNode(tree_conf, data, page) else: assert False, 'No Node with type {} exists'.format(node_type_int) def __repr__(self): return '<{}: page={} entries={}>'.format( self.__class__.__name__, self.page, len(self.entries) ) def __eq__(self, other): return ( self.__class__ is other.__class__ and self.page == other.page and self.entries == other.entries ) # FILE bplustree/bplustree/memory.py class FakeCache: """A cache that doesn't cache anything. Because cachetools does not work with maxsize=0. """ def get(self, k): pass def __setitem__(self, key, value): pass def clear(self): pass # FILE bplustree/bplustree/memory.py class WAL: __slots__ = ['filename', '_fd', '_dir_fd', '_page_size', '_committed_pages', '_not_committed_pages', 'needs_recovery'] FRAME_HEADER_LENGTH = ( FRAME_TYPE_BYTES + PAGE_REFERENCE_BYTES ) def __init__(self, filename: str, page_size: int): self.filename = filename + '-wal' self._fd, self._dir_fd = open_file_in_dir(self.filename) self._page_size = page_size self._committed_pages = dict() self._not_committed_pages = dict() self._fd.seek(0, io.SEEK_END) if self._fd.tell() == 0: self._create_header() self.needs_recovery = False else: logger.warning('Found an existing WAL file, ' 'the B+Tree was not closed properly') self.needs_recovery = True self._load_wal() def checkpoint(self): """Transfer the modified data back to the tree and close the WAL.""" if self._not_committed_pages: logger.warning('Closing WAL with uncommitted data, discarding it') fsync_file_and_dir(self._fd.fileno(), self._dir_fd) for page, page_start in self._committed_pages.items(): page_data = read_from_file( self._fd, page_start, page_start + self._page_size ) yield page, page_data self._fd.close() os.unlink(self.filename) if self._dir_fd is not None: os.fsync(self._dir_fd) os.close(self._dir_fd) def _create_header(self): data = self._page_size.to_bytes(OTHERS_BYTES, ENDIAN) self._fd.seek(0) write_to_file(self._fd, self._dir_fd, data, True) def _load_wal(self): self._fd.seek(0) header_data = read_from_file(self._fd, 0, OTHERS_BYTES) assert int.from_bytes(header_data, ENDIAN) == self._page_size while True: try: self._load_next_frame() except ReachedEndOfFile: break if self._not_committed_pages: logger.warning('WAL has uncommitted data, discarding it') self._not_committed_pages = dict() def _load_next_frame(self): start = self._fd.tell() stop = start + self.FRAME_HEADER_LENGTH data = read_from_file(self._fd, start, stop) frame_type = int.from_bytes(data[0:FRAME_TYPE_BYTES], ENDIAN) page = int.from_bytes( data[FRAME_TYPE_BYTES:FRAME_TYPE_BYTES+PAGE_REFERENCE_BYTES], ENDIAN ) frame_type = FrameType(frame_type) if frame_type is FrameType.PAGE: self._fd.seek(stop + self._page_size) self._index_frame(frame_type, page, stop) def _index_frame(self, frame_type: FrameType, page: int, page_start: int): if frame_type is FrameType.PAGE: self._not_committed_pages[page] = page_start elif frame_type is FrameType.COMMIT: 
self._committed_pages.update(self._not_committed_pages) self._not_committed_pages = dict() elif frame_type is FrameType.ROLLBACK: self._not_committed_pages = dict() else: assert False def _add_frame(self, frame_type: FrameType, page: Optional[int]=None, page_data: Optional[bytes]=None): if frame_type is FrameType.PAGE and (not page or not page_data): raise ValueError('PAGE frame without page data') if page_data and len(page_data) != self._page_size: raise ValueError('Page data is different from page size') if not page: page = 0 if frame_type is not FrameType.PAGE: page_data = b'' data = ( frame_type.value.to_bytes(FRAME_TYPE_BYTES, ENDIAN) + page.to_bytes(PAGE_REFERENCE_BYTES, ENDIAN) + page_data ) self._fd.seek(0, io.SEEK_END) write_to_file(self._fd, self._dir_fd, data, fsync=frame_type != FrameType.PAGE) self._index_frame(frame_type, page, self._fd.tell() - self._page_size) def get_page(self, page: int) -> Optional[bytes]: page_start = None for store in (self._not_committed_pages, self._committed_pages): page_start = store.get(page) if page_start: break if not page_start: return None return read_from_file(self._fd, page_start, page_start + self._page_size) def set_page(self, page: int, page_data: bytes): self._add_frame(FrameType.PAGE, page, page_data) def commit(self): # Commit is a no-op when there is no uncommitted pages if self._not_committed_pages: self._add_frame(FrameType.COMMIT) def rollback(self): # Rollback is a no-op when there is no uncommitted pages if self._not_committed_pages: self._add_frame(FrameType.ROLLBACK) def __repr__(self): return '<WAL: {}>'.format(self.filename) # FILE bplustree/bplustree/memory.py def open_file_in_dir(path: str) -> Tuple[io.FileIO, Optional[int]]: """Open a file and its directory. The file is opened in binary mode and created if it does not exist. Both file descriptors must be closed after use to prevent them from leaking. On Windows, the directory is not opened, as it is useless. """ directory = os.path.dirname(path) if not os.path.isdir(directory): raise ValueError('No directory {}'.format(directory)) if not os.path.exists(path): file_fd = open(path, mode='x+b', buffering=0) else: file_fd = open(path, mode='r+b', buffering=0) if platform.system() == 'Windows': # Opening a directory is not possible on Windows, but that is not # a problem since Windows does not need to fsync the directory in # order to persist metadata dir_fd = None else: dir_fd = os.open(directory, os.O_RDONLY) return file_fd, dir_fd # FILE bplustree/bplustree/node.py class Node(metaclass=abc.ABCMeta): def __init__(self, tree_conf: TreeConf, data: Optional[bytes]=None, page: int=None, parent: 'Node'=None, next_page: int=None): self._tree_conf = tree_conf self.entries = list() self.page = page self.parent = parent self.next_page = next_page if data: self.load(data) def load(self, data: bytes): ... def dump(self) -> bytearray: data = bytearray() for record in self.entries: data.extend(record.dump()) used_page_length = len(data) + 4 assert 0 <= used_page_length < self._tree_conf.page_size next_page = 0 if self.next_page is None else self.next_page header = ( self._node_type_int.to_bytes(1, ENDIAN) + used_page_length.to_bytes(3, ENDIAN) + next_page.to_bytes(PAGE_REFERENCE_BYTES, ENDIAN) ) data = bytearray(header) + data padding = self._tree_conf.page_size - len(data) assert padding >= 0 data.extend(bytearray(padding)) assert len(data) == self._tree_conf.page_size return data def can_add_entry(self) -> bool: ... def can_delete_entry(self) -> bool: ... def smallest_key(self): ... 
def smallest_entry(self): ... def biggest_key(self): ... def biggest_entry(self): ... def num_children(self) -> int: """Number of entries or other nodes connected to the node.""" ... def pop_smallest(self) -> Entry: """Remove and return the smallest entry.""" ... def insert_entry(self, entry: Entry): ... def insert_entry_at_the_end(self, entry: Entry): """Insert an entry at the end of the entry list. This is an optimized version of `insert_entry` when it is known that the key to insert is bigger than any other entries. """ ... def remove_entry(self, key): ... def get_entry(self, key) -> Entry: ... def _find_entry_index(self, key) -> int: ... def split_entries(self) -> list: """Split the entries in half. Keep the lower part in the node and return the upper one. """ ... def from_page_data(cls, tree_conf: TreeConf, data: bytes, ... def __repr__(self): ... def __eq__(self, other): ... # FILE bplustree/bplustree/node.py class Node(metaclass=abc.ABCMeta): def __init__(self, tree_conf: TreeConf, data: Optional[bytes]=None, page: int=None, parent: 'Node'=None, next_page: int=None): self._tree_conf = tree_conf self.entries = list() self.page = page self.parent = parent self.next_page = next_page if data: self.load(data) def load(self, data: bytes): ... def dump(self) -> bytearray: ... def can_add_entry(self) -> bool: ... def can_delete_entry(self) -> bool: ... def smallest_key(self): ... def smallest_entry(self): ... def biggest_key(self): ... def biggest_entry(self): ... def num_children(self) -> int: """Number of entries or other nodes connected to the node.""" ... def pop_smallest(self) -> Entry: """Remove and return the smallest entry.""" ... def insert_entry(self, entry: Entry): ... def insert_entry_at_the_end(self, entry: Entry): """Insert an entry at the end of the entry list. This is an optimized version of `insert_entry` when it is known that the key to insert is bigger than any other entries. """ ... def remove_entry(self, key): ... def get_entry(self, key) -> Entry: ... def _find_entry_index(self, key) -> int: ... def split_entries(self) -> list: """Split the entries in half. Keep the lower part in the node and return the upper one. """ ... def from_page_data(cls, tree_conf: TreeConf, data: bytes, page: int=None) -> 'Node': node_type_byte = data[0:NODE_TYPE_BYTES] node_type_int = int.from_bytes(node_type_byte, ENDIAN) if node_type_int == 1: return LonelyRootNode(tree_conf, data, page) elif node_type_int == 2: return RootNode(tree_conf, data, page) elif node_type_int == 3: return InternalNode(tree_conf, data, page) elif node_type_int == 4: return LeafNode(tree_conf, data, page) else: assert False, 'No Node with type {} exists'.format(node_type_int) def __repr__(self): ... def __eq__(self, other): ... # FILE bplustree/bplustree/memory.py class FakeCache: """A cache that doesn't cache anything. Because cachetools does not work with maxsize=0. """ def get(self, k): pass def __setitem__(self, key, value): ... def clear(self): ... 
# FILE bplustree/bplustree/memory.py class WAL: def __init__(self, filename: str, page_size: int): self.filename = filename + '-wal' self._fd, self._dir_fd = open_file_in_dir(self.filename) self._page_size = page_size self._committed_pages = dict() self._not_committed_pages = dict() self._fd.seek(0, io.SEEK_END) if self._fd.tell() == 0: self._create_header() self.needs_recovery = False else: logger.warning('Found an existing WAL file, ' 'the B+Tree was not closed properly') self.needs_recovery = True self._load_wal() def checkpoint(self): """Transfer the modified data back to the tree and close the WAL.""" if self._not_committed_pages: logger.warning('Closing WAL with uncommitted data, discarding it') fsync_file_and_dir(self._fd.fileno(), self._dir_fd) for page, page_start in self._committed_pages.items(): page_data = read_from_file( self._fd, page_start, page_start + self._page_size ) yield page, page_data self._fd.close() os.unlink(self.filename) if self._dir_fd is not None: os.fsync(self._dir_fd) os.close(self._dir_fd) def _create_header(self): ... def _load_wal(self): ... def _load_next_frame(self): ... def _index_frame(self, frame_type: FrameType, page: int, page_start: int): ... def _add_frame(self, frame_type: FrameType, page: Optional[int]=None, ... def get_page(self, page: int) -> Optional[bytes]: ... def set_page(self, page: int, page_data: bytes): ... def commit(self): ... def rollback(self): ... def __repr__(self): ... # FILE bplustree/bplustree/memory.py class WAL: def __init__(self, filename: str, page_size: int): self.filename = filename + '-wal' self._fd, self._dir_fd = open_file_in_dir(self.filename) self._page_size = page_size self._committed_pages = dict() self._not_committed_pages = dict() self._fd.seek(0, io.SEEK_END) if self._fd.tell() == 0: self._create_header() self.needs_recovery = False else: logger.warning('Found an existing WAL file, ' 'the B+Tree was not closed properly') self.needs_recovery = True self._load_wal() def checkpoint(self): """Transfer the modified data back to the tree and close the WAL.""" ... def _create_header(self): ... def _load_wal(self): ... def _load_next_frame(self): ... def _index_frame(self, frame_type: FrameType, page: int, page_start: int): ... def _add_frame(self, frame_type: FrameType, page: Optional[int]=None, ... def get_page(self, page: int) -> Optional[bytes]: page_start = None for store in (self._not_committed_pages, self._committed_pages): page_start = store.get(page) if page_start: break if not page_start: return None return read_from_file(self._fd, page_start, page_start + self._page_size) def set_page(self, page: int, page_data: bytes): ... def commit(self): ... def rollback(self): ... def __repr__(self): ... # FILE bplustree/bplustree/memory.py class WAL: def __init__(self, filename: str, page_size: int): self.filename = filename + '-wal' self._fd, self._dir_fd = open_file_in_dir(self.filename) self._page_size = page_size self._committed_pages = dict() self._not_committed_pages = dict() self._fd.seek(0, io.SEEK_END) if self._fd.tell() == 0: self._create_header() self.needs_recovery = False else: logger.warning('Found an existing WAL file, ' 'the B+Tree was not closed properly') self.needs_recovery = True self._load_wal() def checkpoint(self): """Transfer the modified data back to the tree and close the WAL.""" ... def _create_header(self): ... def _load_wal(self): ... def _load_next_frame(self): ... def _index_frame(self, frame_type: FrameType, page: int, page_start: int): ... 
def _add_frame(self, frame_type: FrameType, page: Optional[int]=None, ... def get_page(self, page: int) -> Optional[bytes]: ... def set_page(self, page: int, page_data: bytes): self._add_frame(FrameType.PAGE, page, page_data) def commit(self): ... def rollback(self): ... def __repr__(self): ... Based on the information above, please complete the function: #CURRENT_FILE: bplustree/bplustree/memory.py import enum import io from logging import getLogger import os import platform from typing import Union, Tuple, Optional import cachetools import rwlock from .node import Node from .const import ENDIAN, PAGE_REFERENCE_BYTES, OTHERS_BYTES, TreeConf, FRAME_TYPE_BYTES class FileMemory: __slots__ = ['_filename', '_tree_conf', '_lock', '_cache', '_fd', '_dir_fd', '_wal', 'last_page'] def __init__(self, filename: str, tree_conf: TreeConf, cache_size: int=512): self._filename = filename self._tree_conf = tree_conf self._lock = rwlock.RWLock() if cache_size == 0: self._cache = FakeCache() else: self._cache = cachetools.LRUCache(maxsize=cache_size) self._fd, self._dir_fd = open_file_in_dir(filename) self._wal = WAL(filename, tree_conf.page_size) if self._wal.needs_recovery: self.perform_checkpoint(reopen_wal=True) # Get the next available page self._fd.seek(0, io.SEEK_END) last_byte = self._fd.tell() self.last_page = int(last_byte / self._tree_conf.page_size) def get_node(self, page: int): """Get a node from storage. The cache is not there to prevent hitting the disk, the OS is already very good at it. It is there to avoid paying the price of deserializing the data to create the Node object and its entry. This is a very expensive operation in Python. Since we have at most a single writer we can write to cache on `set_node` if we invalidate the cache when a transaction is rolled back. """ node = self._cache.get(page) if node is not None: return node data = self._wal.get_page(page) if not data: data = self._read_page(page) node = Node.from_page_data(self._tree_conf, data=data, page=page) self._cache[node.page] = node return node def set_node(self, node: Node): self._wal.set_page(node.page, node.dump()) self._cache[node.page] = node def set_page(self, page: int, data: bytes): """Set a raw page of data. Used currently only for overflow pages. """ self._wal.set_page(page, data) def get_page(self, page: int) -> bytes: data = self._wal.get_page(page) if not data: data = self._read_page(page) return data @property def read_transaction(self):
bplustree/bplustree/memory.py
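A plausible completion for the read_transaction property, under the assumption that the rwlock.RWLock created in __init__ exposes a reader_lock object with acquire()/release() methods (a matching write_transaction would use writer_lock). Nothing in the prompt pins down the exact shape of the return value, so the inner class below is illustrative: the property hands back a small context manager that holds the read lock for the duration of a with block. Note the deliberate self2 parameter name, which keeps the enclosing FileMemory instance reachable as self inside the closure.

        class ReadTransaction:

            def __enter__(self2):
                # Multiple readers may hold this lock concurrently;
                # it only excludes the single writer.
                self._lock.reader_lock.acquire()

            def __exit__(self2, exc_type, exc_val, exc_tb):
                self._lock.reader_lock.release()

        return ReadTransaction()

Callers would then wrap reads like: with file_memory.read_transaction: node = file_memory.get_node(page).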
bplustree.utils.pairwise
You are a Python programmer. Here is all the context you may find useful to complete the function: Based on the information above, please complete the function: #CURRENT_FILE: bplustree/bplustree/utils.py import itertools from typing import Iterable def pairwise(iterable: Iterable): """Iterate over elements two by two. s -> (s0,s1), (s1,s2), (s2, s3), ... """
bplustree/bplustree/utils.py
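A likely completion: the docstring matches the classic pairwise recipe from the itertools documentation, which tees the iterator into two independent copies, advances one copy by a single element, and zips them back together.

    # tee() gives two independent iterators over the same source
    a, b = itertools.tee(iterable)
    # advance the second iterator by one; the default None avoids
    # StopIteration on an empty input
    next(b, None)
    return zip(a, b)

For example, pairwise([1, 2, 3, 4]) yields (1, 2), (2, 3), (3, 4), and an input with fewer than two elements yields nothing.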