def FSeek(params, ctxt, scope, stream, coord):
    """Returns 0 if successful or -1 if the address is out of range."""
    if len(params) != 1:
        raise errors.InvalidArguments(
            coord, "{} args".format(len(params)),
            "FSeek accepts only one argument")

    pos = PYVAL(params[0])
    curr_pos = stream.tell()
    fsize = stream.size()

    if pos > fsize:
        stream.seek(fsize)
        return -1
    elif pos < 0:
        stream.seek(0)
        return -1

    diff = pos - curr_pos
    if diff < 0:
        stream.seek(pos)
        return 0

    data = stream.read(diff)

    # let the ctxt automatically append numbers, as needed, unless the
    # previous child was also a skipped field
    skipped_name = "_skipped"
    if len(ctxt._pfp__children) > 0 and ctxt._pfp__children[-1]._pfp__name.startswith("_skipped"):
        old_name = ctxt._pfp__children[-1]._pfp__name
        data = ctxt._pfp__children[-1].raw_data + data
        skipped_name = old_name
        ctxt._pfp__children = ctxt._pfp__children[:-1]
        del ctxt._pfp__children_map[old_name]

    tmp_stream = bitwrap.BitwrappedStream(six.BytesIO(data))
    new_field = pfp.fields.Array(len(data), pfp.fields.Char, tmp_stream)
    ctxt._pfp__add_child(skipped_name, new_field, stream)
    scope.add_var(skipped_name, new_field)

    return 0
def FSkip(params, ctxt, scope, stream, coord):
    """Returns 0 if successful or -1 if the address is out of range."""
    if len(params) != 1:
        raise errors.InvalidArguments(
            coord, "{} args".format(len(params)),
            "FSkip accepts only one argument")

    skip_amt = PYVAL(params[0])
    pos = skip_amt + stream.tell()
    return FSeek([pos], ctxt, scope, stream, coord)
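A minimal sketch of the relative-seek arithmetic FSkip layers on top of FSeek, using io.BytesIO as a stand-in for pfp's bitwrapped stream (the context/scope plumbing is omitted):

import io

stream = io.BytesIO(b"\x00" * 16)   # hypothetical input stream
stream.seek(4)

skip_amt = 3
pos = skip_amt + stream.tell()      # FSkip turns a relative skip into an absolute seek
stream.seek(pos)
assert stream.tell() == 7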
def packer_gzip(params, ctxt, scope, stream, coord):
    """``PackerGZip`` - implements both unpacking and packing. Can be used
    as the ``packer`` for a field. When packing, concatenates the build output
    of all params and gzip-compresses the result. When unpacking, concatenates
    the build output of all params and gzip-decompresses the result.

    Example:

        The code below specifies that the ``data`` field is gzipped
        and that once decompressed, should be parsed with ``PACK_TYPE``.
        When building the ``PACK_TYPE`` structure, ``data`` will be updated
        with the compressed data::

            char data[0x100]<packer=PackerGZip, packtype=PACK_TYPE>;

    :pack: True if the data should be packed, false if it should be unpacked
    :data: The data to operate on
    :returns: An array
    """
    if len(params) <= 1:
        raise errors.InvalidArguments(
            coord, "{} args".format(len(params)),
            "at least two arguments")

    # gzip it (pack it) or gunzip it (unpack it)
    if params[0]:
        return pack_gzip(params[1:], ctxt, scope, stream, coord)
    else:
        return unpack_gzip(params[1:], ctxt, scope, stream, coord)
def pack_gzip(params, ctxt, scope, stream, coord):
    """``PackGZip`` - Concatenates the build output of all params and gzips
    the resulting data, returning a char array.

    Example::

        char data[0x100]<pack=PackGZip, ...>;
    """
    if len(params) == 0:
        raise errors.InvalidArguments(
            coord, "{} args".format(len(params)),
            "at least one argument")

    built = utils.binary("")
    for param in params:
        if isinstance(param, pfp.fields.Field):
            built += param._pfp__build()
        else:
            built += param

    return zlib.compress(built)
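pack_gzip bottoms out in zlib.compress, so packing and unpacking are a plain zlib round trip. A self-contained sketch, with plain bytes standing in for the concatenated ``_pfp__build()`` output (the matching unpack presumably calls zlib.decompress):

import zlib

built = b"header" + b"payload"
packed = zlib.compress(built)
assert zlib.decompress(packed) == built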
def watch_length(params, ctxt, scope, stream, coord):
    """WatchLength - Watch the total length of each of the params.

    Example:

        The code below uses the ``WatchLength`` update function to update
        the ``length`` field to the length of the ``data`` field::

            int length<watch=data, update=WatchLength>;
            char data[length];
    """
    if len(params) <= 1:
        raise errors.InvalidArguments(
            coord, "{} args".format(len(params)),
            "at least two arguments")

    to_update = params[0]

    total_size = 0
    for param in params[1:]:
        total_size += param._pfp__width()

    to_update._pfp__set_value(total_size)
def watch_crc(params, ctxt, scope, stream, coord):
    """WatchCrc32 - Watch the total crc32 of the params.

    Example:

        The code below uses the ``WatchCrc32`` update function to update
        the ``crc`` field to the crc of the ``length`` and ``data`` fields::

            char length;
            char data[length];
            int crc<watch=length;data, update=WatchCrc32>;
    """
    if len(params) <= 1:
        raise errors.InvalidArguments(
            coord, "{} args".format(len(params)),
            "at least two arguments")

    to_update = params[0]

    total_data = utils.binary("")
    for param in params[1:]:
        total_data += param._pfp__build()

    to_update._pfp__set_value(binascii.crc32(total_data))
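The watcher reduces to binascii.crc32 over the concatenated field builds. A standalone check of that invariant, with plain bytes standing in for built fields (crc32 also accepts a running value, so per-field and whole-buffer computations agree):

import binascii

length = b"\x02"
data = b"hi"

whole = binascii.crc32(length + data)                    # what watch_crc computes
running = binascii.crc32(data, binascii.crc32(length))   # field by field
assert running == whole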
def _validate_folder(self, folder=None):
    '''validate folder takes a cloned github repo, ensures
    the existence of the config.json, and validates it.
    '''
    from expfactory.experiment import load_experiment

    if folder is None:
        folder = os.path.abspath(os.getcwd())

    config = load_experiment(folder, return_path=True)
    if not config:
        return notvalid("%s is not an experiment." % folder)

    return self._validate_config(folder)
def validate(self, folder, cleanup=False, validate_folder=True):
    '''validate is the entrypoint to all validation, for a folder,
    config, or url. If a URL is found, it is cloned and cleaned up.

    :param validate_folder: ensures the folder name (github repo) matches.
    '''
    # Obtain any repository URL provided
    if folder.startswith('http') or 'github' in folder:
        folder = clone(folder, tmpdir=self.tmpdir)

    # Load config.json if provided directly
    elif os.path.basename(folder) == 'config.json':
        config = os.path.dirname(folder)
        return self._validate_config(config, validate_folder)

    # Otherwise, validate the folder and clean up
    valid = self._validate_folder(folder)
    if cleanup is True:
        shutil.rmtree(folder)
    return valid
def _validate_config(self, folder, validate_folder=True):
    '''validate config is the primary validation function that checks
    for presence and format of required fields.

    Parameters
    ==========
    :folder: full path to folder with config.json
    :name: if provided, the folder name to check against exp_id
    '''
    config = "%s/config.json" % folder
    name = os.path.basename(folder)
    if not os.path.exists(config):
        return notvalid("%s: config.json not found." % folder)

    # Load the config
    try:
        config = read_json(config)
    except:
        return notvalid("%s: cannot load json, invalid." % name)

    # config.json should be a single dict
    if isinstance(config, list):
        return notvalid("%s: config.json is a list, not valid." % name)

    # Check over required fields
    fields = self.get_validation_fields()
    for field, value, ftype in fields:
        bot.verbose('field: %s, required: %s' % (field, value))

        # Field must be in the keys if required
        if field not in config.keys():
            if value == 1:
                return notvalid("%s: config.json is missing required field %s" % (name, field))

        # Field is present, check type
        else:
            if not isinstance(config[field], ftype):
                return notvalid("%s: invalid type, must be %s." % (name, str(ftype)))

            # exp_id gets special treatment
            if field == "exp_id" and validate_folder is True:
                if config[field] != name:
                    return notvalid("%s: exp_id parameter %s does not match folder name." % (name, config[field]))

                # exp_id cannot have special characters, only _ and letters/numbers
                if not re.match("^[a-z0-9_-]*$", config[field]):
                    message = "%s: exp_id parameter %s has invalid characters; "
                    message += "only lowercase [a-z], [0-9], -, and _ allowed."
                    return notvalid(message % (name, config[field]))

    return True
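The exp_id character check is just an anchored regex; what it accepts and rejects in isolation:

import re

EXP_ID = "^[a-z0-9_-]*$"
assert re.match(EXP_ID, "test-task_2")
assert not re.match(EXP_ID, "Test Task!")   # uppercase, spaces, punctuation rejected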
def get_validation_fields(self):
    '''get_validation_fields returns a list of tuples (each a field).

    We only require the exp_id to coincide with the folder name, for the
    sake of reproducibility (given that all are served from a sample image
    or Github organization). All other fields are optional.
    To specify runtime variables, add to "experiment_variables".

    0: not required, no warning
    1: required, not valid
    2: not required, warning
    type: indicates the variable type
    '''
    return [("name", 1, str),  # required
            ("time", 1, int),
            ("url", 1, str),
            ("description", 1, str),
            ("instructions", 1, str),
            ("exp_id", 1, str),
            ("install", 0, list),       # list of commands to install / build experiment
            ("contributors", 0, list),  # not required
            ("reference", 0, list),
            ("cognitive_atlas_task_id", 0, str),
            ("template", 0, str)]
def get_runtime_vars(varset, experiment, token):
    '''get_runtime_vars will return the urlparsed string of one or more
    runtime variables. If none are present, an empty string is returned.

    Parameters
    ==========
    varset: the variable set, a dictionary lookup with exp_id, token, vars
    experiment: the exp_id to look up
    token: the participant id (or token) that must be defined.

    Returns
    =======
    url: the variable portion of the url to be passed to experiment, e.g.,
         '?words=at the thing&color=red&globalname=globalvalue'
    '''
    url = ''
    if experiment in varset:
        variables = dict()

        # Participant-set variables
        if token in varset[experiment]:
            for k, v in varset[experiment][token].items():
                variables[k] = v

        # Globally-set variables
        if "*" in varset[experiment]:
            for k, v in varset[experiment]['*'].items():
                # Only add the variable if not already defined
                if k not in variables:
                    variables[k] = v

        # Join together; the first ? is added by the calling function
        varlist = ["%s=%s" % (k, v) for k, v in variables.items()]
        url = '&'.join(varlist)

    bot.debug('Parsed url: %s' % url)
    return url
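A usage sketch with a hand-built varset (assuming the function and its bot logger are importable); participant-level values take precedence over the global "*" entry:

varset = {
    'test-parse-url': {
        '*': {'globalname': 'globalvalue', 'color': 'green'},
        '123': {'color': 'red', 'words': 'at the thing'},
    }
}

url = get_runtime_vars(varset, 'test-parse-url', '123')
# e.g. 'color=red&words=at the thing&globalname=globalvalue'
assert 'color=red' in url and 'globalname=globalvalue' in url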
def generate_runtime_vars(variable_file=None, sep=','):
    '''generate a lookup data structure from a delimited file. We typically
    obtain the file name and delimiter from the environment by way of
    EXPFACTORY_RUNTIME_VARS and EXPFACTORY_RUNTIME_DELIM, respectively,
    but the user can also parse from a custom variable file by way of
    specifying it to the function (preference is given here). The file
    should be csv, with the only required first header field as "token"
    and second as "exp_id" to distinguish the participant ID and
    experiment id. The subsequent columns should correspond to experiment
    variable names. No special parsing of either is done.

    Parameters
    ==========
    variable_file: full path to the tabular file with token, exp_id, etc.
    sep: the default delimiter to use, if not set in environment.

    Returns
    =======
    varset: a dictionary lookup by exp_id and then participant ID.

    { 'test-parse-url': {
        '123': {
            'color': 'red',
            'globalname': 'globalvalue',
            'words': 'at the thing'},
        '456': {
            'color': 'blue',
            'globalname': 'globalvalue',
            'words': 'omg tacos'}
      }
    }
    '''
    # First preference goes to runtime, then environment, then unset
    if variable_file is None:
        if EXPFACTORY_RUNTIME_VARS is not None:
            variable_file = EXPFACTORY_RUNTIME_VARS

    if variable_file is not None:
        if not os.path.exists(variable_file):
            bot.warning('%s is set, but not found' % variable_file)
            # nothing can be loaded from a missing file
            return None

    # If still None, no file
    if variable_file is None:
        return variable_file

    # If we get here, we have a variable file that exists
    delim = sep
    if EXPFACTORY_RUNTIME_DELIM is not None:
        delim = EXPFACTORY_RUNTIME_DELIM
    bot.debug('Delim for variables file set to %s' % delim)

    # Read in the file, generate config
    varset = dict()
    rows = _read_runtime_vars(variable_file)
    if len(rows) > 0:

        # When we get here, we are sure to have
        # 'exp_id', 'var_name', 'var_value', 'token'
        for row in rows:
            exp_id = row[0].lower()  # exp_id must be lowercase
            var_name = row[1]
            var_value = row[2]
            token = row[3]

            # Level 1: Experiment ID
            if exp_id not in varset:
                varset[exp_id] = {}

            # Level 2: Participant ID
            if token not in varset[exp_id]:
                varset[exp_id][token] = {}

            # If found global setting, courtesy debug message
            if token == "*":
                bot.debug('Found global variable %s' % var_name)

            # Level 3: the variable; issue a warning if already defined
            if var_name in varset[exp_id][token]:
                bot.warning('%s defined twice %s:%s' % (var_name, exp_id, token))
            varset[exp_id][token][var_name] = var_value

    return varset
def _read_runtime_vars(variable_file, sep=','):
    '''read the entire runtime variable file, and return a list of lists,
    each corresponding to a row. We also check the header, and exit if
    anything is missing or malformed.

    Parameters
    ==========
    variable_file: full path to the tabular file with token, exp_id, etc.
    sep: the default delimiter to use, if not set in environment.

    Returns
    =======
    valid_rows: a list of lists, each a valid row

    [['test-parse-url', 'globalname', 'globalvalue', '*'],
     ['test-parse-url', 'color', 'red', '123'],
     ['test-parse-url', 'color', 'blue', '456'],
     ['test-parse-url', 'words', 'at the thing', '123'],
     ['test-parse-url', 'words', 'omg tacos', '456']]
    '''
    rows = [x for x in read_file(variable_file).split('\n') if x.strip()]
    valid_rows = []

    if len(rows) > 0:

        # Validate the header and rows, exit if not valid
        header = rows.pop(0).split(sep)
        validate_header(header)
        for row in rows:
            row = _validate_row(row, sep=sep, required_length=4)

            # If the row is returned, it is valid
            if row:
                valid_rows.append(row)

    return valid_rows
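The on-disk shape this reader expects, sketched with illustrative content (the header order is exactly what validate_header enforces below):

csv_text = """exp_id,var_name,var_value,token
test-parse-url,globalname,globalvalue,*
test-parse-url,color,red,123"""

rows = [x for x in csv_text.split('\n') if x.strip()]
header = rows.pop(0).split(',')
assert header == ['exp_id', 'var_name', 'var_value', 'token']
# the remaining rows parse to, e.g., ['test-parse-url', 'color', 'red', '123']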
def _validate_row(row, sep=',', required_length=None):
    '''validate_row will ensure that a row has the proper length, and is
    not empty and cleaned of extra spaces.

    Parameters
    ==========
    row: a single row, not yet parsed.

    Returns a valid row, or None if not valid
    '''
    if not isinstance(row, list):
        row = _parse_row(row, sep)

    if required_length:
        length = len(row)
        if length != required_length:
            bot.warning('Row should have length %s (not %s)' % (required_length, length))
            bot.warning(row)
            row = None

    return row
def _parse_row(row, sep=','):
    '''parse row is a helper function to simply clean up a string, and
    parse into a row based on a delimiter. Length checking is handled
    by _validate_row.
    '''
    parsed = row.split(sep)
    parsed = [x for x in parsed if x.strip()]
    return parsed
def validate_header(header, required_fields=None):
    '''validate_header ensures that the first row contains the exp_id,
    var_name, var_value, and token. Capitalization isn't important, but
    ordering is. This criterion is very strict, but it's reasonable to
    require.

    Parameters
    ==========
    header: the header row, as a list
    required_fields: a list of required fields. We derive the required
                     length from this list.

    Does not return, instead exits if malformed. Runs silently if OK.
    '''
    if required_fields is None:
        required_fields = ['exp_id', 'var_name', 'var_value', 'token']

    # The required length of the header based on required fields
    length = len(required_fields)

    # This is very strict, but no reason not to be
    header = _validate_row(header, required_length=length)
    header = [x.lower() for x in header]

    for idx in range(length):
        field = header[idx].lower().strip()
        if required_fields[idx] != field:
            bot.error('Malformed header field %s, exiting.' % field)
            sys.exit(1)
def superuser_required(view_func):
    """
    Decorator for views that checks that the user is logged in and is a
    superuser, displaying the login page if necessary.
    """
    @wraps(view_func)
    def _checklogin(request, *args, **kwargs):
        if request.user.is_active and request.user.is_superuser:
            # The user is valid. Continue to the admin page.
            return view_func(request, *args, **kwargs)

        assert hasattr(request, 'session'), (
            "The Django admin requires session middleware to be installed. "
            "Edit your MIDDLEWARE_CLASSES setting to insert "
            "'django.contrib.sessions.middleware.SessionMiddleware'.")
        defaults = {
            'template_name': 'admin/login.html',
            'redirect_field_name': request.get_full_path(),
            'authentication_form': AdminAuthenticationForm,
            'extra_context': {
                'title': _('Log in'),
                'app_path': request.get_full_path(),
            },
        }
        # LoginView is class-based, so instantiate it via as_view()
        return LoginView.as_view(**defaults)(request)
    return _checklogin
def from_lines(cls, pattern_factory, lines):
    """
    Compiles the pattern lines.

    *pattern_factory* can be either the name of a registered pattern
    factory (:class:`str`), or a :class:`~collections.abc.Callable` used
    to compile patterns. It must accept an uncompiled pattern
    (:class:`str`) and return the compiled pattern (:class:`.Pattern`).

    *lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
    pattern (:class:`str`). This simply has to yield each line so it can
    be a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`)
    or the result from :meth:`str.splitlines`.

    Returns the :class:`PathSpec` instance.
    """
    if isinstance(pattern_factory, string_types):
        pattern_factory = util.lookup_pattern(pattern_factory)
    if not callable(pattern_factory):
        raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))

    if isinstance(lines, (bytes, unicode)):
        raise TypeError("lines:{!r} is not an iterable.".format(lines))

    lines = [pattern_factory(line) for line in lines if line]
    return cls(lines)
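Typical usage through the registered gitwildmatch factory, pathspec's standard entry point:

import pathspec

spec = pathspec.PathSpec.from_lines('gitwildmatch', ['*.pyc', 'build/'])
assert spec.match_file('module.pyc')
assert not spec.match_file('module.py')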
def match_file(self, file, separators=None):
    """
    Matches the file to this path-spec.

    *file* (:class:`str`) is the file path to be matched against
    :attr:`self.patterns <PathSpec.patterns>`.

    *separators* (:class:`~collections.abc.Collection` of :class:`str`)
    optionally contains the path separators to normalize. See
    :func:`~pathspec.util.normalize_file` for more information.

    Returns :data:`True` if *file* matched; otherwise, :data:`False`.
    """
    norm_file = util.normalize_file(file, separators=separators)
    return util.match_file(self.patterns, norm_file)
def match_files(self, files, separators=None):
    """
    Matches the files to this path-spec.

    *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
    the file paths to be matched against :attr:`self.patterns
    <PathSpec.patterns>`.

    *separators* (:class:`~collections.abc.Collection` of :class:`str`;
    or :data:`None`) optionally contains the path separators to
    normalize. See :func:`~pathspec.util.normalize_file` for more
    information.

    Returns the matched files (:class:`~collections.abc.Iterable` of
    :class:`str`).
    """
    if isinstance(files, (bytes, unicode)):
        raise TypeError("files:{!r} is not an iterable.".format(files))

    file_map = util.normalize_files(files, separators=separators)
    matched_files = util.match_files(self.patterns, iterkeys(file_map))
    for path in matched_files:
        yield file_map[path]
def match_tree(self, root, on_error=None, follow_links=None):
    """
    Walks the specified root path for all files and matches them to this
    path-spec.

    *root* (:class:`str`) is the root directory to search for files.

    *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
    optionally is the error handler for file-system exceptions. See
    :func:`~pathspec.util.iter_tree` for more information.

    *follow_links* (:class:`bool` or :data:`None`) optionally is whether
    to walk symbolic links that resolve to directories. See
    :func:`~pathspec.util.iter_tree` for more information.

    Returns the matched files (:class:`~collections.abc.Iterable` of
    :class:`str`).
    """
    files = util.iter_tree(root, on_error=on_error, follow_links=follow_links)
    return self.match_files(files)
def pattern_to_regex(cls, pattern):
    """
    Convert the pattern into a regular expression.

    *pattern* (:class:`unicode` or :class:`bytes`) is the pattern to
    convert into a regular expression.

    Returns the uncompiled regular expression (:class:`unicode`,
    :class:`bytes`, or :data:`None`), and whether matched files should be
    included (:data:`True`), excluded (:data:`False`), or if it is a
    null-operation (:data:`None`).
    """
    if isinstance(pattern, unicode):
        return_type = unicode
    elif isinstance(pattern, bytes):
        return_type = bytes
        pattern = pattern.decode(_BYTES_ENCODING)
    else:
        raise TypeError("pattern:{!r} is not a unicode or byte string.".format(pattern))

    pattern = pattern.strip()

    if pattern.startswith('#'):
        # A pattern starting with a hash ('#') serves as a comment
        # (neither includes nor excludes files). Escape the hash with a
        # back-slash to match a literal hash (i.e., '\#').
        regex = None
        include = None

    elif pattern == '/':
        # EDGE CASE: According to `git check-ignore` (v2.4.1), a single
        # '/' does not match any file.
        regex = None
        include = None

    elif pattern:
        if pattern.startswith('!'):
            # A pattern starting with an exclamation mark ('!') negates
            # the pattern (exclude instead of include). Escape the
            # exclamation mark with a back-slash to match a literal
            # exclamation mark (i.e., '\!').
            include = False
            # Remove leading exclamation mark.
            pattern = pattern[1:]
        else:
            include = True

        if pattern.startswith('\\'):
            # Remove leading back-slash escape for escaped hash ('#') or
            # exclamation mark ('!').
            pattern = pattern[1:]

        # Split pattern into segments.
        pattern_segs = pattern.split('/')

        # Normalize pattern to make processing easier.

        if not pattern_segs[0]:
            # A pattern beginning with a slash ('/') will only match
            # paths directly on the root directory instead of any
            # descendant paths. So, remove empty first segment to make
            # pattern relative to root.
            del pattern_segs[0]

        elif len(pattern_segs) == 1 or (len(pattern_segs) == 2 and not pattern_segs[1]):
            # A single pattern without a beginning slash ('/') will match
            # any descendant path. This is equivalent to "**/{pattern}".
            # So, prepend with double-asterisks to make pattern relative
            # to root.
            # EDGE CASE: This also holds for a single pattern with a
            # trailing slash (e.g. dir/).
            if pattern_segs[0] != '**':
                pattern_segs.insert(0, '**')

        else:
            # EDGE CASE: A pattern without a beginning slash ('/') but
            # contains at least one prepended directory (e.g.
            # "dir/{pattern}") should not match "**/dir/{pattern}",
            # according to `git check-ignore` (v2.4.1).
            pass

        if not pattern_segs[-1] and len(pattern_segs) > 1:
            # A pattern ending with a slash ('/') will match all
            # descendant paths if it is a directory but not if it is a
            # regular file. This is equivalent to "{pattern}/**". So, set
            # last segment to double asterisks to include all
            # descendants.
            pattern_segs[-1] = '**'

        # Build regular expression from pattern.
        output = ['^']
        need_slash = False
        end = len(pattern_segs) - 1
        for i, seg in enumerate(pattern_segs):
            if seg == '**':
                if i == 0 and i == end:
                    # A pattern consisting solely of double-asterisks
                    # ('**') will match every path.
                    output.append('.+')
                elif i == 0:
                    # A normalized pattern beginning with double-asterisks
                    # ('**') will match any leading path segments.
                    output.append('(?:.+/)?')
                    need_slash = False
                elif i == end:
                    # A normalized pattern ending with double-asterisks
                    # ('**') will match any trailing path segments.
                    output.append('/.*')
                else:
                    # A pattern with inner double-asterisks ('**') will
                    # match multiple (or zero) inner path segments.
                    output.append('(?:/.+)?')
                    need_slash = True

            elif seg == '*':
                # Match single path segment.
                if need_slash:
                    output.append('/')
                output.append('[^/]+')
                need_slash = True

            else:
                # Match segment glob pattern.
                if need_slash:
                    output.append('/')
                output.append(cls._translate_segment_glob(seg))

                if i == end and include is True:
                    # A pattern ending without a slash ('/') will match a
                    # file or a directory (with paths underneath it).
                    # E.g., "foo" matches "foo", "foo/bar",
                    # "foo/bar/baz", etc.
                    # EDGE CASE: However, this does not hold for
                    # exclusion cases according to `git check-ignore`
                    # (v2.4.1).
                    output.append('(?:/.*)?')

                need_slash = True

        output.append('$')
        regex = ''.join(output)

    else:
        # A blank pattern is a null-operation (neither includes nor
        # excludes files).
        regex = None
        include = None

    if regex is not None and return_type is bytes:
        regex = regex.encode(_BYTES_ENCODING)

    return regex, include
def _translate_segment_glob(pattern):
    """
    Translates the glob pattern to a regular expression. This is used in
    the constructor to translate a path segment glob pattern to its
    corresponding regular expression.

    *pattern* (:class:`str`) is the glob pattern.

    Returns the regular expression (:class:`str`).
    """
    # NOTE: This is derived from `fnmatch.translate()` and is similar to
    # the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.

    escape = False
    regex = ''
    i, end = 0, len(pattern)
    while i < end:
        # Get next character.
        char = pattern[i]
        i += 1

        if escape:
            # Escape the character.
            escape = False
            regex += re.escape(char)

        elif char == '\\':
            # Escape character, escape next character.
            escape = True

        elif char == '*':
            # Multi-character wildcard. Match any string (except slashes),
            # including an empty string.
            regex += '[^/]*'

        elif char == '?':
            # Single-character wildcard. Match any single character
            # (except a slash).
            regex += '[^/]'

        elif char == '[':
            # Bracket expression wildcard. Except for the beginning
            # exclamation mark, the whole bracket expression can be used
            # directly as regex, but we have to find where the expression
            # ends.
            # - "[][!]" matches ']', '[' and '!'.
            # - "[]-]" matches ']' and '-'.
            # - "[!]a-]" matches any character except ']', 'a' and '-'.
            j = i
            # Pass bracket expression negation.
            if j < end and pattern[j] == '!':
                j += 1
            # Pass first closing bracket if it is at the beginning of the
            # expression.
            if j < end and pattern[j] == ']':
                j += 1
            # Find closing bracket. Stop once we reach the end or find it.
            while j < end and pattern[j] != ']':
                j += 1

            if j < end:
                # Found end of bracket expression. Increment j to be one
                # past the closing bracket:
                #
                #  [...]
                #   ^   ^
                #   i   j
                #
                j += 1
                expr = '['

                if pattern[i] == '!':
                    # Bracket expression needs to be negated.
                    expr += '^'
                    i += 1
                elif pattern[i] == '^':
                    # POSIX declares that the regex bracket expression
                    # negation "[^...]" is undefined in a glob pattern.
                    # Python's `fnmatch.translate()` escapes the caret
                    # ('^') as a literal. To maintain consistency with
                    # undefined behavior, I am escaping the '^' as well.
                    expr += '\\^'
                    i += 1

                # Build regex bracket expression. Escape slashes so they
                # are treated as literal slashes by regex as defined by
                # POSIX.
                expr += pattern[i:j].replace('\\', '\\\\')

                # Add regex bracket expression to regex result.
                regex += expr

                # Set i to one past the closing bracket.
                i = j

            else:
                # Failed to find closing bracket, treat opening bracket
                # as a bracket literal instead of as an expression.
                regex += '\\['

        else:
            # Regular character, escape it for regex.
            regex += re.escape(char)

    return regex
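What the translation yields for a common glob, checked against re; under the rules above, '*.py' becomes '[^/]*\.py' (segment semantics: wildcards never cross a slash):

import re

regex = '[^/]*\\.py'   # translation of '*.py'
assert re.match('^' + regex + '$', 'setup.py')
assert not re.match('^' + regex + '$', 'src/setup.py')   # '*' stops at '/'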
def pattern_to_regex(cls, *args, **kw):
    """
    Warn about deprecation.
    """
    cls._deprecated()
    return super(GitIgnorePattern, cls).pattern_to_regex(*args, **kw)
def iter_tree(root, on_error=None, follow_links=None):
    """
    Walks the specified directory for all files.

    *root* (:class:`str`) is the root directory to search for files.

    *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
    optionally is the error handler for file-system exceptions. It will
    be called with the exception (:exc:`OSError`). Reraise the exception
    to abort the walk. Default is :data:`None` to ignore file-system
    exceptions.

    *follow_links* (:class:`bool` or :data:`None`) optionally is whether
    to walk symbolic links that resolve to directories. Default is
    :data:`None` for :data:`True`.

    Raises :exc:`RecursionError` if recursion is detected.

    Returns an :class:`~collections.abc.Iterable` yielding the path to
    each file (:class:`str`) relative to *root*.
    """
    if on_error is not None and not callable(on_error):
        raise TypeError("on_error:{!r} is not callable.".format(on_error))

    if follow_links is None:
        follow_links = True

    for file_rel in _iter_tree_next(os.path.abspath(root), '', {}, on_error, follow_links):
        yield file_rel
def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links):
    """
    Scan the directory for all descendant files.

    *root_full* (:class:`str`) the absolute path to the root directory.

    *dir_rel* (:class:`str`) the path to the directory to scan relative
    to *root_full*.

    *memo* (:class:`dict`) keeps track of ancestor directories
    encountered. Maps each ancestor real path (:class:`str`) to relative
    path (:class:`str`).

    *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
    optionally is the error handler for file-system exceptions.

    *follow_links* (:class:`bool`) is whether to walk symbolic links that
    resolve to directories.
    """
    dir_full = os.path.join(root_full, dir_rel)
    dir_real = os.path.realpath(dir_full)

    # Remember each encountered ancestor directory and its canonical
    # (real) path. If a canonical path is encountered more than once,
    # recursion has occurred.
    if dir_real not in memo:
        memo[dir_real] = dir_rel
    else:
        raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)

    for node in os.listdir(dir_full):
        node_rel = os.path.join(dir_rel, node)
        node_full = os.path.join(root_full, node_rel)

        # Inspect child node.
        try:
            node_stat = os.lstat(node_full)
        except OSError as e:
            if on_error is not None:
                on_error(e)
            continue

        if stat.S_ISLNK(node_stat.st_mode):
            # Child node is a link, inspect the target node.
            is_link = True
            try:
                node_stat = os.stat(node_full)
            except OSError as e:
                if on_error is not None:
                    on_error(e)
                continue
        else:
            is_link = False

        if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
            # Child node is a directory, recurse into it and yield its
            # descendant files.
            for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links):
                yield file_rel

        elif stat.S_ISREG(node_stat.st_mode):
            # Child node is a file, yield it.
            yield node_rel

    # NOTE: Make sure to remove the canonical (real) path of the directory
    # from the ancestors memo once we are done with it. This allows the
    # same directory to appear multiple times. If this is not done, the
    # second occurrence of the directory will be incorrectly interpreted
    # as a recursion. See
    # <https://github.com/cpburnz/python-path-specification/pull/7>.
    del memo[dir_real]
def match_file(patterns, file):
    """
    Matches the file to the patterns.

    *patterns* (:class:`~collections.abc.Iterable` of
    :class:`~pathspec.pattern.Pattern`) contains the patterns to use.

    *file* (:class:`str`) is the normalized file path to be matched
    against *patterns*.

    Returns :data:`True` if *file* matched; otherwise, :data:`False`.
    """
    matched = False
    for pattern in patterns:
        if pattern.include is not None:
            if file in pattern.match((file,)):
                matched = pattern.include
    return matched
def match_files(patterns, files):
    """
    Matches the files to the patterns.

    *patterns* (:class:`~collections.abc.Iterable` of
    :class:`~pathspec.pattern.Pattern`) contains the patterns to use.

    *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
    the normalized file paths to be matched against *patterns*.

    Returns the matched files (:class:`set` of :class:`str`).
    """
    all_files = files if isinstance(files, collection_type) else list(files)
    return_files = set()
    for pattern in patterns:
        if pattern.include is not None:
            result_files = pattern.match(all_files)
            if pattern.include:
                return_files.update(result_files)
            else:
                return_files.difference_update(result_files)
    return return_files
def normalize_file(file, separators=None):
    """
    Normalizes the file path to use the POSIX path separator (i.e.,
    ``'/'``).

    *file* (:class:`str`) is the file path.

    *separators* (:class:`~collections.abc.Collection` of :class:`str`;
    or :data:`None`) optionally contains the path separators to
    normalize. This does not need to include the POSIX path separator
    (``'/'``), but including it will not affect the results. Default is
    :data:`None` for :data:`NORMALIZE_PATH_SEPS`. To prevent
    normalization, pass an empty container (e.g., an empty tuple ``()``).

    Returns the normalized file path (:class:`str`).
    """
    # Normalize path separators.
    if separators is None:
        separators = NORMALIZE_PATH_SEPS
    norm_file = file
    for sep in separators:
        norm_file = norm_file.replace(sep, posixpath.sep)

    # Remove current directory prefix.
    if norm_file.startswith('./'):
        norm_file = norm_file[2:]

    return norm_file
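The normalization in isolation, as a usage example of the function above (backslash registered as a separator, Windows-style input):

assert normalize_file('.\\pkg\\mod.py', separators=('\\',)) == 'pkg/mod.py'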
def normalize_files(files, separators=None):
    """
    Normalizes the file paths to use the POSIX path separator.

    *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
    the file paths to be normalized.

    *separators* (:class:`~collections.abc.Collection` of :class:`str`;
    or :data:`None`) optionally contains the path separators to
    normalize. See :func:`normalize_file` for more information.

    Returns a :class:`dict` mapping each normalized file path
    (:class:`str`) to the original file path (:class:`str`).
    """
    norm_files = {}
    for path in files:
        norm_files[normalize_file(path, separators=separators)] = path
    return norm_files
def register_pattern(name, pattern_factory, override=None):
    """
    Registers the specified pattern factory.

    *name* (:class:`str`) is the name to register the pattern factory
    under.

    *pattern_factory* (:class:`~collections.abc.Callable`) is used to
    compile patterns. It must accept an uncompiled pattern (:class:`str`)
    and return the compiled pattern (:class:`.Pattern`).

    *override* (:class:`bool` or :data:`None`) optionally is whether to
    allow overriding an already registered pattern under the same name
    (:data:`True`), instead of raising an :exc:`AlreadyRegisteredError`
    (:data:`False`). Default is :data:`None` for :data:`False`.
    """
    if not isinstance(name, string_types):
        raise TypeError("name:{!r} is not a string.".format(name))
    if not callable(pattern_factory):
        raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
    if name in _registered_patterns and not override:
        raise AlreadyRegisteredError(name, _registered_patterns[name])
    _registered_patterns[name] = pattern_factory
def message(self):
    """
    *message* (:class:`str`) is the error message.
    """
    return "Real path {real!r} was encountered at {first!r} and then {second!r}.".format(
        real=self.real_path,
        first=self.first_path,
        second=self.second_path,
    )
def match(self, files):
    """
    Matches this pattern against the specified files.

    *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
    each file relative to the root directory (e.g.,
    ``"relative/path/to/file"``).

    Returns an :class:`~collections.abc.Iterable` yielding each matched
    file path (:class:`str`).
    """
    raise NotImplementedError("{}.{} must override match().".format(
        self.__class__.__module__, self.__class__.__name__))
def match(self, files):
    """
    Matches this pattern against the specified files.

    *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
    each file relative to the root directory (e.g.,
    "relative/path/to/file").

    Returns an :class:`~collections.abc.Iterable` yielding each matched
    file path (:class:`str`).
    """
    if self.include is not None:
        for path in files:
            if self.regex.match(path) is not None:
                yield path
def user_default_serializer(self, obj):
    """Convert a User to a cached instance representation."""
    if not obj:
        return None
    self.user_default_add_related_pks(obj)
    return dict((
        ('id', obj.id),
        ('username', obj.username),
        self.field_to_json('DateTime', 'date_joined', obj.date_joined),
        self.field_to_json(
            'PKList', 'votes', model=Choice, pks=obj._votes_pks),
    ))
def user_default_loader(self, pk):
    """Load a User from the database."""
    try:
        obj = User.objects.get(pk=pk)
    except User.DoesNotExist:
        return None
    else:
        self.user_default_add_related_pks(obj)
        return obj
def user_default_add_related_pks(self, obj):
    """Add related primary keys to a User instance."""
    if not hasattr(obj, '_votes_pks'):
        obj._votes_pks = list(obj.votes.values_list('pk', flat=True))
def group_default_invalidator(self, obj):
    """Invalidate cached items when the Group changes."""
    user_pks = User.objects.values_list('pk', flat=True)
    return [('User', pk, False) for pk in user_pks]
def question_default_serializer(self, obj):
    """Convert a Question to a cached instance representation."""
    if not obj:
        return None
    self.question_default_add_related_pks(obj)
    return dict((
        ('id', obj.id),
        ('question_text', obj.question_text),
        self.field_to_json('DateTime', 'pub_date', obj.pub_date),
        self.field_to_json(
            'PKList', 'choices', model=Choice, pks=obj._choice_pks),
    ))
def question_default_loader(self, pk):
    """Load a Question from the database."""
    try:
        obj = Question.objects.get(pk=pk)
    except Question.DoesNotExist:
        return None
    else:
        self.question_default_add_related_pks(obj)
        return obj
def question_default_add_related_pks(self, obj):
    """Add related primary keys to a Question instance."""
    if not hasattr(obj, '_choice_pks'):
        obj._choice_pks = list(obj.choices.values_list('pk', flat=True))
def choice_default_serializer(self, obj):
    """Convert a Choice to a cached instance representation."""
    if not obj:
        return None
    self.choice_default_add_related_pks(obj)
    return dict((
        ('id', obj.id),
        ('choice_text', obj.choice_text),
        self.field_to_json(
            'PK', 'question', model=Question, pk=obj.question_id),
        self.field_to_json(
            'PKList', 'voters', model=User, pks=obj._voter_pks),
    ))
def choice_default_loader(self, pk):
    """Load a Choice from the database."""
    try:
        obj = Choice.objects.get(pk=pk)
    except Choice.DoesNotExist:
        return None
    else:
        self.choice_default_add_related_pks(obj)
        return obj
def choice_default_add_related_pks(self, obj):
    """Add related primary keys to a Choice instance."""
    if not hasattr(obj, '_voter_pks'):
        obj._voter_pks = obj.voters.values_list('pk', flat=True)
def choice_default_invalidator(self, obj):
    """Invalidate cached items when the Choice changes."""
    invalid = [('Question', obj.question_id, True)]
    for pk in obj.voters.values_list('pk', flat=True):
        invalid.append(('User', pk, False))
    return invalid
def cache(self):
    """Get the Django cache interface.

    This allows disabling the cache with
    settings.USE_DRF_INSTANCE_CACHE=False. It also delays import so that
    Django Debug Toolbar will record cache requests.
    """
    if not self._cache:
        use_cache = getattr(settings, 'USE_DRF_INSTANCE_CACHE', True)
        if use_cache:
            from django.core.cache import cache
            self._cache = cache
    return self._cache
def delete_all_versions(self, model_name, obj_pk):
    """Delete all versions of a cached instance."""
    if self.cache:
        for version in self.versions:
            key = self.key_for(version, model_name, obj_pk)
            self.cache.delete(key)
def model_function(self, model_name, version, func_name):
    """Return the model-specific caching function."""
    assert func_name in ('serializer', 'loader', 'invalidator')
    name = "%s_%s_%s" % (model_name.lower(), version, func_name)
    return getattr(self, name)
def field_function(self, type_code, func_name):
    """Return the field function."""
    assert func_name in ('to_json', 'from_json')
    name = "field_%s_%s" % (type_code.lower(), func_name)
    return getattr(self, name)
def field_to_json(self, type_code, key, *args, **kwargs):
    """Convert a field to a JSON-serializable representation."""
    assert ':' not in key
    to_json = self.field_function(type_code, 'to_json')
    key_and_type = "%s:%s" % (key, type_code)
    json_value = to_json(*args, **kwargs)
    return key_and_type, json_value
def field_from_json(self, key_and_type, json_value):
    """Convert a JSON-serializable representation back to a field."""
    assert ':' in key_and_type
    key, type_code = key_and_type.split(':', 1)
    from_json = self.field_function(type_code, 'from_json')
    value = from_json(json_value)
    return key, value
def get_instances(self, object_specs, version=None):
    """Get the cached native representation for one or more objects.

    Keyword arguments:
    object_specs - A sequence of triples (model name, pk, obj):
    - model name - the name of the model
    - pk - the primary key of the instance
    - obj - the instance, or None to load it
    version - The cache version to use, or None for default

    To get the 'new object' representation, set pk and obj to None.

    Return is a dictionary:
    key - (model name, pk)
    value - (native representation, pk, object or None)
    """
    ret = dict()
    spec_keys = set()
    cache_keys = []
    version = version or self.default_version

    # Construct all the cache keys to fetch
    for model_name, obj_pk, obj in object_specs:
        assert model_name
        assert obj_pk

        # Get cache keys to fetch
        obj_key = self.key_for(version, model_name, obj_pk)
        spec_keys.add((model_name, obj_pk, obj, obj_key))
        cache_keys.append(obj_key)

    # Fetch the cache keys
    if cache_keys and self.cache:
        cache_vals = self.cache.get_many(cache_keys)
    else:
        cache_vals = {}

    # Use cached representations, or recreate
    cache_to_set = {}
    for model_name, obj_pk, obj, obj_key in spec_keys:

        # Load cached objects
        obj_val = cache_vals.get(obj_key)
        obj_native = json.loads(obj_val) if obj_val else None

        # Invalid or not set - load from database
        if not obj_native:
            if not obj:
                loader = self.model_function(model_name, version, 'loader')
                obj = loader(obj_pk)
            serializer = self.model_function(model_name, version, 'serializer')
            obj_native = serializer(obj) or {}
            if obj_native:
                cache_to_set[obj_key] = json.dumps(obj_native)

        # Get fields to convert
        keys = [key for key in obj_native.keys() if ':' in key]
        for key in keys:
            json_value = obj_native.pop(key)
            name, value = self.field_from_json(key, json_value)
            assert name not in obj_native
            obj_native[name] = value

        if obj_native:
            ret[(model_name, obj_pk)] = (obj_native, obj_key, obj)

    # Save any new cached representations
    if cache_to_set and self.cache:
        self.cache.set_many(cache_to_set)

    return ret
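A hedged usage sketch (``cache`` is assumed to be a configured instance of this class, with a registered 'User' default serializer/loader as in the poll examples above):

instances = cache.get_instances([('User', 1, None)])
native, key, obj = instances[('User', 1)]
# native is the JSON-decoded representation, with 'key:Type' entries
# already converted back to fields by field_from_json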
def update_instance(
        self, model_name, pk, instance=None, version=None,
        update_only=False):
    """Create or update a cached instance.

    Keyword arguments are:
    model_name - The name of the model
    pk - The primary key of the instance
    instance - The Django model instance, or None to load it
    version - Version to update, or None for all
    update_only - If False (default), then missing cache entries will be
        populated and will cause follow-on invalidation. If True, then
        only entries already in the cache will be updated and cause
        follow-on invalidation.

    Return is a list of tuples (model name, pk, immediate) that also need
    to be updated.
    """
    versions = [version] if version else self.versions
    invalid = []
    for version in versions:
        serializer = self.model_function(model_name, version, 'serializer')
        loader = self.model_function(model_name, version, 'loader')
        invalidator = self.model_function(model_name, version, 'invalidator')
        if serializer is None and loader is None and invalidator is None:
            continue

        if self.cache is None:
            continue

        # Try to load the instance
        if not instance:
            instance = loader(pk)

        if serializer:
            # Get the current value, if in the cache
            key = self.key_for(version, model_name, pk)
            current_raw = self.cache.get(key)
            current = json.loads(current_raw) if current_raw else None

            # Get the new value
            if update_only and current_raw is None:
                new = None
            else:
                new = serializer(instance)
            deleted = not instance

            # If the cache is invalid, update the cache
            invalidate = (current != new) or deleted
            if invalidate:
                if deleted:
                    self.cache.delete(key)
                else:
                    self.cache.set(key, json.dumps(new))
        else:
            invalidate = True

        # Invalidate upstream caches
        if instance and invalidate:
            for upstream in invalidator(instance):
                if isinstance(upstream, str):
                    self.cache.delete(upstream)
                else:
                    m, i, immediate = upstream
                    if immediate:
                        invalidate_key = self.key_for(version, m, i)
                        self.cache.delete(invalidate_key)
                    invalid.append((m, i, version))
    return invalid
def field_date_to_json(self, day):
    """Convert a date to a date triple."""
    if isinstance(day, six.string_types):
        day = parse_date(day)
    return [day.year, day.month, day.day] if day else None
def field_datetime_from_json(self, json_val):
    """Convert a UTC timestamp to a UTC datetime."""
    if type(json_val) == int:
        seconds = int(json_val)
        dt = datetime.fromtimestamp(seconds, utc)
    elif json_val is None:
        dt = None
    else:
        seconds, microseconds = [int(x) for x in json_val.split('.')]
        dt = datetime.fromtimestamp(seconds, utc)
        dt += timedelta(microseconds=microseconds)
    return dt
def field_datetime_to_json(self, dt):
    """Convert a datetime to a UTC timestamp w/ microsecond resolution.

    Datetimes without a timezone are assumed to be in UTC.
    """
    if isinstance(dt, six.string_types):
        dt = parse_datetime(dt)
    if not dt:
        return None
    ts = timegm(dt.utctimetuple())
    if dt.microsecond:
        return "{0}.{1:0>6d}".format(ts, dt.microsecond)
    else:
        return ts
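Round-trip behavior of the two datetime converters: an integer timestamp when microseconds are zero, a "seconds.microseconds" string otherwise (``cache`` is assumed to be an instance of this class, and ``utc`` its UTC tzinfo):

from datetime import datetime

dt = datetime(2015, 6, 1, 12, 30, 0, 250000, tzinfo=utc)
as_json = cache.field_datetime_to_json(dt)    # '1433161800.250000'
assert cache.field_datetime_from_json(as_json) == dt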
Convert json_val to a timedelta object. json_val contains the total number of seconds in the timedelta. If json_val is a string it will be converted to a float. def field_timedelta_from_json(self, json_val):
    """Convert json_val to a timedelta object.

    json_val contains the total number of seconds in the timedelta.
    If json_val is a string it will be converted to a float.
    """
    if isinstance(json_val, six.string_types):
        return timedelta(seconds=float(json_val))
    elif json_val is None:
        return None
    else:
        return timedelta(seconds=json_val)
Convert timedelta to value containing total number of seconds. If there are fractions of a second the return value will be a string, otherwise it will be an int. def field_timedelta_to_json(self, td): """Convert timedelta to value containing total number of seconds. If there are fractions of a second the return value will be a string, otherwise it will be an int. """ if isinstance(td, six.string_types): td = parse_duration(td) if not td: return None if td.microseconds > 0: return str(td.total_seconds()) else: return int(td.total_seconds())
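A quick illustration of the asymmetric return type (assuming `cache` is an instance of the class holding these converters):

from datetime import timedelta

assert cache.field_timedelta_to_json(timedelta(seconds=90)) == 90
assert cache.field_timedelta_to_json(
    timedelta(seconds=1, microseconds=500000)) == "1.5"
assert cache.field_timedelta_from_json("1.5") == timedelta(seconds=1.5)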
Load a PkOnlyQueryset from a JSON dict. This uses the same format as cached_queryset_from_json def field_pklist_from_json(self, data): """Load a PkOnlyQueryset from a JSON dict. This uses the same format as cached_queryset_from_json """ model = get_model(data['app'], data['model']) return PkOnlyQueryset(self, model, data['pks'])
Convert a list of primary keys to a JSON dict. This uses the same format as cached_queryset_to_json def field_pklist_to_json(self, model, pks): """Convert a list of primary keys to a JSON dict. This uses the same format as cached_queryset_to_json """ app_label = model._meta.app_label model_name = model._meta.model_name return { 'app': app_label, 'model': model_name, 'pks': list(pks), }
Load a PkOnlyModel from a JSON dict. def field_pk_from_json(self, data): """Load a PkOnlyModel from a JSON dict.""" model = get_model(data['app'], data['model']) return PkOnlyModel(self, model, data['pk'])
Convert a primary key to a JSON dict. def field_pk_to_json(self, model, pk): """Convert a primary key to a JSON dict.""" app_label = model._meta.app_label model_name = model._meta.model_name return { 'app': app_label, 'model': model_name, 'pk': pk, }
Update cache when choice.voters changes. def choice_voters_changed_update_cache(
        sender, instance, action, reverse, model, pk_set, **kwargs):
    """Update cache when choice.voters changes."""
    if action not in ('post_add', 'post_remove', 'post_clear'):
        # Only the post_* actions are handled; the pre_* actions fire
        # before the change is applied (clear() is called in
        # django.db.models.fields.related.ReverseManyRelatedObjects.__set__
        # before the new set is assigned).
        return

    if model == User:
        assert isinstance(instance, Choice)
        choices = [instance]
        if pk_set:
            users = list(User.objects.filter(pk__in=pk_set))
        else:
            users = []
    else:
        if pk_set:
            choices = list(Choice.objects.filter(pk__in=pk_set))
        else:
            choices = []
        users = [instance]

    from .tasks import update_cache_for_instance
    for choice in choices:
        update_cache_for_instance('Choice', choice.pk, choice)
    for user in users:
        update_cache_for_instance('User', user.pk, user)
Update the cache when an instance is deleted. def post_delete_update_cache(sender, instance, **kwargs): """Update the cache when an instance is deleted.""" name = sender.__name__ if name in cached_model_names: from .tasks import update_cache_for_instance update_cache_for_instance(name, instance.pk, instance)
Update the cache when an instance is created or modified. def post_save_update_cache(sender, instance, created, raw, **kwargs): """Update the cache when an instance is created or modified.""" if raw: return name = sender.__name__ if name in cached_model_names: delay_cache = getattr(instance, '_delay_cache', False) if not delay_cache: from .tasks import update_cache_for_instance update_cache_for_instance(name, instance.pk, instance)
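These handlers only take effect once wired to Django's signals, typically in an AppConfig.ready() hook; a plausible registration (the Choice model and its voters field are assumed) looks like:

from django.db.models.signals import m2m_changed, post_delete, post_save

# Global connects are safe here: both post_* handlers no-op unless the
# sender's name is in cached_model_names.
post_save.connect(post_save_update_cache)
post_delete.connect(post_delete_update_cache)
m2m_changed.connect(
    choice_voters_changed_update_cache, sender=Choice.voters.through)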
Get the queryset for the action. If the action is a read action (list or retrieve), return a CachedQueryset; otherwise, return a plain Django queryset. def get_queryset(self):
    """Get the queryset for the action.

    If the action is a read action (list or retrieve), return a
    CachedQueryset; otherwise, return a plain Django queryset.
    """
    queryset = super(CachedViewMixin, self).get_queryset()
    if self.action in ('list', 'retrieve'):
        return CachedQueryset(self.get_queryset_cache(), queryset=queryset)
    else:
        return queryset
Return the object the view is displaying. Same as rest_framework.generics.GenericAPIView, but: - Failing assertions instead of deprecation warnings def get_object(self, queryset=None):
    """
    Return the object the view is displaying.

    Same as rest_framework.generics.GenericAPIView, but:
    - Failing assertions instead of deprecation warnings
    """
    # Determine the base queryset to use.
    assert queryset is None, "Passing a queryset is disabled"
    queryset = self.filter_queryset(self.get_queryset())

    # Perform the lookup filtering.
    lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
    lookup = self.kwargs.get(lookup_url_kwarg, None)
    assert lookup is not None, "Other lookup methods are disabled"
    filter_kwargs = {self.lookup_field: lookup}
    obj = self.get_object_or_404(queryset, **filter_kwargs)

    # May raise a permission denied
    self.check_object_permissions(self.request, obj)

    return obj
Return an object or raise a 404. Same as Django's standard shortcut, but make sure to raise 404 if the filter_kwargs don't match the required types. def get_object_or_404(self, queryset, *filter_args, **filter_kwargs): """Return an object or raise a 404. Same as Django's standard shortcut, but make sure to raise 404 if the filter_kwargs don't match the required types. """ if isinstance(queryset, CachedQueryset): try: return queryset.get(*filter_args, **filter_kwargs) except queryset.model.DoesNotExist: raise Http404( 'No %s matches the given query.' % queryset.model) else: return get_object_or_404(queryset, *filter_args, **filter_kwargs)
Resolve the object. This returns default (if present) or fails on an Empty. def r(self, **kwargs): """ Resolve the object. This returns default (if present) or fails on an Empty. """ # by using kwargs we ensure that usage of positional arguments, as if # this object were another kind of function, will fail-fast and raise # a TypeError if 'default' in kwargs: default = kwargs.pop('default') if kwargs: raise TypeError( "Unexpected argument: {}".format(repr(next(iter(kwargs)))) ) return default else: raise JSaneException( "Key does not exist: {}".format(repr(self._key_name)) )
Resolve the object. This will always succeed, since, if a lookup fails, an Empty instance will be returned farther upstream. def r(self, **kwargs): """ Resolve the object. This will always succeed, since, if a lookup fails, an Empty instance will be returned farther upstream. """ # by using kwargs we ensure that usage of positional arguments, as if # this object were another kind of function, will fail-fast and raise # a TypeError kwargs.pop('default', None) if kwargs: raise TypeError( "Unexpected argument: {}".format(repr(next(iter(kwargs)))) ) return self._obj
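In practice (the `Obj` wrapper name and the sample data are assumed for illustration), the two resolution variants behave like this:

data = Obj({"user": {"name": "Ada"}})

data.user.name.r()               # "Ada"
data.user.email.r(default=None)  # None -- lookup failed, default wins
data.user.email.r()              # raises JSaneException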
Update the cache for an instance, with cascading updates. def update_cache_for_instance( model_name, instance_pk, instance=None, version=None): """Update the cache for an instance, with cascading updates.""" cache = SampleCache() invalid = cache.update_instance(model_name, instance_pk, instance, version) for invalid_name, invalid_pk, invalid_version in invalid: update_cache_for_instance.delay( invalid_name, invalid_pk, version=invalid_version)
Return the primary keys as a list. The only valid call is values_list('pk', flat=True). def values_list(self, *args, **kwargs):
    """Return the primary keys as a list.

    The only valid call is values_list('pk', flat=True).
    """
    flat = kwargs.pop('flat', False)
    assert flat is True
    assert len(args) == 1
    # Accept the 'pk' alias as well as the concrete primary key name.
    assert args[0] in ('pk', self.model._meta.pk.name)
    return self.pks
Lazy-load the primary keys. def pks(self): """Lazy-load the primary keys.""" if self._primary_keys is None: self._primary_keys = list( self.queryset.values_list('pk', flat=True)) return self._primary_keys
Return a count of instances. def count(self): """Return a count of instances.""" if self._primary_keys is None: return self.queryset.count() else: return len(self.pks)
Filter the base queryset. def filter(self, **kwargs): """Filter the base queryset.""" assert not self._primary_keys self.queryset = self.queryset.filter(**kwargs) return self
Return the single item from the filtered queryset. def get(self, *args, **kwargs): """Return the single item from the filtered queryset.""" assert not args assert list(kwargs.keys()) == ['pk'] pk = kwargs['pk'] model_name = self.model.__name__ object_spec = (model_name, pk, None) instances = self.cache.get_instances((object_spec,)) try: model_data = instances[(model_name, pk)][0] except KeyError: raise self.model.DoesNotExist( "No match for %r with args %r, kwargs %r" % (self.model, args, kwargs)) else: return CachedModel(self.model, model_data)
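Putting the queryset pieces together (the Choice model and `cache` strategy instance are assumed), a cached lookup mirrors the Django API but returns a CachedModel:

# Assumed usage of the cached queryset wrapper above.
cached = CachedQueryset(cache, queryset=Choice.objects.all())
try:
    choice = cached.get(pk=42)   # a CachedModel wrapping cached JSON
except Choice.DoesNotExist:
    choice = None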
Load all constant generators from settings.WEBPACK_CONSTANT_PROCESSORS and concat their values. def collect(cls): """ Load all constant generators from settings.WEBPACK_CONSTANT_PROCESSORS and concat their values. """ constants = {} for method_path in WebpackConstants.get_constant_processors(): method = import_string(method_path) if not callable(method): raise ImproperlyConfigured('Constant processor "%s" is not callable' % method_path) result = method(constants) if isinstance(result, dict): constants.update(result) return constants
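A constant processor is any importable callable that receives the constants collected so far and returns a dict to merge in; a minimal sketch (the module path and values are hypothetical):

# settings.py
WEBPACK_CONSTANT_PROCESSORS = [
    'myproject.webpack.build_constants',   # hypothetical dotted path
]

# myproject/webpack.py
def build_constants(existing_constants):
    # May inspect `existing_constants` produced by earlier processors.
    return {'API_ROOT': '/api/v1/', 'SENTRY_DSN': ''}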
Validates a phone number. Similar to phonenumber_field.validators.validate_international_phonenumber() but uses a different message if the country prefix is absent. def phonenumber_validation(data):
    """
    Validates a phone number.

    Similar to phonenumber_field.validators.validate_international_phonenumber()
    but uses a different message if the country prefix is absent.
    """
    from phonenumber_field.phonenumber import to_python

    phone_number = to_python(data)
    if not phone_number:
        return data

    elif not phone_number.country_code:
        raise serializers.ValidationError(_("Phone number needs to include valid country code (E.g +37255555555)."))

    elif not phone_number.is_valid():
        raise serializers.ValidationError(_('The phone number entered is not valid.'))

    return data
Create Django translation catalogue for `locale`. def get_catalog(self, locale): """Create Django translation catalogue for `locale`.""" with translation.override(locale): translation_engine = DjangoTranslation(locale, domain=self.domain, localedirs=self.paths) trans_cat = translation_engine._catalog trans_fallback_cat = translation_engine._fallback._catalog if translation_engine._fallback else {} return trans_cat, trans_fallback_cat
Create list of matching packages for translation engine. def get_paths(cls, packages): """Create list of matching packages for translation engine.""" allowable_packages = dict((app_config.name, app_config) for app_config in apps.get_app_configs()) app_configs = [allowable_packages[p] for p in packages if p in allowable_packages] # paths of requested packages return [os.path.join(app.path, 'locale') for app in app_configs]
Get `.po` header value. def get_catalogue_header_value(cls, catalog, key): """Get `.po` header value.""" header_value = None if '' in catalog: for line in catalog[''].split('\n'): if line.startswith('%s:' % key): header_value = line.split(':', 1)[1].strip() return header_value
Return the number of plurals for this catalog language, or 2 if no plural string is available. def _num_plurals(self, catalogue): """ Return the number of plurals for this catalog language, or 2 if no plural string is available. """ match = re.search(r'nplurals=\s*(\d+)', self.get_plural(catalogue) or '') if match: return int(match.groups()[0]) return 2
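For example (header value invented for illustration), the same regex extracts 3 from a three-form Plural-Forms string, while an empty or malformed value triggers the fallback of 2:

import re

header = "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : 1);"
match = re.search(r'nplurals=\s*(\d+)', header)
assert int(match.groups()[0]) == 3

assert re.search(r'nplurals=\s*(\d+)', '') is None   # falls back to 2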
Populate header with correct data from top-most locale file. def make_header(self, locale, catalog): """Populate header with correct data from top-most locale file.""" return { "po-revision-date": self.get_catalogue_header_value(catalog, 'PO-Revision-Date'), "mime-version": self.get_catalogue_header_value(catalog, 'MIME-Version'), "last-translator": 'Automatic <hi@thorgate.eu>', "x-generator": "Python", "language": self.get_catalogue_header_value(catalog, 'Language') or locale, "lang": locale, "content-transfer-encoding": self.get_catalogue_header_value(catalog, 'Content-Transfer-Encoding'), "project-id-version": self.get_catalogue_header_value(catalog, 'Project-Id-Version'), "pot-creation-date": self.get_catalogue_header_value(catalog, 'POT-Creation-Date'), "domain": self.domain, "report-msgid-bugs-to": self.get_catalogue_header_value(catalog, 'Report-Msgid-Bugs-To'), "content-type": self.get_catalogue_header_value(catalog, 'Content-Type'), "plural-forms": self.get_plural(catalog), "language-team": self.get_catalogue_header_value(catalog, 'Language-Team') }
Collect all `domain` translations and return `Tuple[languages, locale_data]` def collect_translations(self):
    """Collect all `domain` translations and return `Tuple[languages, locale_data]`"""
    languages = {}
    locale_data = {}
    for language_code, label in settings.LANGUAGES:
        languages[language_code] = '%s' % label

        # Create a Django translation engine for `language_code`
        trans_cat, trans_fallback_cat = self.get_catalog(language_code)

        # Add the meta object
        locale_data[language_code] = {}
        locale_data[language_code][""] = self.make_header(language_code, trans_cat)

        num_plurals = self._num_plurals(trans_cat)

        # The following code is largely taken from Django@master (01.10.2017),
        # `django.views.i18n.JavaScriptCatalog`
        pdict = {}
        seen_keys = set()
        for key, value in itertools.chain(six.iteritems(trans_cat), six.iteritems(trans_fallback_cat)):
            if key == '' or key in seen_keys:
                continue

            if isinstance(key, six.string_types):
                locale_data[language_code][key] = [value]

            elif isinstance(key, tuple):
                msgid, cnt = key
                pdict.setdefault(msgid, {})[cnt] = value

            else:
                raise TypeError(key)

            seen_keys.add(key)

        for k, v in pdict.items():
            locale_data[language_code][k] = [v.get(i, '') for i in range(num_plurals)]

    for key, value in locale_data.items():
        locale_data[key] = json.dumps(value)

    return languages, locale_data
Tiny helper function that gets used all over the place to join the object ID to the endpoint and run a GET request, returning the result def get_endpoint_obj(client, endpoint, object_id): ''' Tiny helper function that gets used all over the place to join the object ID to the endpoint and run a GET request, returning the result ''' endpoint = '/'.join([endpoint, str(object_id)]) return client.authenticated_request(endpoint).json()
Helper method to ease the repetitiveness of updating an... SO VERY DRY (That's a doubly-effective pun because my predecessor - https://github.com/bsmt/wunderpy - found maintaining a Python Wunderlist API to be "as tedious and boring as a liberal arts school poetry slam") def update_endpoint_obj(client, endpoint, object_id, revision, data):
    '''
    Helper method to ease the repetitiveness of updating an... SO VERY DRY

    (That's a doubly-effective pun because my predecessor -
    https://github.com/bsmt/wunderpy - found maintaining a Python Wunderlist
    API to be "as tedious and boring as a liberal arts school poetry slam")
    '''
    data['revision'] = int(revision)
    endpoint = '/'.join([endpoint, str(object_id)])
    return client.authenticated_request(endpoint, 'PATCH', data=data).json()
Helper method to validate that the response given to a Wunderlist API request is as expected def _validate_response(self, method, response):
    ''' Helper method to validate that the response given to a Wunderlist API request is as expected '''

    # TODO Fill this out using the error codes here: https://developer.wunderlist.com/documentation/concepts/formats

    # The expected results can change based on API version, so validate this here
    if self.api_version:
        if response.status_code >= 400:
            raise ValueError('{} {}'.format(response.status_code, str(response.json())))
        if method == 'GET':
            assert response.status_code == 200
        elif method == 'POST':
            assert response.status_code == 201
        elif method == 'PATCH':
            assert response.status_code == 200
        elif method == 'DELETE':
            assert response.status_code == 204
Send a request to the given Wunderlist API endpoint Params: endpoint -- API endpoint to send request to Keyword Args: headers -- headers to add to the request method -- GET, PUT, PATCH, DELETE, etc. params -- parameters to encode in the request data -- data to send with the request def request(self, endpoint, method='GET', headers=None, params=None, data=None): ''' Send a request to the given Wunderlist API endpoint Params: endpoint -- API endpoint to send request to Keyword Args: headers -- headers to add to the request method -- GET, PUT, PATCH, DELETE, etc. params -- parameters to encode in the request data -- data to send with the request ''' if not headers: headers = {} if method in ['POST', 'PATCH', 'PUT']: headers['Content-Type'] = 'application/json' url = '/'.join([self.api_url, 'v' + self.api_version, endpoint]) data = json.dumps(data) if data else None try: response = requests.request(method=method, url=url, params=params, headers=headers, data=data) # TODO Does recreating the exception classes 'requests' use suck? Yes, but it sucks more to expose the underlying library I use except requests.exceptions.Timeout as e: raise wp_exceptions.TimeoutError(e) except requests.exceptions.ConnectionError as e: raise wp_exceptions.ConnectionError(e) self._validate_response(method, response) return response
Exchange a temporary code for an access token allowing access to a user's account See https://developer.wunderlist.com/documentation/concepts/authorization for more info def get_access_token(self, code, client_id, client_secret):
    '''
    Exchange a temporary code for an access token allowing access to a user's account

    See https://developer.wunderlist.com/documentation/concepts/authorization for more info
    '''
    headers = {
        'Content-Type' : 'application/json'
    }
    data = {
        'client_id' : client_id,
        'client_secret' : client_secret,
        'code' : code,
    }
    str_data = json.dumps(data)
    response = requests.request(method='POST', url=ACCESS_TOKEN_URL, headers=headers, data=str_data)
    status_code = response.status_code
    if status_code != 200:
        raise ValueError("{} -- {}".format(status_code, response.json()))
    # The token is carried in the JSON body of the response
    return response.json()['access_token']
Send a request to the given Wunderlist API with 'X-Access-Token' and 'X-Client-ID' headers and ensure the response code is as expected given the request type Params: endpoint -- API endpoint to send request to Keyword Args: method -- GET, PUT, PATCH, DELETE, etc. params -- parameters to encode in the request data -- data to send with the request def authenticated_request(self, endpoint, method='GET', params=None, data=None): ''' Send a request to the given Wunderlist API with 'X-Access-Token' and 'X-Client-ID' headers and ensure the response code is as expected given the request type Params: endpoint -- API endpoint to send request to Keyword Args: method -- GET, PUT, PATCH, DELETE, etc. params -- parameters to encode in the request data -- data to send with the request ''' headers = { 'X-Access-Token' : self.access_token, 'X-Client-ID' : self.client_id } return self.api.request(endpoint, method=method, headers=headers, params=params, data=data)
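A brief usage sketch (the token, client ID, list ID, and task shape are placeholders, and the constructor shown is an assumption rather than the library's documented entry point):

# Hypothetical session: every call below funnels through
# authenticated_request(), which adds the 'X-Access-Token' and
# 'X-Client-ID' headers before delegating to the raw API.
client = WunderClient('ACCESS_TOKEN', 'CLIENT_ID')   # assumed constructor
for task in client.get_tasks(12345):
    print(task['title'])   # tasks assumed to be dicts with a 'title' key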
Updates the list with the given ID to have the given title and public flag def update_list(self, list_id, revision, title=None, public=None): ''' Updates the list with the given ID to have the given title and public flag ''' return lists_endpoint.update_list(self, list_id, revision, title=title, public=public)
Gets tasks for the list with the given ID, filtered by the given completion flag def get_tasks(self, list_id, completed=False): ''' Gets tasks for the list with the given ID, filtered by the given completion flag ''' return tasks_endpoint.get_tasks(self, list_id, completed=completed)
Creates a new task with the given information in the list with the given ID def create_task(self, list_id, title, assignee_id=None, completed=None, recurrence_type=None, recurrence_count=None, due_date=None, starred=None): ''' Creates a new task with the given information in the list with the given ID ''' return tasks_endpoint.create_task(self, list_id, title, assignee_id=assignee_id, completed=completed, recurrence_type=recurrence_type, recurrence_count=recurrence_count, due_date=due_date, starred=starred)
Updates the task with the given ID to have the given information NOTE: The 'remove' parameter is an optional list of parameters to remove from the given task, e.g. ['due_date'] def update_task(self, task_id, revision, title=None, assignee_id=None, completed=None, recurrence_type=None, recurrence_count=None, due_date=None, starred=None, remove=None): ''' Updates the task with the given ID to have the given information NOTE: The 'remove' parameter is an optional list of parameters to remove from the given task, e.g. ['due_date'] ''' return tasks_endpoint.update_task(self, task_id, revision, title=title, assignee_id=assignee_id, completed=completed, recurrence_type=recurrence_type, recurrence_count=recurrence_count, due_date=due_date, starred=starred, remove=remove)
Updates the note with the given ID to have the given content def update_note(self, note_id, revision, content): ''' Updates the note with the given ID to have the given content ''' return notes_endpoint.update_note(self, note_id, revision, content)
Gets subtasks for task with given ID def get_task_subtasks(self, task_id, completed=False): ''' Gets subtasks for task with given ID ''' return subtasks_endpoint.get_task_subtasks(self, task_id, completed=completed)
Gets subtasks for the list with given ID def get_list_subtasks(self, list_id, completed=False): ''' Gets subtasks for the list with given ID ''' return subtasks_endpoint.get_list_subtasks(self, list_id, completed=completed)
Creates a subtask with the given title under the task with the given ID Return: Newly-created subtask def create_subtask(self, task_id, title, completed=False): ''' Creates a subtask with the given title under the task with the given ID Return: Newly-created subtask ''' return subtasks_endpoint.create_subtask(self, task_id, title, completed=completed)
Updates the subtask with the given ID See https://developer.wunderlist.com/documentation/endpoints/subtask for detailed parameter information Returns: Subtask with given ID with properties and revision updated def update_subtask(self, subtask_id, revision, title=None, completed=None): ''' Updates the subtask with the given ID See https://developer.wunderlist.com/documentation/endpoints/subtask for detailed parameter information Returns: Subtask with given ID with properties and revision updated ''' return subtasks_endpoint.update_subtask(self, subtask_id, revision, title=title, completed=completed)
Updates the ordering of lists to have the given value. The given ID and revision should match the singleton object defining how lists are laid out. See https://developer.wunderlist.com/documentation/endpoints/positions for more info Return: The updated ListPositionsObj-mapped object defining the order of list layout def update_list_positions_obj(self, positions_obj_id, revision, values): ''' Updates the ordering of lists to have the given value. The given ID and revision should match the singleton object defining how lists are laid out. See https://developer.wunderlist.com/documentation/endpoints/positions for more info Return: The updated ListPositionsObj-mapped object defining the order of list layout ''' return positions_endpoints.update_list_positions_obj(self, positions_obj_id, revision, values)
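A sketch of the full reorder flow (the getter name and IDs are assumptions; only update_list_positions_obj above is taken from the source):

# Assumed flow: fetch the singleton positions object for its id and
# revision, then write back the new ordering of list IDs as `values`.
positions = client.get_list_positions_objs()[0]   # assumed getter
client.update_list_positions_obj(
    positions['id'], positions['revision'], [103, 101, 102])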