repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
dnephin/PyStaticConfiguration | staticconf/config.py | get_namespace | python | def get_namespace(name):
if name not in configuration_namespaces:
configuration_namespaces[name] = ConfigNamespace(name)
return configuration_namespaces[name] | Return a :class:`ConfigNamespace` by name, creating the
namespace if it does not exist. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L188-L194 | null | """
Store configuration in :class:`ConfigNamespace` objects and provide tools
for reloading, and displaying help messages.
Configuration Reloading
-----------------------
Configuration reloading is supported using a :class:`ConfigFacade`, which
composes a :class:`ConfigurationWatcher` and a :class:`ReloadCallbackChain`.
These classes provide a way of reloading configuration when the file is
modified.
"""
from collections import namedtuple
import hashlib
import logging
import os
import time
import weakref
import six
from staticconf import errors
log = logging.getLogger(__name__)
# Name for the default namespace
DEFAULT = 'DEFAULT'
def remove_by_keys(dictionary, keys):
    """Return the (key, value) pairs of *dictionary* whose key is not in *keys*.

    :param dictionary: mapping to filter
    :param keys: iterable of keys to exclude
    :returns: a list of ``(key, value)`` tuples for the remaining entries
    """
    excluded = set(keys)
    return [item for item in six.iteritems(dictionary) if item[0] not in excluded]
class ConfigMap(object):
    """A thin mapping wrapper used to keep a nested dict intact.

    Wrapping a dictionary in a :class:`ConfigMap` lets you retain your
    mapping structure and prevents it from being flattened by the
    configuration loader.
    """

    def __init__(self, *args, **kwargs):
        # Accepts the same constructor arguments as dict().
        self.data = dict(*args, **kwargs)

    def __getitem__(self, item):
        """Return the value for *item*; raises KeyError when missing."""
        return self.data[item]

    def get(self, item, default=None):
        """Return the value for *item*, or *default* when missing."""
        return self.data.get(item, default)

    def __contains__(self, item):
        """True when *item* is a key of the wrapped dict."""
        return item in self.data

    def __len__(self):
        """Number of keys in the wrapped dict."""
        return len(self.data)
class ConfigNamespace(object):
    """A container for related configuration values. Values are stored
    using flattened keys which map to values.

    Values are added to this container using :mod:`staticconf.loader`. When a
    :class:`ConfigNamespace` is created, it persists for the entire life of the
    process. Values will stay in the namespace until :func:`clear` is called
    to remove them.

    To retrieve a namespace, use :func:`get_namespace`.

    To access values stored in this namespace use :mod:`staticconf.readers`
    or :mod:`staticconf.schema`.
    """

    def __init__(self, name):
        self.name = name
        # Flattened dotted-key -> value mapping for this namespace.
        self.configuration_values = {}
        # Weak references to registered value proxies; entries vanish
        # automatically once a proxy is no longer referenced elsewhere.
        self.value_proxies = weakref.WeakValueDictionary()

    def get_name(self):
        """Return the name of this namespace."""
        return self.name

    def get_value_proxies(self):
        """Return a list of the currently live value proxies."""
        return list(self.value_proxies.values())

    def register_proxy(self, proxy):
        """Register a value proxy, keyed by identity so entries never clash."""
        self.value_proxies[id(proxy)] = proxy

    def apply_config_data(
        self,
        config_data,
        error_on_unknown,
        error_on_dupe,
        log_keys_only=False,
    ):
        """Validate *config_data* and merge it into this namespace.

        :param config_data: mapping of flattened keys to values
        :param error_on_unknown: raise when a key has no registered proxy
        :param error_on_dupe: raise when a key already has a value
        :param log_keys_only: log only key names (not values) for unknown keys
        """
        self.validate_keys(
            config_data,
            error_on_unknown,
            log_keys_only=log_keys_only,
        )
        self.has_duplicate_keys(config_data, error_on_dupe)
        self.update_values(config_data)

    def update_values(self, *args, **kwargs):
        """Merge values into this namespace (same signature as dict.update)."""
        self.configuration_values.update(*args, **kwargs)

    def get_config_values(self):
        """Return all configuration stored in this object as a dict."""
        return self.configuration_values

    def get_config_dict(self):
        """Reconstruct the nested structure of this object's configuration
        and return it as a dict.
        """
        config_dict = {}
        for dotted_key, value in self.get_config_values().items():
            subkeys = dotted_key.split('.')
            d = config_dict
            for key in subkeys:
                # Walk/extend the nested dicts: the final path element gets
                # the value, intermediate elements get (or reuse) a dict.
                d = d.setdefault(key, value if key == subkeys[-1] else {})
        return config_dict

    def get_known_keys(self):
        """Return the set of config keys registered by live value proxies."""
        return set(vproxy.config_key for vproxy in self.get_value_proxies())

    def validate_keys(
        self,
        config_data,
        error_on_unknown,
        log_keys_only=False,
    ):
        """Check *config_data* for keys that have no registered proxy.

        Unknown keys raise :class:`staticconf.errors.ConfigurationError` when
        *error_on_unknown* is True; otherwise they are logged at info level.
        """
        unknown = remove_by_keys(config_data, self.get_known_keys())
        if not unknown:
            return
        if log_keys_only:
            # Keep only the key names in the message, dropping the values.
            unknown = [k for k, _ in unknown]
        msg = "Unexpected value in %s configuration: %s" % (self.name, unknown)
        if error_on_unknown:
            raise errors.ConfigurationError(msg)
        log.info(msg)

    def has_duplicate_keys(self, config_data, error_on_duplicate):
        """Check whether *config_data* overlaps values already stored here."""
        args = config_data, self.configuration_values, error_on_duplicate
        return has_duplicate_keys(*args)

    def get(self, item, default=None):
        """Return the value for *item*, or *default* when missing."""
        return self.configuration_values.get(item, default)

    def __getitem__(self, item):
        return self.configuration_values[item]

    def __setitem__(self, key, value):
        self.configuration_values[key] = value

    def __contains__(self, item):
        return item in self.configuration_values

    def clear(self):
        """Remove all values from the namespace."""
        self.configuration_values.clear()

    def _reset(self):
        # Used for testing: drop both the values and the registered proxies.
        self.clear()
        self.value_proxies.clear()

    def __str__(self):
        return "%s(%s)" % (type(self).__name__, self.name)
# Registry of all namespaces, keyed by name; the DEFAULT namespace always exists.
configuration_namespaces = {DEFAULT: ConfigNamespace(DEFAULT)}
# Description of a registered config key, used to build help messages.
KeyDescription = namedtuple('KeyDescription', 'name validator default help')
def get_namespaces_from_names(name, all_names):
    """Return a generator which yields namespace objects.

    When *all_names* is True every registered namespace is yielded and
    *name* is ignored; otherwise only the namespace called *name* is
    yielded (created on demand by :func:`get_namespace`).
    """
    if all_names:
        requested = configuration_namespaces.keys()
    else:
        requested = [name]
    for namespace_name in requested:
        yield get_namespace(namespace_name)
def reload(name=DEFAULT, all_names=False):
    """Reload one or all :class:`ConfigNamespace`.

    Reload clears the cache of :mod:`staticconf.schema` and
    :mod:`staticconf.getters`, allowing them to pick up the latest values
    in the namespace. Defaults to reloading just the DEFAULT namespace.

    :param name: the name of the :class:`ConfigNamespace` to reload
    :param all_names: If True, reload all namespaces, and ignore `name`
    """
    for ns in get_namespaces_from_names(name, all_names):
        for proxy in ns.get_value_proxies():
            proxy.reset()
def validate(name=DEFAULT, all_names=False):
    """Validate all registered keys after loading configuration.

    Missing values or values which do not pass validation raise
    :class:`staticconf.errors.ConfigurationError`. By default only validates
    the `DEFAULT` namespace.

    :param name: the namespace to validate
    :type name: string
    :param all_names: if True validates all namespaces and ignores `name`
    :type all_names: boolean
    """
    for namespace in get_namespaces_from_names(name, all_names):
        # Call get_value() on every proxy so each one is validated. A plain
        # loop is used instead of all(...) because all() short-circuits on
        # the first falsy (but valid) value such as 0 or '', which would
        # leave the remaining proxies unvalidated.
        for value_proxy in namespace.get_value_proxies():
            value_proxy.get_value()
class ConfigHelp(object):
    """Register and display help messages about config keys."""

    def __init__(self):
        # namespace name -> list of KeyDescription entries
        self.descriptions = {}

    def add(self, name, validator, default, namespace, help):
        """Record a description of a config key under *namespace*."""
        desc = KeyDescription(name, validator, default, help)
        self.descriptions.setdefault(namespace, []).append(desc)

    def view_help(self):
        """Return a help message describing all the statically configured keys.
        """
        def format_desc(desc):
            # One entry: "name (Type: ..., Default: ...)" plus the help text.
            return "%s (Type: %s, Default: %s)\n%s" % (
                desc.name,
                desc.validator.__name__.replace('validate_', ''),
                desc.default,
                desc.help or '')

        def format_namespace(key, desc_list):
            return "\nNamespace: %s\n%s" % (
                key,
                '\n'.join(sorted(format_desc(desc) for desc in desc_list)))

        def namespace_cmp(item):
            # Sort key: chr(0) sorts the DEFAULT namespace before all others.
            name, _ = item
            return chr(0) if name == DEFAULT else name

        return '\n'.join(format_namespace(*desc) for desc in
                         sorted(six.iteritems(self.descriptions),
                                key=namespace_cmp))

    def clear(self):
        """Forget all registered key descriptions."""
        self.descriptions.clear()
# Shared ConfigHelp instance; loaders register key descriptions here.
config_help = ConfigHelp()
# Module-level alias so callers can use staticconf.config.view_help().
view_help = config_help.view_help
def _reset():
    """Used for internal testing."""
    # Clear every namespace (values and proxies) and all help descriptions.
    for namespace in configuration_namespaces.values():
        namespace._reset()
    config_help.clear()
def has_duplicate_keys(config_data, base_conf, raise_error):
    """Check two dictionaries for overlapping keys.

    When duplicates are found and *raise_error* is True a
    :class:`staticconf.errors.ConfigurationError` is raised; otherwise the
    duplicates are logged and True is returned. Returns None when the key
    sets do not overlap.
    """
    overlapping = set(config_data) & set(base_conf)
    if not overlapping:
        return
    msg = "Duplicate keys in config: %s" % overlapping
    if raise_error:
        raise errors.ConfigurationError(msg)
    log.info(msg)
    return True
class ConfigurationWatcher(object):
    """Watches a file for modification and reloads the configuration
    when it's modified. Accepts a min_interval to throttle checks.

    The default :func:`reload()` operation is to reload all namespaces. To
    only reload a specific namespace use a :class:`ReloadCallbackChain`
    for the `reloader`.

    .. seealso::

        :func:`ConfigFacade.load` which provides a more concise interface
        for the common case.

    Usage:

    .. code-block:: python

        import staticconf
        from staticconf import config

        def build_configuration(filename, namespace):
            config_loader = partial(staticconf.YamlConfiguration,
                                    filename, namespace=namespace)
            reloader = config.ReloadCallbackChain(namespace)
            return config.ConfigurationWatcher(
                config_loader, filename, min_interval=2, reloader=reloader)

        config_watcher = build_configuration('config.yaml', 'my_namespace')

        # Load the initial configuration
        config_watcher.config_loader()

        # Do some work
        for item in work:
            config_watcher.reload_if_changed()
            ...

    :param config_loader: a function which takes no arguments. It is called
        by :func:`reload_if_changed` if the file has been modified
    :param filenames: a filename or list of filenames to watch for modifications
    :param min_interval: minimum number of seconds to wait between calls to
        :func:`os.path.getmtime` to check if a file has been modified.
    :param reloader: a function which is called after `config_loader` when a
        file has been modified. Defaults to an empty
        :class:`ReloadCallbackChain`
    :param comparators: a list of classes which support the
        :class:`IComparator` interface which are used to determine if a config
        file has been modified. Defaults to :class:`MTimeComparator`.
    """

    def __init__(
        self,
        config_loader,
        filenames,
        min_interval=0,
        reloader=None,
        comparators=None):
        self.config_loader = config_loader
        self.filenames = self.get_filename_list(filenames)
        self.min_interval = min_interval
        # Timestamp of the most recent modification check.
        self.last_check = time.time()
        self.reloader = reloader or ReloadCallbackChain(all_names=True)
        comparators = comparators or [MTimeComparator]
        # Instantiate each comparator class with the watched filenames.
        self.comparators = [comp(self.filenames) for comp in comparators]

    def get_filename_list(self, filenames):
        """Normalize *filenames* to a sorted list of absolute paths.

        :raises ValueError: when no filenames are given
        """
        if isinstance(filenames, six.string_types):
            filenames = [filenames]
        filenames = sorted(os.path.abspath(name) for name in filenames)
        if not filenames:
            raise ValueError(
                "ConfigurationWatcher requires at least one filename to watch")
        return filenames

    @property
    def should_check(self):
        # True once min_interval seconds have elapsed since the last check.
        return self.last_check + self.min_interval <= time.time()

    def reload_if_changed(self, force=False):
        """If the file(s) being watched by this object have changed,
        their configuration will be loaded again using `config_loader`.
        Otherwise this is a noop.

        :param force: If True ignore the `min_interval` and proceed to
            file modified comparisons. To force a reload use
            :func:`reload` directly.
        """
        if (force or self.should_check) and self.file_modified():
            return self.reload()

    def file_modified(self):
        """True when any comparator reports a change; records the check time."""
        self.last_check = time.time()
        return any(comp.has_changed() for comp in self.comparators)

    def reload(self):
        """Load configuration via config_loader, then run the reloader chain."""
        config_dict = self.config_loader()
        self.reloader()
        return config_dict

    def get_reloader(self):
        """Return the reloader callback (a ReloadCallbackChain by default)."""
        return self.reloader

    def load_config(self):
        """Load configuration without performing any modification checks."""
        return self.config_loader()
class IComparator(object):
    """Interface for a comparator which is used by :class:`ConfigurationWatcher`
    to determine if a file has been modified since the last check. A comparator
    is used to reduce the work required to reload configuration. Comparators
    should implement a mechanism that is relatively efficient (and scalable),
    so it can be performed frequently.

    :param filenames: A list of absolute paths to configuration files.
    """

    def __init__(self, filenames):
        # Interface only: concrete comparators record their state here.
        pass

    def has_changed(self):
        """Returns True if any of the files have been modified since the last
        call to :func:`has_changed`. Returns False otherwise.
        """
        pass
class InodeComparator(object):
    """Detect file changes by watching device and inode numbers.

    A good comparator when files can change multiple times per second,
    e.g. when they are replaced atomically (write to temp + rename), which
    produces a new inode.
    """

    def __init__(self, filenames):
        self.filenames = filenames
        # Snapshot of (st_dev, st_ino) pairs taken at construction time.
        self.inodes = self.get_inodes()

    def get_inodes(self):
        """Return a list of (st_dev, st_ino) pairs, one per watched file."""
        inodes = []
        for filename in self.filenames:
            stbuf = os.stat(filename)
            inodes.append((stbuf.st_dev, stbuf.st_ino))
        return inodes

    def has_changed(self):
        """True when any file's device/inode pair differs from the last check."""
        previous, self.inodes = self.inodes, self.get_inodes()
        return previous != self.inodes
def build_compare_func(err_logger=None):
    """Build a compare_func suitable for :class:`MTimeComparator`.

    The returned function reports a file's modification time via
    ``os.path.getmtime``. When that call raises OSError the function
    returns -1 instead; if *err_logger* was supplied it is first called
    with the filename, inside the OSError handler, so information on the
    error can be retrieved by calling sys.exc_info inside of err_logger.
    """
    def compare_func(filename):
        try:
            mtime = os.path.getmtime(filename)
        except OSError:
            if err_logger is not None:
                err_logger(filename)
            return -1
        return mtime
    return compare_func
class MTimeComparator(object):
    """Detect file changes via modification time, or *compare_func* when one
    is supplied.

    .. note::

        Most filesystems only store modified time with second granularity
        so multiple changes within the same second can be ignored.
    """

    def __init__(self, filenames, compare_func=None):
        # Fall back to os.path.getmtime when no compare_func is given.
        if compare_func is None:
            compare_func = os.path.getmtime
        self.compare_func = compare_func
        # filename -> last observed compare value.
        self.filenames_mtimes = {}
        for filename in filenames:
            self.filenames_mtimes[filename] = self.compare_func(filename)

    def has_changed(self):
        """True when any file's compare value differs from the stored one.

        The first changed file's stored value is refreshed before returning.
        """
        for filename, previous_val in self.filenames_mtimes.items():
            current_val = self.compare_func(filename)
            if previous_val == current_val:
                continue
            self.filenames_mtimes[filename] = current_val
            return True
        return False
class MD5Comparator(object):
    """Detect file changes by hashing file contents with md5.

    Slower for larger files than mtime/inode checks, but more resilient to
    modifications which only change mtime without changing the contents.
    """

    def __init__(self, filenames):
        self.filenames = filenames
        # Digest snapshot taken at construction time.
        self.hashes = self.get_hashes()

    def get_hashes(self):
        """Return the md5 digest of each watched file's contents."""
        digests = []
        for filename in self.filenames:
            with open(filename, 'rb') as fh:
                digests.append(hashlib.md5(fh.read()).digest())
        return digests

    def has_changed(self):
        """True when any file's digest differs from the previous check."""
        previous, self.hashes = self.hashes, self.get_hashes()
        return previous != self.hashes
class ReloadCallbackChain(object):
    """A chain of callbacks which will be triggered after configuration is
    reloaded. Designed to work with :class:`ConfigurationWatcher`.

    When this class is called it performs two operations:

    * calls :func:`reload` on the `namespace`
    * calls all attached callbacks

    Usage:

    .. code-block:: python

        chain = ReloadCallbackChain()
        chain.add('some_id', callback_foo)
        chain.add('other_id', other_callback)
        ...
        # some time later
        chain.remove('some_id')

    :param namespace: the name of the namespace to :func:`reload`
    :param all_names: if True :func:`reload` all namespaces and ignore the
        `namespace` param. Defaults to False
    :param callbacks: initial list of tuples to add to the callback chain
    """

    def __init__(self, namespace=DEFAULT, all_names=False, callbacks=None):
        self.namespace = namespace
        self.all_names = all_names
        # identifier -> callback; dict(()) handles the default of None.
        self.callbacks = dict(callbacks or ())

    def add(self, identifier, callback):
        """Attach *callback* under *identifier*, replacing any existing one."""
        self.callbacks[identifier] = callback

    def remove(self, identifier):
        """Detach the callback registered under *identifier*.

        :raises KeyError: when no callback is registered for *identifier*
        """
        del self.callbacks[identifier]

    def __call__(self):
        # Reload first so the callbacks observe the refreshed configuration.
        reload(name=self.namespace, all_names=self.all_names)
        for callback in six.itervalues(self.callbacks):
            callback()
def build_loader_callable(load_func, filename, namespace):
    """Return a zero-argument loader that clears *namespace* and reloads it.

    The returned callable empties the target :class:`ConfigNamespace`
    before invoking *load_func*, so keys removed from *filename* do not
    linger from a previous load.
    """
    def load_configuration():
        # Drop existing values first so stale keys are removed.
        get_namespace(namespace).clear()
        return load_func(filename, namespace=namespace)
    return load_configuration
class ConfigFacade(object):
    """A facade around a :class:`ConfigurationWatcher` and a
    :class:`ReloadCallbackChain`. See :func:`ConfigFacade.load`.

    When a :class:`ConfigFacade` is loaded it will clear the namespace of
    all configuration and load the file into the namespace. If this is not
    the behaviour you want, use a :class:`ConfigurationWatcher` instead.

    Usage:

    .. code-block:: python

        import staticconf

        watcher = staticconf.ConfigFacade.load(
            'config.yaml', # Filename or list of filenames to watch
            'my_namespace',
            staticconf.YamlConfiguration, # Callable which takes the filename
            min_interval=3 # Wait at least 3 seconds before checking modified time
        )

        watcher.add_callback('identifier', do_this_after_reload)
        watcher.reload_if_changed()
    """

    def __init__(self, watcher):
        self.watcher = watcher
        # The watcher's reloader doubles as the callback chain.
        self.callback_chain = watcher.get_reloader()

    @classmethod
    def load(
        cls,
        filename,
        namespace,
        loader_func,
        min_interval=0,
        comparators=None,
    ):
        """Create a new :class:`ConfigurationWatcher` and load the initial
        configuration by calling `loader_func`.

        :param filename: a filename or list of filenames to monitor for changes
        :param namespace: the name of a namespace to use when loading
                          configuration. All config data from `filename` will
                          end up in a :class:`ConfigNamespace` with this name
        :param loader_func: a function which accepts two arguments and uses
                            loader functions from :mod:`staticconf.loader` to
                            load configuration data into a namespace. The
                            arguments are `filename` and `namespace`
        :param min_interval: minimum number of seconds to wait between calls to
                             :func:`os.path.getmtime` to check if a file has
                             been modified.
        :param comparators: a list of classes which support the
            :class:`IComparator` interface which are used to determine if a config
            file has been modified. See ConfigurationWatcher::__init__.
        :returns: a :class:`ConfigFacade`
        """
        watcher = ConfigurationWatcher(
            build_loader_callable(loader_func, filename, namespace=namespace),
            filename,
            min_interval=min_interval,
            reloader=ReloadCallbackChain(namespace=namespace),
            comparators=comparators,
        )
        # Perform the initial load before handing back the facade.
        watcher.load_config()
        return cls(watcher)

    def add_callback(self, identifier, callback):
        """Register *callback* to run after each successful reload."""
        self.callback_chain.add(identifier, callback)

    def reload_if_changed(self, force=False):
        """See :func:`ConfigurationWatcher.reload_if_changed` """
        self.watcher.reload_if_changed(force=force)
|
dnephin/PyStaticConfiguration | staticconf/config.py | reload | python | def reload(name=DEFAULT, all_names=False):
for namespace in get_namespaces_from_names(name, all_names):
for value_proxy in namespace.get_value_proxies():
value_proxy.reset() | Reload one or all :class:`ConfigNamespace`. Reload clears the cache of
:mod:`staticconf.schema` and :mod:`staticconf.getters`, allowing them to
pickup the latest values in the namespace.
Defaults to reloading just the DEFAULT namespace.
:param name: the name of the :class:`ConfigNamespace` to reload
:param all_names: If True, reload all namespaces, and ignore `name` | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L197-L209 | [
"def get_namespaces_from_names(name, all_names):\n \"\"\"Return a generator which yields namespace objects.\"\"\"\n names = configuration_namespaces.keys() if all_names else [name]\n for name in names:\n yield get_namespace(name)\n"
] | """
Store configuration in :class:`ConfigNamespace` objects and provide tools
for reloading, and displaying help messages.
Configuration Reloading
-----------------------
Configuration reloading is supported using a :class:`ConfigFacade`, which
composes a :class:`ConfigurationWatcher` and a :class:`ReloadCallbackChain`.
These classes provide a way of reloading configuration when the file is
modified.
"""
from collections import namedtuple
import hashlib
import logging
import os
import time
import weakref
import six
from staticconf import errors
log = logging.getLogger(__name__)
# Name for the default namespace
DEFAULT = 'DEFAULT'
def remove_by_keys(dictionary, keys):
keys = set(keys)
def filter_by_keys(item):
k, _ = item
return k not in keys
return list(filter(filter_by_keys, six.iteritems(dictionary)))
class ConfigMap(object):
"""A ConfigMap can be used to wrap a dictionary in your configuration.
It will allow you to retain your mapping structure (and prevent it
from being flattened).
"""
def __init__(self, *args, **kwargs):
self.data = dict(*args, **kwargs)
def __getitem__(self, item):
return self.data[item]
def get(self, item, default=None):
return self.data.get(item, default)
def __contains__(self, item):
return item in self.data
def __len__(self):
return len(self.data)
class ConfigNamespace(object):
"""A container for related configuration values. Values are stored
using flattened keys which map to values.
Values are added to this container using :mod:`staticconf.loader`. When a
:class:`ConfigNamespace` is created, it persists for the entire life of the
process. Values will stay in the namespace until :func:`clear` is called
to remove them.
To retrieve a namespace, use :func:`get_namespace`.
To access values stored in this namespace use :mod:`staticconf.readers`
or :mod:`staticconf.schema`.
"""
def __init__(self, name):
self.name = name
self.configuration_values = {}
self.value_proxies = weakref.WeakValueDictionary()
def get_name(self):
return self.name
def get_value_proxies(self):
return list(self.value_proxies.values())
def register_proxy(self, proxy):
self.value_proxies[id(proxy)] = proxy
def apply_config_data(
self,
config_data,
error_on_unknown,
error_on_dupe,
log_keys_only=False,
):
self.validate_keys(
config_data,
error_on_unknown,
log_keys_only=log_keys_only,
)
self.has_duplicate_keys(config_data, error_on_dupe)
self.update_values(config_data)
def update_values(self, *args, **kwargs):
self.configuration_values.update(*args, **kwargs)
def get_config_values(self):
"""Return all configuration stored in this object as a dict.
"""
return self.configuration_values
def get_config_dict(self):
"""Reconstruct the nested structure of this object's configuration
and return it as a dict.
"""
config_dict = {}
for dotted_key, value in self.get_config_values().items():
subkeys = dotted_key.split('.')
d = config_dict
for key in subkeys:
d = d.setdefault(key, value if key == subkeys[-1] else {})
return config_dict
def get_known_keys(self):
return set(vproxy.config_key for vproxy in self.get_value_proxies())
def validate_keys(
self,
config_data,
error_on_unknown,
log_keys_only=False,
):
unknown = remove_by_keys(config_data, self.get_known_keys())
if not unknown:
return
if log_keys_only:
unknown = [k for k, _ in unknown]
msg = "Unexpected value in %s configuration: %s" % (self.name, unknown)
if error_on_unknown:
raise errors.ConfigurationError(msg)
log.info(msg)
def has_duplicate_keys(self, config_data, error_on_duplicate):
args = config_data, self.configuration_values, error_on_duplicate
return has_duplicate_keys(*args)
def get(self, item, default=None):
return self.configuration_values.get(item, default)
def __getitem__(self, item):
return self.configuration_values[item]
def __setitem__(self, key, value):
self.configuration_values[key] = value
def __contains__(self, item):
return item in self.configuration_values
def clear(self):
"""Remove all values from the namespace."""
self.configuration_values.clear()
def _reset(self):
self.clear()
self.value_proxies.clear()
def __str__(self):
return "%s(%s)" % (type(self).__name__, self.name)
configuration_namespaces = {DEFAULT: ConfigNamespace(DEFAULT)}
KeyDescription = namedtuple('KeyDescription', 'name validator default help')
def get_namespaces_from_names(name, all_names):
"""Return a generator which yields namespace objects."""
names = configuration_namespaces.keys() if all_names else [name]
for name in names:
yield get_namespace(name)
def get_namespace(name):
"""Return a :class:`ConfigNamespace` by name, creating the
namespace if it does not exist.
"""
if name not in configuration_namespaces:
configuration_namespaces[name] = ConfigNamespace(name)
return configuration_namespaces[name]
def validate(name=DEFAULT, all_names=False):
"""Validate all registered keys after loading configuration.
Missing values or values which do not pass validation raise
:class:`staticconf.errors.ConfigurationError`. By default only validates
the `DEFAULT` namespace.
:param name: the namespace to validate
:type name: string
:param all_names: if True validates all namespaces and ignores `name`
:type all_names: boolean
"""
for namespace in get_namespaces_from_names(name, all_names):
all(value_proxy.get_value() for value_proxy in namespace.get_value_proxies())
class ConfigHelp(object):
"""Register and display help messages about config keys."""
def __init__(self):
self.descriptions = {}
def add(self, name, validator, default, namespace, help):
desc = KeyDescription(name, validator, default, help)
self.descriptions.setdefault(namespace, []).append(desc)
def view_help(self):
"""Return a help message describing all the statically configured keys.
"""
def format_desc(desc):
return "%s (Type: %s, Default: %s)\n%s" % (
desc.name,
desc.validator.__name__.replace('validate_', ''),
desc.default,
desc.help or '')
def format_namespace(key, desc_list):
return "\nNamespace: %s\n%s" % (
key,
'\n'.join(sorted(format_desc(desc) for desc in desc_list)))
def namespace_cmp(item):
name, _ = item
return chr(0) if name == DEFAULT else name
return '\n'.join(format_namespace(*desc) for desc in
sorted(six.iteritems(self.descriptions),
key=namespace_cmp))
def clear(self):
self.descriptions.clear()
config_help = ConfigHelp()
view_help = config_help.view_help
def _reset():
"""Used for internal testing."""
for namespace in configuration_namespaces.values():
namespace._reset()
config_help.clear()
def has_duplicate_keys(config_data, base_conf, raise_error):
"""Compare two dictionaries for duplicate keys. if raise_error is True
then raise on exception, otherwise log return True."""
duplicate_keys = set(base_conf) & set(config_data)
if not duplicate_keys:
return
msg = "Duplicate keys in config: %s" % duplicate_keys
if raise_error:
raise errors.ConfigurationError(msg)
log.info(msg)
return True
class ConfigurationWatcher(object):
"""Watches a file for modification and reloads the configuration
when it's modified. Accepts a min_interval to throttle checks.
The default :func:`reload()` operation is to reload all namespaces. To
only reload a specific namespace use a :class:`ReloadCallbackChain`
for the `reloader`.
.. seealso::
:func:`ConfigFacade.load` which provides a more concise interface
for the common case.
Usage:
.. code-block:: python
import staticconf
from staticconf import config
def build_configuration(filename, namespace):
config_loader = partial(staticconf.YamlConfiguration,
filename, namespace=namespace)
reloader = config.ReloadCallbackChain(namespace)
return config.ConfigurationWatcher(
config_loader, filename, min_interval=2, reloader=reloader)
config_watcher = build_configuration('config.yaml', 'my_namespace')
# Load the initial configuration
config_watcher.config_loader()
# Do some work
for item in work:
config_watcher.reload_if_changed()
...
:param config_loader: a function which takes no arguments. It is called
by :func:`reload_if_changed` if the file has been modified
:param filenames: a filename or list of filenames to watch for modifications
:param min_interval: minimum number of seconds to wait between calls to
:func:`os.path.getmtime` to check if a file has been modified.
:param reloader: a function which is called after `config_loader` when a
file has been modified. Defaults to an empty
:class:`ReloadCallbackChain`
:param comparators: a list of classes which support the
:class:`IComparator` interface which are used to determine if a config
file has been modified. Defaults to :class:`MTimeComparator`.
"""
def __init__(
self,
config_loader,
filenames,
min_interval=0,
reloader=None,
comparators=None):
self.config_loader = config_loader
self.filenames = self.get_filename_list(filenames)
self.min_interval = min_interval
self.last_check = time.time()
self.reloader = reloader or ReloadCallbackChain(all_names=True)
comparators = comparators or [MTimeComparator]
self.comparators = [comp(self.filenames) for comp in comparators]
def get_filename_list(self, filenames):
if isinstance(filenames, six.string_types):
filenames = [filenames]
filenames = sorted(os.path.abspath(name) for name in filenames)
if not filenames:
raise ValueError(
"ConfigurationWatcher requires at least one filename to watch")
return filenames
@property
def should_check(self):
return self.last_check + self.min_interval <= time.time()
def reload_if_changed(self, force=False):
"""If the file(s) being watched by this object have changed,
their configuration will be loaded again using `config_loader`.
Otherwise this is a noop.
:param force: If True ignore the `min_interval` and proceed to
file modified comparisons. To force a reload use
:func:`reload` directly.
"""
if (force or self.should_check) and self.file_modified():
return self.reload()
def file_modified(self):
self.last_check = time.time()
return any(comp.has_changed() for comp in self.comparators)
def reload(self):
config_dict = self.config_loader()
self.reloader()
return config_dict
def get_reloader(self):
return self.reloader
def load_config(self):
return self.config_loader()
class IComparator(object):
"""Interface for a comparator which is used by :class:`ConfigurationWatcher`
to determine if a file has been modified since the last check. A comparator
is used to reduce the work required to reload configuration. Comparators
should implement a mechanism that is relatively efficient (and scalable),
so it can be performed frequently.
:param filenames: A list of absolute paths to configuration files.
"""
def __init__(self, filenames):
pass
def has_changed(self):
"""Returns True if any of the files have been modified since the last
call to :func:`has_changed`. Returns False otherwise.
"""
pass
class InodeComparator(object):
"""Compare files by inode and device number. This is a good comparator to
use when your files can change multiple times per second.
"""
def __init__(self, filenames):
self.filenames = filenames
self.inodes = self.get_inodes()
def get_inodes(self):
def get_inode(stbuf):
return stbuf.st_dev, stbuf.st_ino
return [get_inode(os.stat(filename)) for filename in self.filenames]
def has_changed(self):
last_inodes, self.inodes = self.inodes, self.get_inodes()
return last_inodes != self.inodes
def build_compare_func(err_logger=None):
"""Returns a compare_func that can be passed to MTimeComparator.
The returned compare_func first tries os.path.getmtime(filename),
then calls err_logger(filename) if that fails. If err_logger is None,
then it does nothing. err_logger is always called within the context of
an OSError raised by os.path.getmtime(filename). Information on this
error can be retrieved by calling sys.exc_info inside of err_logger."""
def compare_func(filename):
try:
return os.path.getmtime(filename)
except OSError:
if err_logger is not None:
err_logger(filename)
return -1
return compare_func
class MTimeComparator(object):
"""Compare files by modified time, or using compare_func,
if it is not None.
.. note::
Most filesystems only store modified time with second grangularity
so multiple changes within the same second can be ignored.
"""
def __init__(self, filenames, compare_func=None):
self.compare_func = (os.path.getmtime if compare_func is None
else compare_func)
self.filenames_mtimes = {
filename: self.compare_func(filename) for filename in filenames
}
def has_changed(self):
for filename, compare_val in self.filenames_mtimes.items():
current_compare_val = self.compare_func(filename)
if compare_val != current_compare_val:
self.filenames_mtimes[filename] = current_compare_val
return True
return False
class MD5Comparator(object):
    """Detect file changes by hashing file contents with md5.

    Slower than mtime-based comparison for large files, but catches
    modifications that alter only the contents while leaving the
    modification time untouched.
    """

    def __init__(self, filenames):
        self.filenames = filenames
        self.hashes = self.get_hashes()

    def get_hashes(self):
        """Return the md5 digest of each watched file, in order."""
        digests = []
        for filename in self.filenames:
            hasher = hashlib.md5()
            with open(filename, 'rb') as source:
                hasher.update(source.read())
            digests.append(hasher.digest())
        return digests

    def has_changed(self):
        """Return True if any file's digest differs from the previous
        check; also refresh the stored digests."""
        previous, self.hashes = self.hashes, self.get_hashes()
        return previous != self.hashes
class ReloadCallbackChain(object):
    """An identifier-keyed collection of callbacks fired after
    configuration has been reloaded. Built to cooperate with
    :class:`ConfigurationWatcher`.

    Invoking an instance performs two steps:

        * :func:`reload` is run against the configured `namespace`
        * every registered callback is invoked

    Usage:

    .. code-block:: python

        chain = ReloadCallbackChain()
        chain.add('some_id', callback_foo)
        chain.add('other_id', other_callback)
        ...
        # some time later
        chain.remove('some_id')

    :param namespace: the name of the namespace to :func:`reload`
    :param all_names: if True :func:`reload` all namespaces and ignore the
                      `namespace` param. Defaults to False
    :param callbacks: initial list of tuples to add to the callback chain
    """

    def __init__(self, namespace=DEFAULT, all_names=False, callbacks=None):
        self.namespace = namespace
        self.all_names = all_names
        self.callbacks = dict(callbacks) if callbacks else {}

    def add(self, identifier, callback):
        self.callbacks[identifier] = callback

    def remove(self, identifier):
        del self.callbacks[identifier]

    def __call__(self):
        # Refresh the namespace first so callbacks observe new values.
        reload(name=self.namespace, all_names=self.all_names)
        for trigger in six.itervalues(self.callbacks):
            trigger()
def build_loader_callable(load_func, filename, namespace):
    """Return a zero-argument callable that clears `namespace` and then
    reloads `filename` into it via `load_func`.

    :param load_func: loader taking ``(filename, namespace=...)``
    :param filename: path (or paths) handed through to `load_func`
    :param namespace: name of the :class:`ConfigNamespace` to wipe and fill
    """
    def load_configuration():
        # Drop stale values so the namespace mirrors the file exactly.
        target = get_namespace(namespace)
        target.clear()
        return load_func(filename, namespace=namespace)
    return load_configuration
class ConfigFacade(object):
    """Convenience wrapper pairing a :class:`ConfigurationWatcher` with a
    :class:`ReloadCallbackChain`. See :func:`ConfigFacade.load`.

    Loading a :class:`ConfigFacade` wipes the target namespace before
    reading the file into it. Use a :class:`ConfigurationWatcher`
    directly if that clearing step is not the behaviour you want.

    Usage:

    .. code-block:: python

        import staticconf

        watcher = staticconf.ConfigFacade.load(
            'config.yaml',  # Filename or list of filenames to watch
            'my_namespace',
            staticconf.YamlConfiguration,  # Callable which takes the filename
            min_interval=3  # Wait at least 3 seconds before checking modified time
        )

        watcher.add_callback('identifier', do_this_after_reload)
        watcher.reload_if_changed()
    """

    def __init__(self, watcher):
        self.watcher = watcher
        self.callback_chain = watcher.get_reloader()

    @classmethod
    def load(
        cls,
        filename,
        namespace,
        loader_func,
        min_interval=0,
        comparators=None,
    ):
        """Construct a :class:`ConfigurationWatcher`, perform the initial
        load through `loader_func`, and return the watcher wrapped in a
        facade.

        :param filename: a filename or list of filenames to monitor for
            changes
        :param namespace: the name of the :class:`ConfigNamespace` which
            will receive all config data read from `filename`
        :param loader_func: a function accepting `filename` and `namespace`
            arguments which uses loader functions from
            :mod:`staticconf.loader` to populate the namespace
        :param min_interval: minimum number of seconds to wait between
            checks for file modification
        :param comparators: a list of classes supporting the
            :class:`IComparator` interface, used to decide whether a config
            file has been modified. See ConfigurationWatcher::__init__.
        :returns: a :class:`ConfigFacade`
        """
        loader = build_loader_callable(loader_func, filename, namespace=namespace)
        reloader = ReloadCallbackChain(namespace=namespace)
        watcher = ConfigurationWatcher(
            loader,
            filename,
            min_interval=min_interval,
            reloader=reloader,
            comparators=comparators,
        )
        watcher.load_config()
        return cls(watcher)

    def add_callback(self, identifier, callback):
        self.callback_chain.add(identifier, callback)

    def reload_if_changed(self, force=False):
        """See :func:`ConfigurationWatcher.reload_if_changed`"""
        self.watcher.reload_if_changed(force=force)
|
dnephin/PyStaticConfiguration | staticconf/config.py | validate | python | def validate(name=DEFAULT, all_names=False):
for namespace in get_namespaces_from_names(name, all_names):
all(value_proxy.get_value() for value_proxy in namespace.get_value_proxies()) | Validate all registered keys after loading configuration.
Missing values or values which do not pass validation raise
:class:`staticconf.errors.ConfigurationError`. By default only validates
the `DEFAULT` namespace.
:param name: the namespace to validate
:type name: string
:param all_names: if True validates all namespaces and ignores `name`
:type all_names: boolean | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L212-L225 | [
"def get_namespaces_from_names(name, all_names):\n \"\"\"Return a generator which yields namespace objects.\"\"\"\n names = configuration_namespaces.keys() if all_names else [name]\n for name in names:\n yield get_namespace(name)\n"
] | """
Store configuration in :class:`ConfigNamespace` objects and provide tools
for reloading, and displaying help messages.
Configuration Reloading
-----------------------
Configuration reloading is supported using a :class:`ConfigFacade`, which
composes a :class:`ConfigurationWatcher` and a :class:`ReloadCallbackChain`.
These classes provide a way of reloading configuration when the file is
modified.
"""
from collections import namedtuple
import hashlib
import logging
import os
import time
import weakref
import six
from staticconf import errors
log = logging.getLogger(__name__)
# Name for the default namespace
DEFAULT = 'DEFAULT'
def remove_by_keys(dictionary, keys):
keys = set(keys)
def filter_by_keys(item):
k, _ = item
return k not in keys
return list(filter(filter_by_keys, six.iteritems(dictionary)))
class ConfigMap(object):
"""A ConfigMap can be used to wrap a dictionary in your configuration.
It will allow you to retain your mapping structure (and prevent it
from being flattened).
"""
def __init__(self, *args, **kwargs):
self.data = dict(*args, **kwargs)
def __getitem__(self, item):
return self.data[item]
def get(self, item, default=None):
return self.data.get(item, default)
def __contains__(self, item):
return item in self.data
def __len__(self):
return len(self.data)
class ConfigNamespace(object):
"""A container for related configuration values. Values are stored
using flattened keys which map to values.
Values are added to this container using :mod:`staticconf.loader`. When a
:class:`ConfigNamespace` is created, it persists for the entire life of the
process. Values will stay in the namespace until :func:`clear` is called
to remove them.
To retrieve a namespace, use :func:`get_namespace`.
To access values stored in this namespace use :mod:`staticconf.readers`
or :mod:`staticconf.schema`.
"""
def __init__(self, name):
self.name = name
self.configuration_values = {}
self.value_proxies = weakref.WeakValueDictionary()
def get_name(self):
return self.name
def get_value_proxies(self):
return list(self.value_proxies.values())
def register_proxy(self, proxy):
self.value_proxies[id(proxy)] = proxy
def apply_config_data(
self,
config_data,
error_on_unknown,
error_on_dupe,
log_keys_only=False,
):
self.validate_keys(
config_data,
error_on_unknown,
log_keys_only=log_keys_only,
)
self.has_duplicate_keys(config_data, error_on_dupe)
self.update_values(config_data)
def update_values(self, *args, **kwargs):
self.configuration_values.update(*args, **kwargs)
def get_config_values(self):
"""Return all configuration stored in this object as a dict.
"""
return self.configuration_values
def get_config_dict(self):
"""Reconstruct the nested structure of this object's configuration
and return it as a dict.
"""
config_dict = {}
for dotted_key, value in self.get_config_values().items():
subkeys = dotted_key.split('.')
d = config_dict
for key in subkeys:
d = d.setdefault(key, value if key == subkeys[-1] else {})
return config_dict
def get_known_keys(self):
return set(vproxy.config_key for vproxy in self.get_value_proxies())
def validate_keys(
self,
config_data,
error_on_unknown,
log_keys_only=False,
):
unknown = remove_by_keys(config_data, self.get_known_keys())
if not unknown:
return
if log_keys_only:
unknown = [k for k, _ in unknown]
msg = "Unexpected value in %s configuration: %s" % (self.name, unknown)
if error_on_unknown:
raise errors.ConfigurationError(msg)
log.info(msg)
def has_duplicate_keys(self, config_data, error_on_duplicate):
args = config_data, self.configuration_values, error_on_duplicate
return has_duplicate_keys(*args)
def get(self, item, default=None):
return self.configuration_values.get(item, default)
def __getitem__(self, item):
return self.configuration_values[item]
def __setitem__(self, key, value):
self.configuration_values[key] = value
def __contains__(self, item):
return item in self.configuration_values
def clear(self):
"""Remove all values from the namespace."""
self.configuration_values.clear()
def _reset(self):
self.clear()
self.value_proxies.clear()
def __str__(self):
return "%s(%s)" % (type(self).__name__, self.name)
configuration_namespaces = {DEFAULT: ConfigNamespace(DEFAULT)}
KeyDescription = namedtuple('KeyDescription', 'name validator default help')
def get_namespaces_from_names(name, all_names):
"""Return a generator which yields namespace objects."""
names = configuration_namespaces.keys() if all_names else [name]
for name in names:
yield get_namespace(name)
def get_namespace(name):
"""Return a :class:`ConfigNamespace` by name, creating the
namespace if it does not exist.
"""
if name not in configuration_namespaces:
configuration_namespaces[name] = ConfigNamespace(name)
return configuration_namespaces[name]
def reload(name=DEFAULT, all_names=False):
"""Reload one or all :class:`ConfigNamespace`. Reload clears the cache of
:mod:`staticconf.schema` and :mod:`staticconf.getters`, allowing them to
pickup the latest values in the namespace.
Defaults to reloading just the DEFAULT namespace.
:param name: the name of the :class:`ConfigNamespace` to reload
:param all_names: If True, reload all namespaces, and ignore `name`
"""
for namespace in get_namespaces_from_names(name, all_names):
for value_proxy in namespace.get_value_proxies():
value_proxy.reset()
class ConfigHelp(object):
"""Register and display help messages about config keys."""
def __init__(self):
self.descriptions = {}
def add(self, name, validator, default, namespace, help):
desc = KeyDescription(name, validator, default, help)
self.descriptions.setdefault(namespace, []).append(desc)
def view_help(self):
"""Return a help message describing all the statically configured keys.
"""
def format_desc(desc):
return "%s (Type: %s, Default: %s)\n%s" % (
desc.name,
desc.validator.__name__.replace('validate_', ''),
desc.default,
desc.help or '')
def format_namespace(key, desc_list):
return "\nNamespace: %s\n%s" % (
key,
'\n'.join(sorted(format_desc(desc) for desc in desc_list)))
def namespace_cmp(item):
name, _ = item
return chr(0) if name == DEFAULT else name
return '\n'.join(format_namespace(*desc) for desc in
sorted(six.iteritems(self.descriptions),
key=namespace_cmp))
def clear(self):
self.descriptions.clear()
config_help = ConfigHelp()
view_help = config_help.view_help
def _reset():
"""Used for internal testing."""
for namespace in configuration_namespaces.values():
namespace._reset()
config_help.clear()
def has_duplicate_keys(config_data, base_conf, raise_error):
"""Compare two dictionaries for duplicate keys. if raise_error is True
then raise an exception, otherwise log and return True."""
duplicate_keys = set(base_conf) & set(config_data)
if not duplicate_keys:
return
msg = "Duplicate keys in config: %s" % duplicate_keys
if raise_error:
raise errors.ConfigurationError(msg)
log.info(msg)
return True
class ConfigurationWatcher(object):
"""Watches a file for modification and reloads the configuration
when it's modified. Accepts a min_interval to throttle checks.
The default :func:`reload()` operation is to reload all namespaces. To
only reload a specific namespace use a :class:`ReloadCallbackChain`
for the `reloader`.
.. seealso::
:func:`ConfigFacade.load` which provides a more concise interface
for the common case.
Usage:
.. code-block:: python
import staticconf
from staticconf import config
def build_configuration(filename, namespace):
config_loader = partial(staticconf.YamlConfiguration,
filename, namespace=namespace)
reloader = config.ReloadCallbackChain(namespace)
return config.ConfigurationWatcher(
config_loader, filename, min_interval=2, reloader=reloader)
config_watcher = build_configuration('config.yaml', 'my_namespace')
# Load the initial configuration
config_watcher.config_loader()
# Do some work
for item in work:
config_watcher.reload_if_changed()
...
:param config_loader: a function which takes no arguments. It is called
by :func:`reload_if_changed` if the file has been modified
:param filenames: a filename or list of filenames to watch for modifications
:param min_interval: minimum number of seconds to wait between calls to
:func:`os.path.getmtime` to check if a file has been modified.
:param reloader: a function which is called after `config_loader` when a
file has been modified. Defaults to an empty
:class:`ReloadCallbackChain`
:param comparators: a list of classes which support the
:class:`IComparator` interface which are used to determine if a config
file has been modified. Defaults to :class:`MTimeComparator`.
"""
def __init__(
self,
config_loader,
filenames,
min_interval=0,
reloader=None,
comparators=None):
self.config_loader = config_loader
self.filenames = self.get_filename_list(filenames)
self.min_interval = min_interval
self.last_check = time.time()
self.reloader = reloader or ReloadCallbackChain(all_names=True)
comparators = comparators or [MTimeComparator]
self.comparators = [comp(self.filenames) for comp in comparators]
def get_filename_list(self, filenames):
if isinstance(filenames, six.string_types):
filenames = [filenames]
filenames = sorted(os.path.abspath(name) for name in filenames)
if not filenames:
raise ValueError(
"ConfigurationWatcher requires at least one filename to watch")
return filenames
@property
def should_check(self):
return self.last_check + self.min_interval <= time.time()
def reload_if_changed(self, force=False):
"""If the file(s) being watched by this object have changed,
their configuration will be loaded again using `config_loader`.
Otherwise this is a noop.
:param force: If True ignore the `min_interval` and proceed to
file modified comparisons. To force a reload use
:func:`reload` directly.
"""
if (force or self.should_check) and self.file_modified():
return self.reload()
def file_modified(self):
self.last_check = time.time()
return any(comp.has_changed() for comp in self.comparators)
def reload(self):
config_dict = self.config_loader()
self.reloader()
return config_dict
def get_reloader(self):
return self.reloader
def load_config(self):
return self.config_loader()
class IComparator(object):
"""Interface for a comparator which is used by :class:`ConfigurationWatcher`
to determine if a file has been modified since the last check. A comparator
is used to reduce the work required to reload configuration. Comparators
should implement a mechanism that is relatively efficient (and scalable),
so it can be performed frequently.
:param filenames: A list of absolute paths to configuration files.
"""
def __init__(self, filenames):
pass
def has_changed(self):
"""Returns True if any of the files have been modified since the last
call to :func:`has_changed`. Returns False otherwise.
"""
pass
class InodeComparator(object):
"""Compare files by inode and device number. This is a good comparator to
use when your files can change multiple times per second.
"""
def __init__(self, filenames):
self.filenames = filenames
self.inodes = self.get_inodes()
def get_inodes(self):
def get_inode(stbuf):
return stbuf.st_dev, stbuf.st_ino
return [get_inode(os.stat(filename)) for filename in self.filenames]
def has_changed(self):
last_inodes, self.inodes = self.inodes, self.get_inodes()
return last_inodes != self.inodes
def build_compare_func(err_logger=None):
"""Returns a compare_func that can be passed to MTimeComparator.
The returned compare_func first tries os.path.getmtime(filename),
then calls err_logger(filename) if that fails. If err_logger is None,
then it does nothing. err_logger is always called within the context of
an OSError raised by os.path.getmtime(filename). Information on this
error can be retrieved by calling sys.exc_info inside of err_logger."""
def compare_func(filename):
try:
return os.path.getmtime(filename)
except OSError:
if err_logger is not None:
err_logger(filename)
return -1
return compare_func
class MTimeComparator(object):
"""Compare files by modified time, or using compare_func,
if it is not None.
.. note::
Most filesystems only store modified time with second granularity
so multiple changes within the same second can be ignored.
"""
def __init__(self, filenames, compare_func=None):
self.compare_func = (os.path.getmtime if compare_func is None
else compare_func)
self.filenames_mtimes = {
filename: self.compare_func(filename) for filename in filenames
}
def has_changed(self):
for filename, compare_val in self.filenames_mtimes.items():
current_compare_val = self.compare_func(filename)
if compare_val != current_compare_val:
self.filenames_mtimes[filename] = current_compare_val
return True
return False
class MD5Comparator(object):
"""Compare files by md5 hash of their contents. This comparator will be
slower for larger files, but is more resilient to modifications which only
change mtime, but not the files contents.
"""
def __init__(self, filenames):
self.filenames = filenames
self.hashes = self.get_hashes()
def get_hashes(self):
def build_hash(filename):
hasher = hashlib.md5()
with open(filename, 'rb') as fh:
hasher.update(fh.read())
return hasher.digest()
return [build_hash(filename) for filename in self.filenames]
def has_changed(self):
last_hashes, self.hashes = self.hashes, self.get_hashes()
return last_hashes != self.hashes
class ReloadCallbackChain(object):
"""A chain of callbacks which will be triggered after configuration is
reloaded. Designed to work with :class:`ConfigurationWatcher`.
When this class is called it performs two operations:
* calls :func:`reload` on the `namespace`
* calls all attached callbacks
Usage:
.. code-block:: python
chain = ReloadCallbackChain()
chain.add('some_id', callback_foo)
chain.add('other_id', other_callback)
...
# some time later
chain.remove('some_id')
:param namespace: the name of the namespace to :func:`reload`
:param all_names: if True :func:`reload` all namespaces and ignore the
`namespace` param. Defaults to False
:param callbacks: initial list of tuples to add to the callback chain
"""
def __init__(self, namespace=DEFAULT, all_names=False, callbacks=None):
self.namespace = namespace
self.all_names = all_names
self.callbacks = dict(callbacks or ())
def add(self, identifier, callback):
self.callbacks[identifier] = callback
def remove(self, identifier):
del self.callbacks[identifier]
def __call__(self):
reload(name=self.namespace, all_names=self.all_names)
for callback in six.itervalues(self.callbacks):
callback()
def build_loader_callable(load_func, filename, namespace):
def load_configuration():
get_namespace(namespace).clear()
return load_func(filename, namespace=namespace)
return load_configuration
class ConfigFacade(object):
"""A facade around a :class:`ConfigurationWatcher` and a
:class:`ReloadCallbackChain`. See :func:`ConfigFacade.load`.
When a :class:`ConfigFacade` is loaded it will clear the namespace of
all configuration and load the file into the namespace. If this is not
the behaviour you want, use a :class:`ConfigurationWatcher` instead.
Usage:
.. code-block:: python
import staticconf
watcher = staticconf.ConfigFacade.load(
'config.yaml', # Filename or list of filenames to watch
'my_namespace',
staticconf.YamlConfiguration, # Callable which takes the filename
min_interval=3 # Wait at least 3 seconds before checking modified time
)
watcher.add_callback('identifier', do_this_after_reload)
watcher.reload_if_changed()
"""
def __init__(self, watcher):
self.watcher = watcher
self.callback_chain = watcher.get_reloader()
@classmethod
def load(
cls,
filename,
namespace,
loader_func,
min_interval=0,
comparators=None,
):
"""Create a new :class:`ConfigurationWatcher` and load the initial
configuration by calling `loader_func`.
:param filename: a filename or list of filenames to monitor for changes
:param namespace: the name of a namespace to use when loading
configuration. All config data from `filename` will
end up in a :class:`ConfigNamespace` with this name
:param loader_func: a function which accepts two arguments and uses
loader functions from :mod:`staticconf.loader` to
load configuration data into a namespace. The
arguments are `filename` and `namespace`
:param min_interval: minimum number of seconds to wait between calls to
:func:`os.path.getmtime` to check if a file has
been modified.
:param comparators: a list of classes which support the
:class:`IComparator` interface which are used to determine if a config
file has been modified. See ConfigurationWatcher::__init__.
:returns: a :class:`ConfigFacade`
"""
watcher = ConfigurationWatcher(
build_loader_callable(loader_func, filename, namespace=namespace),
filename,
min_interval=min_interval,
reloader=ReloadCallbackChain(namespace=namespace),
comparators=comparators,
)
watcher.load_config()
return cls(watcher)
def add_callback(self, identifier, callback):
self.callback_chain.add(identifier, callback)
def reload_if_changed(self, force=False):
"""See :func:`ConfigurationWatcher.reload_if_changed` """
self.watcher.reload_if_changed(force=force)
|
dnephin/PyStaticConfiguration | staticconf/config.py | has_duplicate_keys | python | def has_duplicate_keys(config_data, base_conf, raise_error):
duplicate_keys = set(base_conf) & set(config_data)
if not duplicate_keys:
return
msg = "Duplicate keys in config: %s" % duplicate_keys
if raise_error:
raise errors.ConfigurationError(msg)
log.info(msg)
return True | Compare two dictionaries for duplicate keys. if raise_error is True
then raise on exception, otherwise log return True. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L276-L286 | null | """
Store configuration in :class:`ConfigNamespace` objects and provide tools
for reloading, and displaying help messages.
Configuration Reloading
-----------------------
Configuration reloading is supported using a :class:`ConfigFacade`, which
composes a :class:`ConfigurationWatcher` and a :class:`ReloadCallbackChain`.
These classes provide a way of reloading configuration when the file is
modified.
"""
from collections import namedtuple
import hashlib
import logging
import os
import time
import weakref
import six
from staticconf import errors
log = logging.getLogger(__name__)
# Name for the default namespace
DEFAULT = 'DEFAULT'
def remove_by_keys(dictionary, keys):
keys = set(keys)
def filter_by_keys(item):
k, _ = item
return k not in keys
return list(filter(filter_by_keys, six.iteritems(dictionary)))
class ConfigMap(object):
"""A ConfigMap can be used to wrap a dictionary in your configuration.
It will allow you to retain your mapping structure (and prevent it
from being flattened).
"""
def __init__(self, *args, **kwargs):
self.data = dict(*args, **kwargs)
def __getitem__(self, item):
return self.data[item]
def get(self, item, default=None):
return self.data.get(item, default)
def __contains__(self, item):
return item in self.data
def __len__(self):
return len(self.data)
class ConfigNamespace(object):
"""A container for related configuration values. Values are stored
using flattened keys which map to values.
Values are added to this container using :mod:`staticconf.loader`. When a
:class:`ConfigNamespace` is created, it persists for the entire life of the
process. Values will stay in the namespace until :func:`clear` is called
to remove them.
To retrieve a namespace, use :func:`get_namespace`.
To access values stored in this namespace use :mod:`staticconf.readers`
or :mod:`staticconf.schema`.
"""
def __init__(self, name):
self.name = name
self.configuration_values = {}
self.value_proxies = weakref.WeakValueDictionary()
def get_name(self):
return self.name
def get_value_proxies(self):
return list(self.value_proxies.values())
def register_proxy(self, proxy):
self.value_proxies[id(proxy)] = proxy
def apply_config_data(
self,
config_data,
error_on_unknown,
error_on_dupe,
log_keys_only=False,
):
self.validate_keys(
config_data,
error_on_unknown,
log_keys_only=log_keys_only,
)
self.has_duplicate_keys(config_data, error_on_dupe)
self.update_values(config_data)
def update_values(self, *args, **kwargs):
self.configuration_values.update(*args, **kwargs)
def get_config_values(self):
"""Return all configuration stored in this object as a dict.
"""
return self.configuration_values
def get_config_dict(self):
"""Reconstruct the nested structure of this object's configuration
and return it as a dict.
"""
config_dict = {}
for dotted_key, value in self.get_config_values().items():
subkeys = dotted_key.split('.')
d = config_dict
for key in subkeys:
d = d.setdefault(key, value if key == subkeys[-1] else {})
return config_dict
def get_known_keys(self):
return set(vproxy.config_key for vproxy in self.get_value_proxies())
def validate_keys(
self,
config_data,
error_on_unknown,
log_keys_only=False,
):
unknown = remove_by_keys(config_data, self.get_known_keys())
if not unknown:
return
if log_keys_only:
unknown = [k for k, _ in unknown]
msg = "Unexpected value in %s configuration: %s" % (self.name, unknown)
if error_on_unknown:
raise errors.ConfigurationError(msg)
log.info(msg)
def has_duplicate_keys(self, config_data, error_on_duplicate):
args = config_data, self.configuration_values, error_on_duplicate
return has_duplicate_keys(*args)
def get(self, item, default=None):
return self.configuration_values.get(item, default)
def __getitem__(self, item):
return self.configuration_values[item]
def __setitem__(self, key, value):
self.configuration_values[key] = value
def __contains__(self, item):
return item in self.configuration_values
def clear(self):
"""Remove all values from the namespace."""
self.configuration_values.clear()
def _reset(self):
self.clear()
self.value_proxies.clear()
def __str__(self):
return "%s(%s)" % (type(self).__name__, self.name)
configuration_namespaces = {DEFAULT: ConfigNamespace(DEFAULT)}
KeyDescription = namedtuple('KeyDescription', 'name validator default help')
def get_namespaces_from_names(name, all_names):
"""Return a generator which yields namespace objects."""
names = configuration_namespaces.keys() if all_names else [name]
for name in names:
yield get_namespace(name)
def get_namespace(name):
"""Return a :class:`ConfigNamespace` by name, creating the
namespace if it does not exist.
"""
if name not in configuration_namespaces:
configuration_namespaces[name] = ConfigNamespace(name)
return configuration_namespaces[name]
def reload(name=DEFAULT, all_names=False):
"""Reload one or all :class:`ConfigNamespace`. Reload clears the cache of
:mod:`staticconf.schema` and :mod:`staticconf.getters`, allowing them to
pickup the latest values in the namespace.
Defaults to reloading just the DEFAULT namespace.
:param name: the name of the :class:`ConfigNamespace` to reload
:param all_names: If True, reload all namespaces, and ignore `name`
"""
for namespace in get_namespaces_from_names(name, all_names):
for value_proxy in namespace.get_value_proxies():
value_proxy.reset()
def validate(name=DEFAULT, all_names=False):
"""Validate all registered keys after loading configuration.
Missing values or values which do not pass validation raise
:class:`staticconf.errors.ConfigurationError`. By default only validates
the `DEFAULT` namespace.
:param name: the namespace to validate
:type name: string
:param all_names: if True validates all namespaces and ignores `name`
:type all_names: boolean
"""
for namespace in get_namespaces_from_names(name, all_names):
all(value_proxy.get_value() for value_proxy in namespace.get_value_proxies())
class ConfigHelp(object):
"""Register and display help messages about config keys."""
def __init__(self):
self.descriptions = {}
def add(self, name, validator, default, namespace, help):
desc = KeyDescription(name, validator, default, help)
self.descriptions.setdefault(namespace, []).append(desc)
def view_help(self):
"""Return a help message describing all the statically configured keys.
"""
def format_desc(desc):
return "%s (Type: %s, Default: %s)\n%s" % (
desc.name,
desc.validator.__name__.replace('validate_', ''),
desc.default,
desc.help or '')
def format_namespace(key, desc_list):
return "\nNamespace: %s\n%s" % (
key,
'\n'.join(sorted(format_desc(desc) for desc in desc_list)))
def namespace_cmp(item):
name, _ = item
return chr(0) if name == DEFAULT else name
return '\n'.join(format_namespace(*desc) for desc in
sorted(six.iteritems(self.descriptions),
key=namespace_cmp))
def clear(self):
self.descriptions.clear()
config_help = ConfigHelp()
view_help = config_help.view_help
def _reset():
"""Used for internal testing."""
for namespace in configuration_namespaces.values():
namespace._reset()
config_help.clear()
class ConfigurationWatcher(object):
"""Watches a file for modification and reloads the configuration
when it's modified. Accepts a min_interval to throttle checks.
The default :func:`reload()` operation is to reload all namespaces. To
only reload a specific namespace use a :class:`ReloadCallbackChain`
for the `reloader`.
.. seealso::
:func:`ConfigFacade.load` which provides a more concise interface
for the common case.
Usage:
.. code-block:: python
import staticconf
from staticconf import config
def build_configuration(filename, namespace):
config_loader = partial(staticconf.YamlConfiguration,
filename, namespace=namespace)
reloader = config.ReloadCallbackChain(namespace)
return config.ConfigurationWatcher(
config_loader, filename, min_interval=2, reloader=reloader)
config_watcher = build_configuration('config.yaml', 'my_namespace')
# Load the initial configuration
config_watcher.config_loader()
# Do some work
for item in work:
config_watcher.reload_if_changed()
...
:param config_loader: a function which takes no arguments. It is called
by :func:`reload_if_changed` if the file has been modified
:param filenames: a filename or list of filenames to watch for modifications
:param min_interval: minimum number of seconds to wait between calls to
:func:`os.path.getmtime` to check if a file has been modified.
:param reloader: a function which is called after `config_loader` when a
file has been modified. Defaults to an empty
:class:`ReloadCallbackChain`
:param comparators: a list of classes which support the
:class:`IComparator` interface which are used to determine if a config
file has been modified. Defaults to :class:`MTimeComparator`.
"""
def __init__(
self,
config_loader,
filenames,
min_interval=0,
reloader=None,
comparators=None):
self.config_loader = config_loader
self.filenames = self.get_filename_list(filenames)
self.min_interval = min_interval
self.last_check = time.time()
self.reloader = reloader or ReloadCallbackChain(all_names=True)
comparators = comparators or [MTimeComparator]
self.comparators = [comp(self.filenames) for comp in comparators]
def get_filename_list(self, filenames):
if isinstance(filenames, six.string_types):
filenames = [filenames]
filenames = sorted(os.path.abspath(name) for name in filenames)
if not filenames:
raise ValueError(
"ConfigurationWatcher requires at least one filename to watch")
return filenames
@property
def should_check(self):
return self.last_check + self.min_interval <= time.time()
def reload_if_changed(self, force=False):
"""If the file(s) being watched by this object have changed,
their configuration will be loaded again using `config_loader`.
Otherwise this is a noop.
:param force: If True ignore the `min_interval` and proceed to
file modified comparisons. To force a reload use
:func:`reload` directly.
"""
if (force or self.should_check) and self.file_modified():
return self.reload()
def file_modified(self):
self.last_check = time.time()
return any(comp.has_changed() for comp in self.comparators)
def reload(self):
config_dict = self.config_loader()
self.reloader()
return config_dict
    def get_reloader(self):
        """Return the reloader callback chain attached to this watcher."""
        return self.reloader

    def load_config(self):
        """Invoke `config_loader` once and return whatever it produces."""
        return self.config_loader()
class IComparator(object):
    """Interface for a comparator which is used by :class:`ConfigurationWatcher`
    to determine if a file has been modified since the last check. A comparator
    is used to reduce the work required to reload configuration. Comparators
    should implement a mechanism that is relatively efficient (and scalable),
    so it can be performed frequently.

    :param filenames: A list of absolute paths to configuration files.
    """

    def __init__(self, filenames):
        # Concrete comparators snapshot their initial state here; the
        # interface itself stores nothing.
        pass

    def has_changed(self):
        """Returns True if any of the files have been modified since the last
        call to :func:`has_changed`. Returns False otherwise.
        """
        pass
class InodeComparator(object):
    """Detect modifications by tracking each file's (device, inode) pair.

    This is a good comparator to use when your files can change multiple
    times per second.
    """

    def __init__(self, filenames):
        self.filenames = filenames
        self.inodes = self.get_inodes()

    def get_inodes(self):
        """Return the current (st_dev, st_ino) pair for every watched file."""
        return [
            (stat_result.st_dev, stat_result.st_ino)
            for stat_result in (os.stat(name) for name in self.filenames)
        ]

    def has_changed(self):
        """Refresh the stored inode list and report whether it differed."""
        previous, self.inodes = self.inodes, self.get_inodes()
        return previous != self.inodes
def build_compare_func(err_logger=None):
    """Build a compare function suitable for :class:`MTimeComparator`.

    The returned function reports a file's modification time. When
    :func:`os.path.getmtime` raises :class:`OSError` the function returns -1
    instead; if *err_logger* is given it is called with the filename while
    the OSError is being handled, so ``sys.exc_info`` can be used inside it
    to inspect the failure.
    """
    def compare_func(filename):
        try:
            mtime = os.path.getmtime(filename)
        except OSError:
            if err_logger is not None:
                err_logger(filename)
            return -1
        return mtime
    return compare_func
class MTimeComparator(object):
    """Flag files as changed when their comparison value moves.

    By default the comparison value is :func:`os.path.getmtime`; pass
    `compare_func` to use a different measure (see
    :func:`build_compare_func`).

    .. note::

        Most filesystems only store modified time with second granularity
        so multiple changes within the same second can be ignored.
    """

    def __init__(self, filenames, compare_func=None):
        if compare_func is None:
            compare_func = os.path.getmtime
        self.compare_func = compare_func
        # Map each filename to its most recently observed comparison value.
        self.filenames_mtimes = dict(
            (filename, compare_func(filename)) for filename in filenames
        )

    def has_changed(self):
        """Return True (remembering the new value) for the first file whose
        comparison value differs; False when nothing changed.
        """
        for filename, previous in self.filenames_mtimes.items():
            current = self.compare_func(filename)
            if previous == current:
                continue
            self.filenames_mtimes[filename] = current
            return True
        return False
class MD5Comparator(object):
    """Compare files by md5 hash of their contents. This comparator will be
    slower for larger files, but is more resilient to modifications which only
    change mtime, but not the files contents.
    """

    # Hash files in fixed-size chunks so arbitrarily large files never need
    # to be held in memory at once (the digest is identical either way).
    _CHUNK_SIZE = 64 * 1024

    def __init__(self, filenames):
        self.filenames = filenames
        self.hashes = self.get_hashes()

    def get_hashes(self):
        """Return the current md5 digest of every watched file."""
        def build_hash(filename):
            hasher = hashlib.md5()
            with open(filename, 'rb') as fh:
                for chunk in iter(lambda: fh.read(self._CHUNK_SIZE), b''):
                    hasher.update(chunk)
            return hasher.digest()
        return [build_hash(filename) for filename in self.filenames]

    def has_changed(self):
        """Recompute the digests and report whether any file's contents
        differ from the previous check.
        """
        last_hashes, self.hashes = self.hashes, self.get_hashes()
        return last_hashes != self.hashes
class ReloadCallbackChain(object):
    """A chain of callbacks which will be triggered after configuration is
    reloaded. Designed to work with :class:`ConfigurationWatcher`.

    When this class is called it performs two operations:

        * calls :func:`reload` on the `namespace`
        * calls all attached callbacks

    Usage:

    .. code-block:: python

        chain = ReloadCallbackChain()
        chain.add('some_id', callback_foo)
        chain.add('other_id', other_callback)
        ...

        # some time later
        chain.remove('some_id')

    :param namespace: the name of the namespace to :func:`reload`
    :param all_names: if True :func:`reload` all namespaces and ignore the
                      `namespace` param. Defaults to False
    :param callbacks: initial list of (identifier, callback) tuples to add
                      to the callback chain
    """

    def __init__(self, namespace=DEFAULT, all_names=False, callbacks=None):
        self.namespace = namespace
        self.all_names = all_names
        # Keyed by identifier so callbacks can be removed individually.
        self.callbacks = dict(callbacks or ())

    def add(self, identifier, callback):
        """Register `callback` under `identifier`."""
        self.callbacks[identifier] = callback

    def remove(self, identifier):
        """Remove the callback registered under `identifier`.

        :raises KeyError: if nothing is registered under `identifier`
        """
        del self.callbacks[identifier]

    def __call__(self):
        # Reload the namespace(s) first so callbacks run against the newly
        # loaded configuration.
        reload(name=self.namespace, all_names=self.all_names)
        for callback in six.itervalues(self.callbacks):
            callback()
def build_loader_callable(load_func, filename, namespace):
    """Return a zero-argument callable that clears *namespace* and then
    loads *filename* into it via *load_func*.
    """
    def load_configuration():
        # Drop stale values first so keys removed from the file do not
        # linger in the namespace across reloads.
        target = get_namespace(namespace)
        target.clear()
        return load_func(filename, namespace=namespace)
    return load_configuration
class ConfigFacade(object):
    """Convenience wrapper pairing a :class:`ConfigurationWatcher` with a
    :class:`ReloadCallbackChain`. See :func:`ConfigFacade.load`.

    Loading a :class:`ConfigFacade` clears the target namespace before
    loading the file into it. If that is not the behaviour you want, use a
    :class:`ConfigurationWatcher` directly.

    Usage:

    .. code-block:: python

        import staticconf

        watcher = staticconf.ConfigFacade.load(
            'config.yaml', # Filename or list of filenames to watch
            'my_namespace',
            staticconf.YamlConfiguration, # Callable which takes the filename
            min_interval=3 # Wait at least 3 seconds before checking modified time
        )

        watcher.add_callback('identifier', do_this_after_reload)
        watcher.reload_if_changed()
    """

    def __init__(self, watcher):
        self.watcher = watcher
        self.callback_chain = watcher.get_reloader()

    @classmethod
    def load(
        cls,
        filename,
        namespace,
        loader_func,
        min_interval=0,
        comparators=None,
    ):
        """Build a :class:`ConfigurationWatcher` for *filename*, load the
        initial configuration, and wrap the watcher in a facade.

        :param filename: a filename or list of filenames to monitor for changes
        :param namespace: the name of a namespace to use when loading
                          configuration. All config data from `filename` will
                          end up in a :class:`ConfigNamespace` with this name
        :param loader_func: a function which accepts two arguments and uses
                            loader functions from :mod:`staticconf.loader` to
                            load configuration data into a namespace. The
                            arguments are `filename` and `namespace`
        :param min_interval: minimum number of seconds to wait between calls to
                             :func:`os.path.getmtime` to check if a file has
                             been modified.
        :param comparators: a list of classes which support the
            :class:`IComparator` interface which are used to determine if a
            config file has been modified. See ConfigurationWatcher::__init__.
        :returns: a :class:`ConfigFacade`
        """
        loader = build_loader_callable(loader_func, filename, namespace=namespace)
        reloader = ReloadCallbackChain(namespace=namespace)
        watcher = ConfigurationWatcher(
            loader,
            filename,
            min_interval=min_interval,
            reloader=reloader,
            comparators=comparators,
        )
        # Populate the namespace immediately so values are available before
        # the first reload_if_changed() call.
        watcher.load_config()
        return cls(watcher)

    def add_callback(self, identifier, callback):
        """Register *callback* to run after each successful reload."""
        self.callback_chain.add(identifier, callback)

    def reload_if_changed(self, force=False):
        """See :func:`ConfigurationWatcher.reload_if_changed` """
        self.watcher.reload_if_changed(force=force)
|
dnephin/PyStaticConfiguration | staticconf/config.py | build_compare_func | python | def build_compare_func(err_logger=None):
def compare_func(filename):
try:
return os.path.getmtime(filename)
except OSError:
if err_logger is not None:
err_logger(filename)
return -1
return compare_func | Returns a compare_func that can be passed to MTimeComparator.
The returned compare_func first tries os.path.getmtime(filename),
then calls err_logger(filename) if that fails. If err_logger is None,
then it does nothing. err_logger is always called within the context of
an OSError raised by os.path.getmtime(filename). Information on this
error can be retrieved by calling sys.exc_info inside of err_logger. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L436-L451 | null | """
Store configuration in :class:`ConfigNamespace` objects and provide tools
for reloading, and displaying help messages.
Configuration Reloading
-----------------------
Configuration reloading is supported using a :class:`ConfigFacade`, which
composes a :class:`ConfigurationWatcher` and a :class:`ReloadCallbackChain`.
These classes provide a way of reloading configuration when the file is
modified.
"""
from collections import namedtuple
import hashlib
import logging
import os
import time
import weakref
import six
from staticconf import errors
log = logging.getLogger(__name__)
# Name for the default namespace
DEFAULT = 'DEFAULT'
def remove_by_keys(dictionary, keys):
keys = set(keys)
def filter_by_keys(item):
k, _ = item
return k not in keys
return list(filter(filter_by_keys, six.iteritems(dictionary)))
class ConfigMap(object):
"""A ConfigMap can be used to wrap a dictionary in your configuration.
It will allow you to retain your mapping structure (and prevent it
from being flattened).
"""
def __init__(self, *args, **kwargs):
self.data = dict(*args, **kwargs)
def __getitem__(self, item):
return self.data[item]
def get(self, item, default=None):
return self.data.get(item, default)
def __contains__(self, item):
return item in self.data
def __len__(self):
return len(self.data)
class ConfigNamespace(object):
"""A container for related configuration values. Values are stored
using flattened keys which map to values.
Values are added to this container using :mod:`staticconf.loader`. When a
:class:`ConfigNamespace` is created, it persists for the entire life of the
process. Values will stay in the namespace until :func:`clear` is called
to remove them.
To retrieve a namespace, use :func:`get_namespace`.
To access values stored in this namespace use :mod:`staticconf.readers`
or :mod:`staticconf.schema`.
"""
def __init__(self, name):
self.name = name
self.configuration_values = {}
self.value_proxies = weakref.WeakValueDictionary()
def get_name(self):
return self.name
def get_value_proxies(self):
return list(self.value_proxies.values())
def register_proxy(self, proxy):
self.value_proxies[id(proxy)] = proxy
def apply_config_data(
self,
config_data,
error_on_unknown,
error_on_dupe,
log_keys_only=False,
):
self.validate_keys(
config_data,
error_on_unknown,
log_keys_only=log_keys_only,
)
self.has_duplicate_keys(config_data, error_on_dupe)
self.update_values(config_data)
def update_values(self, *args, **kwargs):
self.configuration_values.update(*args, **kwargs)
def get_config_values(self):
"""Return all configuration stored in this object as a dict.
"""
return self.configuration_values
def get_config_dict(self):
"""Reconstruct the nested structure of this object's configuration
and return it as a dict.
"""
config_dict = {}
for dotted_key, value in self.get_config_values().items():
subkeys = dotted_key.split('.')
d = config_dict
for key in subkeys:
d = d.setdefault(key, value if key == subkeys[-1] else {})
return config_dict
def get_known_keys(self):
return set(vproxy.config_key for vproxy in self.get_value_proxies())
def validate_keys(
self,
config_data,
error_on_unknown,
log_keys_only=False,
):
unknown = remove_by_keys(config_data, self.get_known_keys())
if not unknown:
return
if log_keys_only:
unknown = [k for k, _ in unknown]
msg = "Unexpected value in %s configuration: %s" % (self.name, unknown)
if error_on_unknown:
raise errors.ConfigurationError(msg)
log.info(msg)
def has_duplicate_keys(self, config_data, error_on_duplicate):
args = config_data, self.configuration_values, error_on_duplicate
return has_duplicate_keys(*args)
def get(self, item, default=None):
return self.configuration_values.get(item, default)
def __getitem__(self, item):
return self.configuration_values[item]
def __setitem__(self, key, value):
self.configuration_values[key] = value
def __contains__(self, item):
return item in self.configuration_values
def clear(self):
"""Remove all values from the namespace."""
self.configuration_values.clear()
def _reset(self):
self.clear()
self.value_proxies.clear()
def __str__(self):
return "%s(%s)" % (type(self).__name__, self.name)
configuration_namespaces = {DEFAULT: ConfigNamespace(DEFAULT)}
KeyDescription = namedtuple('KeyDescription', 'name validator default help')
def get_namespaces_from_names(name, all_names):
"""Return a generator which yields namespace objects."""
names = configuration_namespaces.keys() if all_names else [name]
for name in names:
yield get_namespace(name)
def get_namespace(name):
"""Return a :class:`ConfigNamespace` by name, creating the
namespace if it does not exist.
"""
if name not in configuration_namespaces:
configuration_namespaces[name] = ConfigNamespace(name)
return configuration_namespaces[name]
def reload(name=DEFAULT, all_names=False):
"""Reload one or all :class:`ConfigNamespace`. Reload clears the cache of
:mod:`staticconf.schema` and :mod:`staticconf.getters`, allowing them to
pickup the latest values in the namespace.
Defaults to reloading just the DEFAULT namespace.
:param name: the name of the :class:`ConfigNamespace` to reload
:param all_names: If True, reload all namespaces, and ignore `name`
"""
for namespace in get_namespaces_from_names(name, all_names):
for value_proxy in namespace.get_value_proxies():
value_proxy.reset()
def validate(name=DEFAULT, all_names=False):
"""Validate all registered keys after loading configuration.
Missing values or values which do not pass validation raise
:class:`staticconf.errors.ConfigurationError`. By default only validates
the `DEFAULT` namespace.
:param name: the namespace to validate
:type name: string
:param all_names: if True validates all namespaces and ignores `name`
:type all_names: boolean
"""
for namespace in get_namespaces_from_names(name, all_names):
all(value_proxy.get_value() for value_proxy in namespace.get_value_proxies())
class ConfigHelp(object):
"""Register and display help messages about config keys."""
def __init__(self):
self.descriptions = {}
def add(self, name, validator, default, namespace, help):
desc = KeyDescription(name, validator, default, help)
self.descriptions.setdefault(namespace, []).append(desc)
def view_help(self):
"""Return a help message describing all the statically configured keys.
"""
def format_desc(desc):
return "%s (Type: %s, Default: %s)\n%s" % (
desc.name,
desc.validator.__name__.replace('validate_', ''),
desc.default,
desc.help or '')
def format_namespace(key, desc_list):
return "\nNamespace: %s\n%s" % (
key,
'\n'.join(sorted(format_desc(desc) for desc in desc_list)))
def namespace_cmp(item):
name, _ = item
return chr(0) if name == DEFAULT else name
return '\n'.join(format_namespace(*desc) for desc in
sorted(six.iteritems(self.descriptions),
key=namespace_cmp))
def clear(self):
self.descriptions.clear()
config_help = ConfigHelp()
view_help = config_help.view_help
def _reset():
"""Used for internal testing."""
for namespace in configuration_namespaces.values():
namespace._reset()
config_help.clear()
def has_duplicate_keys(config_data, base_conf, raise_error):
"""Compare two dictionaries for duplicate keys. if raise_error is True
then raise on exception, otherwise log return True."""
duplicate_keys = set(base_conf) & set(config_data)
if not duplicate_keys:
return
msg = "Duplicate keys in config: %s" % duplicate_keys
if raise_error:
raise errors.ConfigurationError(msg)
log.info(msg)
return True
class ConfigurationWatcher(object):
"""Watches a file for modification and reloads the configuration
when it's modified. Accepts a min_interval to throttle checks.
The default :func:`reload()` operation is to reload all namespaces. To
only reload a specific namespace use a :class:`ReloadCallbackChain`
for the `reloader`.
.. seealso::
:func:`ConfigFacade.load` which provides a more concise interface
for the common case.
Usage:
.. code-block:: python
import staticconf
from staticconf import config
def build_configuration(filename, namespace):
config_loader = partial(staticconf.YamlConfiguration,
filename, namespace=namespace)
reloader = config.ReloadCallbackChain(namespace)
return config.ConfigurationWatcher(
config_loader, filename, min_interval=2, reloader=reloader)
config_watcher = build_configuration('config.yaml', 'my_namespace')
# Load the initial configuration
config_watcher.config_loader()
# Do some work
for item in work:
config_watcher.reload_if_changed()
...
:param config_loader: a function which takes no arguments. It is called
by :func:`reload_if_changed` if the file has been modified
:param filenames: a filename or list of filenames to watch for modifications
:param min_interval: minimum number of seconds to wait between calls to
:func:`os.path.getmtime` to check if a file has been modified.
:param reloader: a function which is called after `config_loader` when a
file has been modified. Defaults to an empty
:class:`ReloadCallbackChain`
:param comparators: a list of classes which support the
:class:`IComparator` interface which are used to determine if a config
file has been modified. Defaults to :class:`MTimeComparator`.
"""
def __init__(
self,
config_loader,
filenames,
min_interval=0,
reloader=None,
comparators=None):
self.config_loader = config_loader
self.filenames = self.get_filename_list(filenames)
self.min_interval = min_interval
self.last_check = time.time()
self.reloader = reloader or ReloadCallbackChain(all_names=True)
comparators = comparators or [MTimeComparator]
self.comparators = [comp(self.filenames) for comp in comparators]
def get_filename_list(self, filenames):
if isinstance(filenames, six.string_types):
filenames = [filenames]
filenames = sorted(os.path.abspath(name) for name in filenames)
if not filenames:
raise ValueError(
"ConfigurationWatcher requires at least one filename to watch")
return filenames
@property
def should_check(self):
return self.last_check + self.min_interval <= time.time()
def reload_if_changed(self, force=False):
"""If the file(s) being watched by this object have changed,
their configuration will be loaded again using `config_loader`.
Otherwise this is a noop.
:param force: If True ignore the `min_interval` and proceed to
file modified comparisons. To force a reload use
:func:`reload` directly.
"""
if (force or self.should_check) and self.file_modified():
return self.reload()
def file_modified(self):
self.last_check = time.time()
return any(comp.has_changed() for comp in self.comparators)
def reload(self):
config_dict = self.config_loader()
self.reloader()
return config_dict
def get_reloader(self):
return self.reloader
def load_config(self):
return self.config_loader()
class IComparator(object):
"""Interface for a comparator which is used by :class:`ConfigurationWatcher`
to determine if a file has been modified since the last check. A comparator
is used to reduce the work required to reload configuration. Comparators
should implement a mechanism that is relatively efficient (and scalable),
so it can be performed frequently.
:param filenames: A list of absolute paths to configuration files.
"""
def __init__(self, filenames):
pass
def has_changed(self):
"""Returns True if any of the files have been modified since the last
call to :func:`has_changed`. Returns False otherwise.
"""
pass
class InodeComparator(object):
"""Compare files by inode and device number. This is a good comparator to
use when your files can change multiple times per second.
"""
def __init__(self, filenames):
self.filenames = filenames
self.inodes = self.get_inodes()
def get_inodes(self):
def get_inode(stbuf):
return stbuf.st_dev, stbuf.st_ino
return [get_inode(os.stat(filename)) for filename in self.filenames]
def has_changed(self):
last_inodes, self.inodes = self.inodes, self.get_inodes()
return last_inodes != self.inodes
class MTimeComparator(object):
"""Compare files by modified time, or using compare_func,
if it is not None.
.. note::
Most filesystems only store modified time with second grangularity
so multiple changes within the same second can be ignored.
"""
def __init__(self, filenames, compare_func=None):
self.compare_func = (os.path.getmtime if compare_func is None
else compare_func)
self.filenames_mtimes = {
filename: self.compare_func(filename) for filename in filenames
}
def has_changed(self):
for filename, compare_val in self.filenames_mtimes.items():
current_compare_val = self.compare_func(filename)
if compare_val != current_compare_val:
self.filenames_mtimes[filename] = current_compare_val
return True
return False
class MD5Comparator(object):
"""Compare files by md5 hash of their contents. This comparator will be
slower for larger files, but is more resilient to modifications which only
change mtime, but not the files contents.
"""
def __init__(self, filenames):
self.filenames = filenames
self.hashes = self.get_hashes()
def get_hashes(self):
def build_hash(filename):
hasher = hashlib.md5()
with open(filename, 'rb') as fh:
hasher.update(fh.read())
return hasher.digest()
return [build_hash(filename) for filename in self.filenames]
def has_changed(self):
last_hashes, self.hashes = self.hashes, self.get_hashes()
return last_hashes != self.hashes
class ReloadCallbackChain(object):
"""A chain of callbacks which will be triggered after configuration is
reloaded. Designed to work with :class:`ConfigurationWatcher`.
When this class is called it performs two operations:
* calls :func:`reload` on the `namespace`
* calls all attached callbacks
Usage:
.. code-block:: python
chain = ReloadCallbackChain()
chain.add('some_id', callback_foo)
chain.add('other_id', other_callback)
...
# some time later
chain.remove('some_id')
:param namespace: the name of the namespace to :func:`reload`
:param all_names: if True :func:`reload` all namespaces and ignore the
`namespace` param. Defaults to False
:param callbacks: initial list of tuples to add to the callback chain
"""
def __init__(self, namespace=DEFAULT, all_names=False, callbacks=None):
self.namespace = namespace
self.all_names = all_names
self.callbacks = dict(callbacks or ())
def add(self, identifier, callback):
self.callbacks[identifier] = callback
def remove(self, identifier):
del self.callbacks[identifier]
def __call__(self):
reload(name=self.namespace, all_names=self.all_names)
for callback in six.itervalues(self.callbacks):
callback()
def build_loader_callable(load_func, filename, namespace):
def load_configuration():
get_namespace(namespace).clear()
return load_func(filename, namespace=namespace)
return load_configuration
class ConfigFacade(object):
"""A facade around a :class:`ConfigurationWatcher` and a
:class:`ReloadCallbackChain`. See :func:`ConfigFacade.load`.
When a :class:`ConfigFacade` is loaded it will clear the namespace of
all configuration and load the file into the namespace. If this is not
the behaviour you want, use a :class:`ConfigurationWatcher` instead.
Usage:
.. code-block:: python
import staticconf
watcher = staticconf.ConfigFacade.load(
'config.yaml', # Filename or list of filenames to watch
'my_namespace',
staticconf.YamlConfiguration, # Callable which takes the filename
min_interval=3 # Wait at least 3 seconds before checking modified time
)
watcher.add_callback('identifier', do_this_after_reload)
watcher.reload_if_changed()
"""
def __init__(self, watcher):
self.watcher = watcher
self.callback_chain = watcher.get_reloader()
@classmethod
def load(
cls,
filename,
namespace,
loader_func,
min_interval=0,
comparators=None,
):
"""Create a new :class:`ConfigurationWatcher` and load the initial
configuration by calling `loader_func`.
:param filename: a filename or list of filenames to monitor for changes
:param namespace: the name of a namespace to use when loading
configuration. All config data from `filename` will
end up in a :class:`ConfigNamespace` with this name
:param loader_func: a function which accepts two arguments and uses
loader functions from :mod:`staticconf.loader` to
load configuration data into a namespace. The
arguments are `filename` and `namespace`
:param min_interval: minimum number of seconds to wait between calls to
:func:`os.path.getmtime` to check if a file has
been modified.
:param comparators: a list of classes which support the
:class:`IComparator` interface which are used to determine if a config
file has been modified. See ConfigurationWatcher::__init__.
:returns: a :class:`ConfigFacade`
"""
watcher = ConfigurationWatcher(
build_loader_callable(loader_func, filename, namespace=namespace),
filename,
min_interval=min_interval,
reloader=ReloadCallbackChain(namespace=namespace),
comparators=comparators,
)
watcher.load_config()
return cls(watcher)
def add_callback(self, identifier, callback):
self.callback_chain.add(identifier, callback)
def reload_if_changed(self, force=False):
"""See :func:`ConfigurationWatcher.reload_if_changed` """
self.watcher.reload_if_changed(force=force)
|
dnephin/PyStaticConfiguration | staticconf/config.py | ConfigNamespace.get_config_dict | python | def get_config_dict(self):
config_dict = {}
for dotted_key, value in self.get_config_values().items():
subkeys = dotted_key.split('.')
d = config_dict
for key in subkeys:
d = d.setdefault(key, value if key == subkeys[-1] else {})
return config_dict | Reconstruct the nested structure of this object's configuration
and return it as a dict. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L114-L124 | [
"def get_config_values(self):\n \"\"\"Return all configuration stored in this object as a dict.\n \"\"\"\n return self.configuration_values\n"
] | class ConfigNamespace(object):
"""A container for related configuration values. Values are stored
using flattened keys which map to values.
Values are added to this container using :mod:`staticconf.loader`. When a
:class:`ConfigNamespace` is created, it persists for the entire life of the
process. Values will stay in the namespace until :func:`clear` is called
to remove them.
To retrieve a namespace, use :func:`get_namespace`.
To access values stored in this namespace use :mod:`staticconf.readers`
or :mod:`staticconf.schema`.
"""
def __init__(self, name):
self.name = name
self.configuration_values = {}
self.value_proxies = weakref.WeakValueDictionary()
def get_name(self):
return self.name
def get_value_proxies(self):
return list(self.value_proxies.values())
def register_proxy(self, proxy):
self.value_proxies[id(proxy)] = proxy
def apply_config_data(
self,
config_data,
error_on_unknown,
error_on_dupe,
log_keys_only=False,
):
self.validate_keys(
config_data,
error_on_unknown,
log_keys_only=log_keys_only,
)
self.has_duplicate_keys(config_data, error_on_dupe)
self.update_values(config_data)
def update_values(self, *args, **kwargs):
self.configuration_values.update(*args, **kwargs)
def get_config_values(self):
"""Return all configuration stored in this object as a dict.
"""
return self.configuration_values
def get_known_keys(self):
return set(vproxy.config_key for vproxy in self.get_value_proxies())
def validate_keys(
self,
config_data,
error_on_unknown,
log_keys_only=False,
):
unknown = remove_by_keys(config_data, self.get_known_keys())
if not unknown:
return
if log_keys_only:
unknown = [k for k, _ in unknown]
msg = "Unexpected value in %s configuration: %s" % (self.name, unknown)
if error_on_unknown:
raise errors.ConfigurationError(msg)
log.info(msg)
def has_duplicate_keys(self, config_data, error_on_duplicate):
args = config_data, self.configuration_values, error_on_duplicate
return has_duplicate_keys(*args)
def get(self, item, default=None):
return self.configuration_values.get(item, default)
def __getitem__(self, item):
return self.configuration_values[item]
def __setitem__(self, key, value):
self.configuration_values[key] = value
def __contains__(self, item):
return item in self.configuration_values
def clear(self):
"""Remove all values from the namespace."""
self.configuration_values.clear()
def _reset(self):
self.clear()
self.value_proxies.clear()
def __str__(self):
return "%s(%s)" % (type(self).__name__, self.name)
|
dnephin/PyStaticConfiguration | staticconf/config.py | ConfigHelp.view_help | python | def view_help(self):
def format_desc(desc):
return "%s (Type: %s, Default: %s)\n%s" % (
desc.name,
desc.validator.__name__.replace('validate_', ''),
desc.default,
desc.help or '')
def format_namespace(key, desc_list):
return "\nNamespace: %s\n%s" % (
key,
'\n'.join(sorted(format_desc(desc) for desc in desc_list)))
def namespace_cmp(item):
name, _ = item
return chr(0) if name == DEFAULT else name
return '\n'.join(format_namespace(*desc) for desc in
sorted(six.iteritems(self.descriptions),
key=namespace_cmp)) | Return a help message describing all the statically configured keys. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L238-L259 | null | class ConfigHelp(object):
"""Register and display help messages about config keys."""
def __init__(self):
self.descriptions = {}
def add(self, name, validator, default, namespace, help):
desc = KeyDescription(name, validator, default, help)
self.descriptions.setdefault(namespace, []).append(desc)
def clear(self):
self.descriptions.clear()
|
dnephin/PyStaticConfiguration | staticconf/config.py | ConfigurationWatcher.reload_if_changed | python | def reload_if_changed(self, force=False):
if (force or self.should_check) and self.file_modified():
return self.reload() | If the file(s) being watched by this object have changed,
their configuration will be loaded again using `config_loader`.
Otherwise this is a noop.
:param force: If True ignore the `min_interval` and proceed to
file modified comparisons. To force a reload use
:func:`reload` directly. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L369-L379 | [
"def file_modified(self):\n self.last_check = time.time()\n return any(comp.has_changed() for comp in self.comparators)\n"
] | class ConfigurationWatcher(object):
"""Watches a file for modification and reloads the configuration
when it's modified. Accepts a min_interval to throttle checks.
The default :func:`reload()` operation is to reload all namespaces. To
only reload a specific namespace use a :class:`ReloadCallbackChain`
for the `reloader`.
.. seealso::
:func:`ConfigFacade.load` which provides a more concise interface
for the common case.
Usage:
.. code-block:: python
import staticconf
from staticconf import config
def build_configuration(filename, namespace):
config_loader = partial(staticconf.YamlConfiguration,
filename, namespace=namespace)
reloader = config.ReloadCallbackChain(namespace)
return config.ConfigurationWatcher(
config_loader, filename, min_interval=2, reloader=reloader)
config_watcher = build_configuration('config.yaml', 'my_namespace')
# Load the initial configuration
config_watcher.config_loader()
# Do some work
for item in work:
config_watcher.reload_if_changed()
...
:param config_loader: a function which takes no arguments. It is called
by :func:`reload_if_changed` if the file has been modified
:param filenames: a filename or list of filenames to watch for modifications
:param min_interval: minimum number of seconds to wait between calls to
:func:`os.path.getmtime` to check if a file has been modified.
:param reloader: a function which is called after `config_loader` when a
file has been modified. Defaults to an empty
:class:`ReloadCallbackChain`
:param comparators: a list of classes which support the
:class:`IComparator` interface which are used to determine if a config
file has been modified. Defaults to :class:`MTimeComparator`.
"""
def __init__(
self,
config_loader,
filenames,
min_interval=0,
reloader=None,
comparators=None):
self.config_loader = config_loader
self.filenames = self.get_filename_list(filenames)
self.min_interval = min_interval
self.last_check = time.time()
self.reloader = reloader or ReloadCallbackChain(all_names=True)
comparators = comparators or [MTimeComparator]
self.comparators = [comp(self.filenames) for comp in comparators]
def get_filename_list(self, filenames):
if isinstance(filenames, six.string_types):
filenames = [filenames]
filenames = sorted(os.path.abspath(name) for name in filenames)
if not filenames:
raise ValueError(
"ConfigurationWatcher requires at least one filename to watch")
return filenames
@property
def should_check(self):
return self.last_check + self.min_interval <= time.time()
def file_modified(self):
self.last_check = time.time()
return any(comp.has_changed() for comp in self.comparators)
def reload(self):
config_dict = self.config_loader()
self.reloader()
return config_dict
def get_reloader(self):
return self.reloader
def load_config(self):
return self.config_loader()
|
dnephin/PyStaticConfiguration | staticconf/config.py | ConfigFacade.load | python | def load(
cls,
filename,
namespace,
loader_func,
min_interval=0,
comparators=None,
):
watcher = ConfigurationWatcher(
build_loader_callable(loader_func, filename, namespace=namespace),
filename,
min_interval=min_interval,
reloader=ReloadCallbackChain(namespace=namespace),
comparators=comparators,
)
watcher.load_config()
return cls(watcher) | Create a new :class:`ConfigurationWatcher` and load the initial
configuration by calling `loader_func`.
:param filename: a filename or list of filenames to monitor for changes
:param namespace: the name of a namespace to use when loading
configuration. All config data from `filename` will
end up in a :class:`ConfigNamespace` with this name
:param loader_func: a function which accepts two arguments and uses
loader functions from :mod:`staticconf.loader` to
load configuration data into a namespace. The
arguments are `filename` and `namespace`
:param min_interval: minimum number of seconds to wait between calls to
:func:`os.path.getmtime` to check if a file has
been modified.
:param comparators: a list of classes which support the
:class:`IComparator` interface which are used to determine if a config
file has been modified. See ConfigurationWatcher::__init__.
:returns: a :class:`ConfigFacade` | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L585-L620 | [
"def build_loader_callable(load_func, filename, namespace):\n def load_configuration():\n get_namespace(namespace).clear()\n return load_func(filename, namespace=namespace)\n return load_configuration\n",
"def load_config(self):\n return self.config_loader()\n"
] | class ConfigFacade(object):
"""A facade around a :class:`ConfigurationWatcher` and a
:class:`ReloadCallbackChain`. See :func:`ConfigFacade.load`.
When a :class:`ConfigFacade` is loaded it will clear the namespace of
all configuration and load the file into the namespace. If this is not
the behaviour you want, use a :class:`ConfigurationWatcher` instead.
Usage:
.. code-block:: python
import staticconf
watcher = staticconf.ConfigFacade.load(
'config.yaml', # Filename or list of filenames to watch
'my_namespace',
staticconf.YamlConfiguration, # Callable which takes the filename
min_interval=3 # Wait at least 3 seconds before checking modified time
)
watcher.add_callback('identifier', do_this_after_reload)
watcher.reload_if_changed()
"""
def __init__(self, watcher):
self.watcher = watcher
self.callback_chain = watcher.get_reloader()
@classmethod
def add_callback(self, identifier, callback):
self.callback_chain.add(identifier, callback)
def reload_if_changed(self, force=False):
"""See :func:`ConfigurationWatcher.reload_if_changed` """
self.watcher.reload_if_changed(force=force)
|
dnephin/PyStaticConfiguration | staticconf/validation.py | _validate_iterable | python | def _validate_iterable(iterable_type, value):
if isinstance(value, six.string_types):
msg = "Invalid iterable of type(%s): %s"
raise ValidationError(msg % (type(value), value))
try:
return iterable_type(value)
except TypeError:
raise ValidationError("Invalid iterable: %s" % (value)) | Convert the iterable to iterable_type, or raise a Configuration
exception. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/validation.py#L90-L101 | null | """
Validate a configuration value by converting it to a specific type.
These functions are used by :mod:`staticconf.readers` and
:mod:`staticconf.schema` to coerce config values to a type.
"""
import datetime
import logging
import re
import time
import six
from staticconf.errors import ValidationError
def validate_string(value):
return None if value is None else six.text_type(value)
def validate_bool(value):
return None if value is None else bool(value)
def validate_numeric(type_func, value):
try:
return type_func(value)
except ValueError:
raise ValidationError("Invalid %s: %s" % (type_func.__name__, value))
def validate_int(value):
return validate_numeric(int, value)
def validate_float(value):
return validate_numeric(float, value)
date_formats = [
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%d %I:%M:%S %p",
"%Y-%m-%d",
"%y-%m-%d",
"%m/%d/%y",
"%m/%d/%Y",
]
def validate_datetime(value):
if isinstance(value, datetime.datetime):
return value
for format_ in date_formats:
try:
return datetime.datetime.strptime(value, format_)
except ValueError:
pass
raise ValidationError("Invalid date format: %s" % value)
def validate_date(value):
if isinstance(value, datetime.date):
return value
return validate_datetime(value).date()
time_formats = [
"%I %p",
"%H:%M",
"%I:%M %p",
"%H:%M:%S",
"%I:%M:%S %p"
]
def validate_time(value):
if isinstance(value, datetime.time):
return value
for format_ in time_formats:
try:
return datetime.time(*time.strptime(value, format_)[3:6])
except ValueError:
pass
raise ValidationError("Invalid time format: %s" % value)
def validate_list(value):
return _validate_iterable(list, value)
def validate_set(value):
return _validate_iterable(set, value)
def validate_tuple(value):
return _validate_iterable(tuple, value)
def validate_regex(value):
try:
return re.compile(value)
except (re.error, TypeError) as e:
raise ValidationError("Invalid regex: %s, %s" % (e, value))
def build_list_type_validator(item_validator):
"""Return a function which validates that the value is a list of items
which are validated using item_validator.
"""
def validate_list_of_type(value):
return [item_validator(item) for item in validate_list(value)]
return validate_list_of_type
def build_map_type_validator(item_validator):
"""Return a function which validates that the value is a mapping of
items. The function should return pairs of items that will be
passed to the `dict` constructor.
"""
def validate_mapping(value):
return dict(item_validator(item) for item in validate_list(value))
return validate_mapping
def validate_log_level(value):
"""Validate a log level from a string value. Returns a constant from
the :mod:`logging` module.
"""
try:
return getattr(logging, value)
except AttributeError:
raise ValidationError("Unknown log level: %s" % value)
def validate_any(value):
return value
validators = {
'': validate_any,
'bool': validate_bool,
'date': validate_date,
'datetime': validate_datetime,
'float': validate_float,
'int': validate_int,
'list': validate_list,
'set': validate_set,
'string': validate_string,
'time': validate_time,
'tuple': validate_tuple,
'regex': validate_regex,
'log_level': validate_log_level,
}
def get_validators():
"""Return an iterator of (validator_name, validator) pairs."""
return six.iteritems(validators)
|
dnephin/PyStaticConfiguration | staticconf/validation.py | build_list_type_validator | python | def build_list_type_validator(item_validator):
def validate_list_of_type(value):
return [item_validator(item) for item in validate_list(value)]
return validate_list_of_type | Return a function which validates that the value is a list of items
which are validated using item_validator. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/validation.py#L123-L129 | null | """
Validate a configuration value by converting it to a specific type.
These functions are used by :mod:`staticconf.readers` and
:mod:`staticconf.schema` to coerce config values to a type.
"""
import datetime
import logging
import re
import time
import six
from staticconf.errors import ValidationError
def validate_string(value):
return None if value is None else six.text_type(value)
def validate_bool(value):
return None if value is None else bool(value)
def validate_numeric(type_func, value):
try:
return type_func(value)
except ValueError:
raise ValidationError("Invalid %s: %s" % (type_func.__name__, value))
def validate_int(value):
return validate_numeric(int, value)
def validate_float(value):
return validate_numeric(float, value)
date_formats = [
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%d %I:%M:%S %p",
"%Y-%m-%d",
"%y-%m-%d",
"%m/%d/%y",
"%m/%d/%Y",
]
def validate_datetime(value):
if isinstance(value, datetime.datetime):
return value
for format_ in date_formats:
try:
return datetime.datetime.strptime(value, format_)
except ValueError:
pass
raise ValidationError("Invalid date format: %s" % value)
def validate_date(value):
if isinstance(value, datetime.date):
return value
return validate_datetime(value).date()
time_formats = [
"%I %p",
"%H:%M",
"%I:%M %p",
"%H:%M:%S",
"%I:%M:%S %p"
]
def validate_time(value):
if isinstance(value, datetime.time):
return value
for format_ in time_formats:
try:
return datetime.time(*time.strptime(value, format_)[3:6])
except ValueError:
pass
raise ValidationError("Invalid time format: %s" % value)
def _validate_iterable(iterable_type, value):
"""Convert the iterable to iterable_type, or raise a Configuration
exception.
"""
if isinstance(value, six.string_types):
msg = "Invalid iterable of type(%s): %s"
raise ValidationError(msg % (type(value), value))
try:
return iterable_type(value)
except TypeError:
raise ValidationError("Invalid iterable: %s" % (value))
def validate_list(value):
return _validate_iterable(list, value)
def validate_set(value):
return _validate_iterable(set, value)
def validate_tuple(value):
return _validate_iterable(tuple, value)
def validate_regex(value):
try:
return re.compile(value)
except (re.error, TypeError) as e:
raise ValidationError("Invalid regex: %s, %s" % (e, value))
def build_map_type_validator(item_validator):
"""Return a function which validates that the value is a mapping of
items. The function should return pairs of items that will be
passed to the `dict` constructor.
"""
def validate_mapping(value):
return dict(item_validator(item) for item in validate_list(value))
return validate_mapping
def validate_log_level(value):
"""Validate a log level from a string value. Returns a constant from
the :mod:`logging` module.
"""
try:
return getattr(logging, value)
except AttributeError:
raise ValidationError("Unknown log level: %s" % value)
def validate_any(value):
return value
validators = {
'': validate_any,
'bool': validate_bool,
'date': validate_date,
'datetime': validate_datetime,
'float': validate_float,
'int': validate_int,
'list': validate_list,
'set': validate_set,
'string': validate_string,
'time': validate_time,
'tuple': validate_tuple,
'regex': validate_regex,
'log_level': validate_log_level,
}
def get_validators():
"""Return an iterator of (validator_name, validator) pairs."""
return six.iteritems(validators)
|
dnephin/PyStaticConfiguration | staticconf/validation.py | build_map_type_validator | python | def build_map_type_validator(item_validator):
def validate_mapping(value):
return dict(item_validator(item) for item in validate_list(value))
return validate_mapping | Return a function which validates that the value is a mapping of
items. The function should return pairs of items that will be
passed to the `dict` constructor. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/validation.py#L132-L139 | null | """
Validate a configuration value by converting it to a specific type.
These functions are used by :mod:`staticconf.readers` and
:mod:`staticconf.schema` to coerce config values to a type.
"""
import datetime
import logging
import re
import time
import six
from staticconf.errors import ValidationError
def validate_string(value):
return None if value is None else six.text_type(value)
def validate_bool(value):
return None if value is None else bool(value)
def validate_numeric(type_func, value):
try:
return type_func(value)
except ValueError:
raise ValidationError("Invalid %s: %s" % (type_func.__name__, value))
def validate_int(value):
return validate_numeric(int, value)
def validate_float(value):
return validate_numeric(float, value)
date_formats = [
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%d %I:%M:%S %p",
"%Y-%m-%d",
"%y-%m-%d",
"%m/%d/%y",
"%m/%d/%Y",
]
def validate_datetime(value):
if isinstance(value, datetime.datetime):
return value
for format_ in date_formats:
try:
return datetime.datetime.strptime(value, format_)
except ValueError:
pass
raise ValidationError("Invalid date format: %s" % value)
def validate_date(value):
if isinstance(value, datetime.date):
return value
return validate_datetime(value).date()
time_formats = [
"%I %p",
"%H:%M",
"%I:%M %p",
"%H:%M:%S",
"%I:%M:%S %p"
]
def validate_time(value):
if isinstance(value, datetime.time):
return value
for format_ in time_formats:
try:
return datetime.time(*time.strptime(value, format_)[3:6])
except ValueError:
pass
raise ValidationError("Invalid time format: %s" % value)
def _validate_iterable(iterable_type, value):
"""Convert the iterable to iterable_type, or raise a Configuration
exception.
"""
if isinstance(value, six.string_types):
msg = "Invalid iterable of type(%s): %s"
raise ValidationError(msg % (type(value), value))
try:
return iterable_type(value)
except TypeError:
raise ValidationError("Invalid iterable: %s" % (value))
def validate_list(value):
return _validate_iterable(list, value)
def validate_set(value):
return _validate_iterable(set, value)
def validate_tuple(value):
return _validate_iterable(tuple, value)
def validate_regex(value):
try:
return re.compile(value)
except (re.error, TypeError) as e:
raise ValidationError("Invalid regex: %s, %s" % (e, value))
def build_list_type_validator(item_validator):
"""Return a function which validates that the value is a list of items
which are validated using item_validator.
"""
def validate_list_of_type(value):
return [item_validator(item) for item in validate_list(value)]
return validate_list_of_type
def validate_log_level(value):
"""Validate a log level from a string value. Returns a constant from
the :mod:`logging` module.
"""
try:
return getattr(logging, value)
except AttributeError:
raise ValidationError("Unknown log level: %s" % value)
def validate_any(value):
return value
validators = {
'': validate_any,
'bool': validate_bool,
'date': validate_date,
'datetime': validate_datetime,
'float': validate_float,
'int': validate_int,
'list': validate_list,
'set': validate_set,
'string': validate_string,
'time': validate_time,
'tuple': validate_tuple,
'regex': validate_regex,
'log_level': validate_log_level,
}
def get_validators():
"""Return an iterator of (validator_name, validator) pairs."""
return six.iteritems(validators)
|
dnephin/PyStaticConfiguration | staticconf/getters.py | register_value_proxy | python | def register_value_proxy(namespace, value_proxy, help_text):
namespace.register_proxy(value_proxy)
config.config_help.add(
value_proxy.config_key, value_proxy.validator, value_proxy.default,
namespace.get_name(), help_text) | Register a value proxy with the namespace, and add the help_text. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/getters.py#L68-L73 | [
"def add(self, name, validator, default, namespace, help):\n desc = KeyDescription(name, validator, default, help)\n self.descriptions.setdefault(namespace, []).append(desc)\n"
] | """
Functions used to retrieve proxies around values in a
:class:`staticconf.config.ConfigNamespace`. All of the getter methods
return a :class:`ValueProxy`. These proxies are wrappers around a configuration
value. They don't access the configuration until some attribute of the object
is accessed.
.. warning::
This module should be considered deprecated. There are edge cases which
make these getters non-obvious to use (such as passing a :class:`ValueProxy`
to a cmodule.
Please use :class:`staticconf.readers` if you don't need static
definitions, or :class:`staticconf.schema` if you do.
Example
-------
.. code-block:: python
import staticconf
# Returns a ValueProxy which can be used just like an int
max_cycles = staticconf.get_int('max_cycles')
print "Half of max_cycles", max_cycles / 2
# Using a NamespaceGetters object to retrieve from a namespace
config = staticconf.NamespaceGetters('special')
ratio = config.get_float('ratio')
To retrieve values from a namespace, you can create a ``NamespaceGetters``
object.
.. code-block:: python
my_package_conf = staticconf.NamespaceGetters('my_package_namespace')
max_size = my_package_conf.get_int('max_size')
error_msg = my_package_conf.get_string('error_msg')
Arguments
---------
Getters accept the following kwargs:
config_key
string configuration key
default
if no ``default`` is given, the key must be present in the configuration.
Raises ConfigurationError on missing key.
help
a help string describing the purpose of the config value. See
:func:`staticconf.config.view_help()`.
namespace
get the value from this namespace instead of DEFAULT.
"""
from staticconf import config, proxy, readers
from staticconf.proxy import UndefToken
class ProxyFactory(object):
"""Create ProxyValue objects so that there is never a duplicate
proxy for any (namespace, validator, config_key, default) group.
"""
def __init__(self):
self.proxies = {}
def build(self, validator, namespace, config_key, default, help):
"""Build or retrieve a ValueProxy from the attributes. Proxies are
keyed using a repr because default values can be mutable types.
"""
proxy_attrs = validator, namespace, config_key, default
proxy_key = repr(proxy_attrs)
if proxy_key in self.proxies:
return self.proxies[proxy_key]
value_proxy = proxy.ValueProxy(*proxy_attrs)
register_value_proxy(namespace, value_proxy, help)
return self.proxies.setdefault(proxy_key, value_proxy)
proxy_factory = ProxyFactory()
def build_getter(validator, getter_namespace=None):
"""Create a getter function for retrieving values from the config cache.
Getters will default to the DEFAULT namespace.
"""
def proxy_register(key_name, default=UndefToken, help=None, namespace=None):
name = namespace or getter_namespace or config.DEFAULT
namespace = config.get_namespace(name)
return proxy_factory.build(validator, namespace, key_name, default, help)
return proxy_register
class GetterNameFactory(object):
@staticmethod
def get_name(validator_name):
return 'get_%s' % validator_name if validator_name else 'get'
@staticmethod
def get_list_of_name(validator_name):
return 'get_list_of_%s' % validator_name
NamespaceGetters = readers.build_accessor_type(GetterNameFactory, build_getter)
default_getters = NamespaceGetters(config.DEFAULT)
globals().update(default_getters.get_methods())
__all__ = ['NamespaceGetters'] + list(default_getters.get_methods())
|
dnephin/PyStaticConfiguration | staticconf/getters.py | build_getter | python | def build_getter(validator, getter_namespace=None):
def proxy_register(key_name, default=UndefToken, help=None, namespace=None):
name = namespace or getter_namespace or config.DEFAULT
namespace = config.get_namespace(name)
return proxy_factory.build(validator, namespace, key_name, default, help)
return proxy_register | Create a getter function for retrieving values from the config cache.
Getters will default to the DEFAULT namespace. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/getters.py#L101-L110 | null | """
Functions used to retrieve proxies around values in a
:class:`staticconf.config.ConfigNamespace`. All of the getter methods
return a :class:`ValueProxy`. These proxies are wrappers around a configuration
value. They don't access the configuration until some attribute of the object
is accessed.
.. warning::
This module should be considered deprecated. There are edge cases which
make these getters non-obvious to use (such as passing a :class:`ValueProxy`
to a cmodule.
Please use :class:`staticconf.readers` if you don't need static
definitions, or :class:`staticconf.schema` if you do.
Example
-------
.. code-block:: python
import staticconf
# Returns a ValueProxy which can be used just like an int
max_cycles = staticconf.get_int('max_cycles')
print "Half of max_cycles", max_cycles / 2
# Using a NamespaceGetters object to retrieve from a namespace
config = staticconf.NamespaceGetters('special')
ratio = config.get_float('ratio')
To retrieve values from a namespace, you can create a ``NamespaceGetters``
object.
.. code-block:: python
my_package_conf = staticconf.NamespaceGetters('my_package_namespace')
max_size = my_package_conf.get_int('max_size')
error_msg = my_package_conf.get_string('error_msg')
Arguments
---------
Getters accept the following kwargs:
config_key
string configuration key
default
if no ``default`` is given, the key must be present in the configuration.
Raises ConfigurationError on missing key.
help
a help string describing the purpose of the config value. See
:func:`staticconf.config.view_help()`.
namespace
get the value from this namespace instead of DEFAULT.
"""
from staticconf import config, proxy, readers
from staticconf.proxy import UndefToken
def register_value_proxy(namespace, value_proxy, help_text):
"""Register a value proxy with the namespace, and add the help_text."""
namespace.register_proxy(value_proxy)
config.config_help.add(
value_proxy.config_key, value_proxy.validator, value_proxy.default,
namespace.get_name(), help_text)
class ProxyFactory(object):
"""Create ProxyValue objects so that there is never a duplicate
proxy for any (namespace, validator, config_key, default) group.
"""
def __init__(self):
self.proxies = {}
def build(self, validator, namespace, config_key, default, help):
"""Build or retrieve a ValueProxy from the attributes. Proxies are
keyed using a repr because default values can be mutable types.
"""
proxy_attrs = validator, namespace, config_key, default
proxy_key = repr(proxy_attrs)
if proxy_key in self.proxies:
return self.proxies[proxy_key]
value_proxy = proxy.ValueProxy(*proxy_attrs)
register_value_proxy(namespace, value_proxy, help)
return self.proxies.setdefault(proxy_key, value_proxy)
proxy_factory = ProxyFactory()
class GetterNameFactory(object):
@staticmethod
def get_name(validator_name):
return 'get_%s' % validator_name if validator_name else 'get'
@staticmethod
def get_list_of_name(validator_name):
return 'get_list_of_%s' % validator_name
NamespaceGetters = readers.build_accessor_type(GetterNameFactory, build_getter)
default_getters = NamespaceGetters(config.DEFAULT)
globals().update(default_getters.get_methods())
__all__ = ['NamespaceGetters'] + list(default_getters.get_methods())
|
dnephin/PyStaticConfiguration | staticconf/getters.py | ProxyFactory.build | python | def build(self, validator, namespace, config_key, default, help):
proxy_attrs = validator, namespace, config_key, default
proxy_key = repr(proxy_attrs)
if proxy_key in self.proxies:
return self.proxies[proxy_key]
value_proxy = proxy.ValueProxy(*proxy_attrs)
register_value_proxy(namespace, value_proxy, help)
return self.proxies.setdefault(proxy_key, value_proxy) | Build or retrieve a ValueProxy from the attributes. Proxies are
keyed using a repr because default values can be mutable types. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/getters.py#L84-L95 | [
"def register_value_proxy(namespace, value_proxy, help_text):\n \"\"\"Register a value proxy with the namespace, and add the help_text.\"\"\"\n namespace.register_proxy(value_proxy)\n config.config_help.add(\n value_proxy.config_key, value_proxy.validator, value_proxy.default,\n namespace.get_name(), help_text)\n"
] | class ProxyFactory(object):
"""Create ProxyValue objects so that there is never a duplicate
proxy for any (namespace, validator, config_key, default) group.
"""
def __init__(self):
self.proxies = {}
|
jlevy/strif | strif.py | new_timestamped_uid | python | def new_timestamped_uid(bits=32):
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits)) | A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L76-L82 | [
"def new_uid(bits=64):\n \"\"\"\n A random alphanumeric value with at least the specified bits of randomness. We use base 36,\n i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.\n \"\"\"\n return \"\".join(_RANDOM.sample(\"0123456789abcdefghijklmnopqrstuvwxyz\",\n int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16\n"
] | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def abbreviate_str(string, max_len=80, indicator="..."):
"""
Abbreviate a string, adding an indicator like an ellipsis if required.
"""
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
"""
Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 not to truncate items.
"""
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
"""
Expand a template string like "blah blah $FOO blah" using given value mapping.
"""
if template_str is None:
return None
else:
if transformer is None:
transformer = lambda v: v
try:
# Don't bother iterating items for Python 2+3 compatibility.
transformed_value_map = {k: transformer(value_map[k]) for k in value_map}
return Template(template_str).substitute(transformed_value_map)
except Exception as e:
raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
"""
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
to ensure shell safety.
"""
return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
"""
Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
"""
return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
"""
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
"""
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
"""
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
"""
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
return path
def make_parent_dirs(path, mode=0o777):
"""
Ensure parent directories of a file are created as needed.
"""
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
...
"""
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
    """
    Shared implementation behind temp_output_file/temp_output_dir.

    Yields a path (for directories) or an (fd, path) pair (for files). With
    always_clean=True the temp artifact is removed even on exception;
    otherwise it is removed only on normal exit.
    """
    if dir and make_parents:
        make_all_dirs(dir)
    if is_dir:
        path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
        result = path
    else:
        (fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
        result = (fd, path)
    def clean():
        # Best-effort removal; never raise during cleanup.
        try:
            rmtree_or_file(path, ignore_errors=True)
        except OSError:
            pass
    if always_clean:
        try:
            yield result
        finally:
            clean()
    else:
        # No finally here on purpose: keep the temp output if the body raised,
        # which is useful for debugging partial results.
        yield result
        clean()
def read_string_from_file(path, encoding="utf8"):
    """
    Return the full contents of the file at `path`, decoded as a string
    using the given encoding (UTF-8 by default).
    """
    with codecs.open(path, "rb", encoding=encoding) as stream:
        return stream.read()
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
    """
    Atomically replace the file at `path` with the given string contents,
    encoded with `encoding`. Keeps a backup of any previous version by
    default (set backup_suffix=None to skip the backup).
    """
    with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path, \
            codecs.open(tmp_path, "wb", encoding=encoding) as out:
        out.write(string)
def set_file_mtime(path, mtime, atime=None):
    """
    Set modification (and access) time on a file, creating it if missing.

    mtime: new modification time, in seconds since the epoch.
    atime: new access time; defaults to `mtime` when falsy.
    """
    if not atime:
        atime = mtime
    # Opening in append mode creates the file if it does not exist, without
    # truncating existing contents; `with` replaces the manual try/finally.
    with open(path, 'a'):
        os.utime(path, (atime, mtime))
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
    """
    Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
    """
    with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
        shutil.copyfile(source_path, tmp_path)
        # Carry over the source's mtime so the copy looks unchanged to tools
        # that compare timestamps.
        set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
    """
    Copy a file or directory recursively, and atomically, renaming file or top-level dir when done.
    Unlike shutil.copytree, this will not fail on a file.
    """
    if os.path.isdir(source_path):
        with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
            shutil.copytree(source_path, tmp_path, symlinks=symlinks)
    else:
        # Plain files delegate to copyfile_atomic, which also preserves mtime.
        copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
    """
    Move file. With a few extra options.

    Optionally creates missing parent directories of `dest_path`, and moves
    any existing destination aside first (only when backup_suffix is given;
    move_to_backup is a no-op when backup_suffix is None).
    """
    if make_parents:
        make_parent_dirs(dest_path)
    move_to_backup(dest_path, backup_suffix=backup_suffix)
    shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
    """
    Remove `path` whether it is a file, symlink, or directory tree.
    shutil.rmtree alone fails on files and symlinks, so dispatch on type.
    """
    # TODO: Could add an rsync-based delete, as in
    # https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
    if ignore_errors and not os.path.exists(path):
        return
    is_real_dir = os.path.isdir(path) and not os.path.islink(path)
    if is_real_dir:
        shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
    else:
        # Symlinks to directories are unlinked, not followed.
        os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
    """
    Run the system `chmod` on `path` with the given mode expression
    (e.g. "u+X" or "600"). POSIX only: os.chmod supports neither fast
    recursive chmod nor "+X"-style expressions, so we shell out.
    """
    command = ["chmod"]
    if recursive:
        command += ["-R"]
    command += [mode_expression, path]
    subprocess.check_call(command)
def file_sha1(path):
    """
    Return the SHA1 hex digest of the contents of the file at `path`.
    Reads in fixed-size chunks, so arbitrarily large files use constant memory.
    """
    sha1 = hashlib.sha1()
    with open(path, "rb") as f:
        # 64KB chunks: the previous 1KB (2**10) reads meant one Python-level
        # loop iteration per kilobyte, which is needlessly slow on big files.
        for block in iter(lambda: f.read(65536), b""):
            sha1.update(block)
    return sha1.hexdigest()
|
jlevy/strif | strif.py | abbreviate_str | python | def abbreviate_str(string, max_len=80, indicator="..."):
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator | Abbreviate a string, adding an indicator like an ellipsis if required. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L85-L94 | null | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
    """
    Shallow-merge any number of dicts into a brand-new dict; later dicts win
    on duplicate keys. Input dicts are never mutated.
    Pre-Python-3.5 equivalent of {**d1, **d2, ...}.
    https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
    https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
    """
    merged = {}
    for mapping in dict_list:
        merged.update(mapping)
    return merged
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
    """
    Return a random lowercase alphanumeric (base-36) identifier carrying at
    least `bits` bits of randomness. Not case sensitive, so safe for
    filenames even on case-insensitive filesystems.
    """
    alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"
    # Each base-36 character carries log2(36) ~= 5.17 bits; dividing by the
    # slight underestimate 5.16 plus the +1 guarantees at least `bits`.
    length = int(bits / 5.16) + 1
    # Draw *with* replacement: random.sample() samples without replacement,
    # which yields less entropy per character and caps the id at 36 chars
    # (so e.g. bits > ~180 would previously raise ValueError).
    return "".join(_RANDOM.choice(alphabet) for _ in range(length))
def iso_timestamp():
    """
    Current time as an ISO-8601 string with a trailing 'Z' for clarity.
    Example: 2015-09-12T08:41:12.397217Z
    NOTE(review): datetime.now() is naive local time, yet 'Z' conventionally
    denotes UTC -- confirm callers expect this.
    """
    now = datetime.now()
    return "%sZ" % now.isoformat()
def new_timestamped_uid(bits=32):
    """
    A unique id that begins with an ISO timestamp (its fractional-seconds dot
    replaced by 'Z-') followed by `bits` bits of randomness. Sorts nicely by
    creation time while still being unique.
    Example: 20150912T084555Z-378465-43vtwbx
    """
    # Raw string for the regex: the original '[^\w.]' relied on Python passing
    # the unknown '\w' escape through, which is a SyntaxWarning in 3.12+.
    timestamp = re.sub(r'[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-")
    return "%s-%s" % (timestamp, new_uid(bits))
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
    """
    Render a list as a joined string, truncating each element to
    `item_max_len` characters and appending `indicator` when the list itself
    was cut down to `max_items`. Pass item_max_len=None or 0 to keep items
    whole. Falsy inputs (None, empty list) are returned unchanged.
    """
    if not items:
        return items
    shown = [abbreviate_str("%s" % entry, max_len=item_max_len) for entry in items[:max_items]]
    if len(items) > max_items:
        shown.append(indicator)
    return joiner.join(shown)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
    """
    Substitute $NAME-style variables in `template_str` using `value_map`.
    Each value is passed through `transformer` (identity by default) before
    substitution. Returns None when the template is None; raises ValueError
    if a referenced variable is missing or substitution otherwise fails.
    """
    if template_str is None:
        return None
    if transformer is None:
        def transformer(v):
            return v
    try:
        # Build the transformed mapping eagerly so transformer errors are
        # also reported as ValueError below.
        mapped = dict((key, transformer(value_map[key])) for key in value_map)
        return Template(template_str).substitute(mapped)
    except Exception as e:
        raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
    """
    Expand a shell template string like "cp $SOURCE $TARGET/blah", also
    quoting values as needed to ensure shell safety.
    """
    # pipes.quote is deprecated and was removed in Python 3.13; prefer
    # shlex.quote (3.3+) and fall back to pipes.quote only on Python 2.
    try:
        from shlex import quote
    except ImportError:
        from pipes import quote
    return expand_variables(template_str, value_map, transformer=quote)
def shell_expand_to_popen(template, values):
    """
    Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.

    The template is tokenized with shlex.split first, then each token has its
    $VAR references expanded, so expanded values containing spaces remain a
    single argument.
    """
    return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
    """
    Move the given file or directory to the same name, with a backup suffix.
    If backup_suffix not supplied, move it to the extension ".bak".
    NB: If backup_suffix is supplied and is None, don't do anything.
    """
    if backup_suffix and os.path.exists(path):
        backup_path = path + backup_suffix
        # Some messy corner cases need to be handled for existing backups.
        # TODO: Note if this is a directory, and we do this twice at once, there is a potential race
        # that could leave one backup inside the other.
        # An old backup may itself be a symlink, a directory, or a file;
        # clear it first so the move below can't nest into it.
        if os.path.islink(backup_path):
            os.unlink(backup_path)
        elif os.path.isdir(backup_path):
            shutil.rmtree(backup_path)
        shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
    """
    Create `path` and any missing parent directories, returning `path`.
    Unlike os.makedirs(), succeeds (as a no-op) when the path already exists.
    """
    # Create first and tolerate EEXIST, rather than check-then-create, to
    # avoid the race between the check and the mkdir. (Python 3 has
    # exist_ok=True, but this form works on both Python 2 and 3.)
    # https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
    try:
        os.makedirs(path, mode=mode)
    except OSError as e:
        already_a_dir = e.errno == errno.EEXIST and os.path.isdir(path)
        if not already_a_dir:
            raise
    return path
def make_parent_dirs(path, mode=0o777):
"""
Ensure parent directories of a file are created as needed.
"""
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
...
"""
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
if dir and make_parents:
make_all_dirs(dir)
if is_dir:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
result = path
else:
(fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
result = (fd, path)
def clean():
try:
rmtree_or_file(path, ignore_errors=True)
except OSError:
pass
if always_clean:
try:
yield result
finally:
clean()
else:
yield result
clean()
def read_string_from_file(path, encoding="utf8"):
"""
Read entire contents of file into a string.
"""
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
"""
Write entire file with given string contents, atomically. Keeps backup by default.
"""
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string)
def set_file_mtime(path, mtime, atime=None):
"""Set access and modification times on a file."""
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close()
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
"""
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
"""
Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file.
"""
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Move file. With a few extra options.
"""
if make_parents:
make_parent_dirs(dest_path)
move_to_backup(dest_path, backup_suffix=backup_suffix)
shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
"""
rmtree fails on files or symlinks. This removes the target, whatever it is.
"""
# TODO: Could add an rsync-based delete, as in
# https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
if ignore_errors and not os.path.exists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
else:
os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
"""
This is ugly and will only work on POSIX, but the built-in Python os.chmod support
is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
both of which are slow for large trees. So just shell out.
"""
popenargs = ["chmod"]
if recursive:
popenargs.append("-R")
popenargs.append(mode_expression)
popenargs.append(path)
subprocess.check_call(popenargs)
def file_sha1(path):
"""
Compute SHA1 hash of a file.
"""
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
block = f.read(2 ** 10)
if not block:
break
sha1.update(block)
return sha1.hexdigest()
|
jlevy/strif | strif.py | abbreviate_list | python | def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened) | Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 not to truncate items. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L97-L108 | null | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
"""
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx
"""
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
"""
Abbreviate a string, adding an indicator like an ellipsis if required.
"""
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
"""
Expand a template string like "blah blah $FOO blah" using given value mapping.
"""
if template_str is None:
return None
else:
if transformer is None:
transformer = lambda v: v
try:
# Don't bother iterating items for Python 2+3 compatibility.
transformed_value_map = {k: transformer(value_map[k]) for k in value_map}
return Template(template_str).substitute(transformed_value_map)
except Exception as e:
raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
"""
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
to ensure shell safety.
"""
return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
"""
Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
"""
return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
"""
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
"""
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
"""
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
"""
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
return path
def make_parent_dirs(path, mode=0o777):
"""
Ensure parent directories of a file are created as needed.
"""
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
...
"""
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
if dir and make_parents:
make_all_dirs(dir)
if is_dir:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
result = path
else:
(fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
result = (fd, path)
def clean():
try:
rmtree_or_file(path, ignore_errors=True)
except OSError:
pass
if always_clean:
try:
yield result
finally:
clean()
else:
yield result
clean()
def read_string_from_file(path, encoding="utf8"):
"""
Read entire contents of file into a string.
"""
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
"""
Write entire file with given string contents, atomically. Keeps backup by default.
"""
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string)
def set_file_mtime(path, mtime, atime=None):
"""Set access and modification times on a file."""
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close()
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
"""
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
"""
Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file.
"""
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Move file. With a few extra options.
"""
if make_parents:
make_parent_dirs(dest_path)
move_to_backup(dest_path, backup_suffix=backup_suffix)
shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
"""
rmtree fails on files or symlinks. This removes the target, whatever it is.
"""
# TODO: Could add an rsync-based delete, as in
# https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
if ignore_errors and not os.path.exists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
else:
os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
"""
This is ugly and will only work on POSIX, but the built-in Python os.chmod support
is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
both of which are slow for large trees. So just shell out.
"""
popenargs = ["chmod"]
if recursive:
popenargs.append("-R")
popenargs.append(mode_expression)
popenargs.append(path)
subprocess.check_call(popenargs)
def file_sha1(path):
"""
Compute SHA1 hash of a file.
"""
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
block = f.read(2 ** 10)
if not block:
break
sha1.update(block)
return sha1.hexdigest()
|
jlevy/strif | strif.py | expand_variables | python | def expand_variables(template_str, value_map, transformer=None):
if template_str is None:
return None
else:
if transformer is None:
transformer = lambda v: v
try:
# Don't bother iterating items for Python 2+3 compatibility.
transformed_value_map = {k: transformer(value_map[k]) for k in value_map}
return Template(template_str).substitute(transformed_value_map)
except Exception as e:
raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e)) | Expand a template string like "blah blah $FOO blah" using given value mapping. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L114-L128 | null | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
"""
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx
"""
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
"""
Abbreviate a string, adding an indicator like an ellipsis if required.
"""
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
"""
Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 not to truncate items.
"""
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened)
#
# ---- Templating ----
def shell_expand_variables(template_str, value_map):
"""
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
to ensure shell safety.
"""
return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
"""
Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
"""
return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
"""
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
"""
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
"""
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
"""
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
return path
def make_parent_dirs(path, mode=0o777):
"""
Ensure parent directories of a file are created as needed.
"""
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
...
"""
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
if dir and make_parents:
make_all_dirs(dir)
if is_dir:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
result = path
else:
(fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
result = (fd, path)
def clean():
try:
rmtree_or_file(path, ignore_errors=True)
except OSError:
pass
if always_clean:
try:
yield result
finally:
clean()
else:
yield result
clean()
def read_string_from_file(path, encoding="utf8"):
"""
Read entire contents of file into a string.
"""
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
"""
Write entire file with given string contents, atomically. Keeps backup by default.
"""
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string)
def set_file_mtime(path, mtime, atime=None):
"""Set access and modification times on a file."""
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close()
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
"""
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
"""
Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file.
"""
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Move file. With a few extra options.
"""
if make_parents:
make_parent_dirs(dest_path)
move_to_backup(dest_path, backup_suffix=backup_suffix)
shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
"""
rmtree fails on files or symlinks. This removes the target, whatever it is.
"""
# TODO: Could add an rsync-based delete, as in
# https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
if ignore_errors and not os.path.exists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
else:
os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
"""
This is ugly and will only work on POSIX, but the built-in Python os.chmod support
is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
both of which are slow for large trees. So just shell out.
"""
popenargs = ["chmod"]
if recursive:
popenargs.append("-R")
popenargs.append(mode_expression)
popenargs.append(path)
subprocess.check_call(popenargs)
def file_sha1(path):
"""
Compute SHA1 hash of a file.
"""
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
block = f.read(2 ** 10)
if not block:
break
sha1.update(block)
return sha1.hexdigest()
|
jlevy/strif | strif.py | shell_expand_to_popen | python | def shell_expand_to_popen(template, values):
return [expand_variables(item, values) for item in shlex.split(template)] | Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L139-L143 | null | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
"""
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx
"""
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
"""
Abbreviate a string, adding an indicator like an ellipsis if required.
"""
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
"""
Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 not to truncate items.
"""
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
"""
Expand a template string like "blah blah $FOO blah" using given value mapping.
"""
if template_str is None:
return None
else:
if transformer is None:
transformer = lambda v: v
try:
# Don't bother iterating items for Python 2+3 compatibility.
transformed_value_map = {k: transformer(value_map[k]) for k in value_map}
return Template(template_str).substitute(transformed_value_map)
except Exception as e:
raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
"""
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
to ensure shell safety.
"""
return expand_variables(template_str, value_map, transformer=pipes.quote)
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
"""
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
"""
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
"""
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
"""
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
return path
def make_parent_dirs(path, mode=0o777):
"""
Ensure parent directories of a file are created as needed.
"""
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
...
"""
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
if dir and make_parents:
make_all_dirs(dir)
if is_dir:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
result = path
else:
(fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
result = (fd, path)
def clean():
try:
rmtree_or_file(path, ignore_errors=True)
except OSError:
pass
if always_clean:
try:
yield result
finally:
clean()
else:
yield result
clean()
def read_string_from_file(path, encoding="utf8"):
"""
Read entire contents of file into a string.
"""
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
"""
Write entire file with given string contents, atomically. Keeps backup by default.
"""
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string)
def set_file_mtime(path, mtime, atime=None):
"""Set access and modification times on a file."""
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close()
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
"""
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
"""
Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file.
"""
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Move file. With a few extra options.
"""
if make_parents:
make_parent_dirs(dest_path)
move_to_backup(dest_path, backup_suffix=backup_suffix)
shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
"""
rmtree fails on files or symlinks. This removes the target, whatever it is.
"""
# TODO: Could add an rsync-based delete, as in
# https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
if ignore_errors and not os.path.exists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
else:
os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
"""
This is ugly and will only work on POSIX, but the built-in Python os.chmod support
is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
both of which are slow for large trees. So just shell out.
"""
popenargs = ["chmod"]
if recursive:
popenargs.append("-R")
popenargs.append(mode_expression)
popenargs.append(path)
subprocess.check_call(popenargs)
def file_sha1(path):
"""
Compute SHA1 hash of a file.
"""
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
block = f.read(2 ** 10)
if not block:
break
sha1.update(block)
return sha1.hexdigest()
|
jlevy/strif | strif.py | move_to_backup | python | def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path) | Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L149-L164 | null | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
"""
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx
"""
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
"""
Abbreviate a string, adding an indicator like an ellipsis if required.
"""
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
"""
Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 not to truncate items.
"""
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
"""
Expand a template string like "blah blah $FOO blah" using given value mapping.
"""
if template_str is None:
return None
else:
if transformer is None:
transformer = lambda v: v
try:
# Don't bother iterating items for Python 2+3 compatibility.
transformed_value_map = {k: transformer(value_map[k]) for k in value_map}
return Template(template_str).substitute(transformed_value_map)
except Exception as e:
raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
"""
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
to ensure shell safety.
"""
return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
"""
Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
"""
return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def make_all_dirs(path, mode=0o777):
"""
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
"""
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
return path
def make_parent_dirs(path, mode=0o777):
"""
Ensure parent directories of a file are created as needed.
"""
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
...
"""
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
if dir and make_parents:
make_all_dirs(dir)
if is_dir:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
result = path
else:
(fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
result = (fd, path)
def clean():
try:
rmtree_or_file(path, ignore_errors=True)
except OSError:
pass
if always_clean:
try:
yield result
finally:
clean()
else:
yield result
clean()
def read_string_from_file(path, encoding="utf8"):
"""
Read entire contents of file into a string.
"""
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
"""
Write entire file with given string contents, atomically. Keeps backup by default.
"""
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string)
def set_file_mtime(path, mtime, atime=None):
"""Set access and modification times on a file."""
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close()
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
"""
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
"""
Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file.
"""
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Move file. With a few extra options.
"""
if make_parents:
make_parent_dirs(dest_path)
move_to_backup(dest_path, backup_suffix=backup_suffix)
shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
"""
rmtree fails on files or symlinks. This removes the target, whatever it is.
"""
# TODO: Could add an rsync-based delete, as in
# https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
if ignore_errors and not os.path.exists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
else:
os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
"""
This is ugly and will only work on POSIX, but the built-in Python os.chmod support
is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
both of which are slow for large trees. So just shell out.
"""
popenargs = ["chmod"]
if recursive:
popenargs.append("-R")
popenargs.append(mode_expression)
popenargs.append(path)
subprocess.check_call(popenargs)
def file_sha1(path):
"""
Compute SHA1 hash of a file.
"""
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
block = f.read(2 ** 10)
if not block:
break
sha1.update(block)
return sha1.hexdigest()
|
jlevy/strif | strif.py | make_all_dirs | python | def make_all_dirs(path, mode=0o777):
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
return path | Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L167-L182 | null | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
"""
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx
"""
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
"""
Abbreviate a string, adding an indicator like an ellipsis if required.
"""
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
"""
Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 not to truncate items.
"""
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
"""
Expand a template string like "blah blah $FOO blah" using given value mapping.
"""
if template_str is None:
return None
else:
if transformer is None:
transformer = lambda v: v
try:
# Don't bother iterating items for Python 2+3 compatibility.
transformed_value_map = {k: transformer(value_map[k]) for k in value_map}
return Template(template_str).substitute(transformed_value_map)
except Exception as e:
raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
"""
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
to ensure shell safety.
"""
return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
"""
Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
"""
return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
"""
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
"""
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path)
def make_parent_dirs(path, mode=0o777):
"""
Ensure parent directories of a file are created as needed.
"""
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
...
"""
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
if dir and make_parents:
make_all_dirs(dir)
if is_dir:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
result = path
else:
(fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
result = (fd, path)
def clean():
try:
rmtree_or_file(path, ignore_errors=True)
except OSError:
pass
if always_clean:
try:
yield result
finally:
clean()
else:
yield result
clean()
def read_string_from_file(path, encoding="utf8"):
"""
Read entire contents of file into a string.
"""
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
"""
Write entire file with given string contents, atomically. Keeps backup by default.
"""
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string)
def set_file_mtime(path, mtime, atime=None):
"""Set access and modification times on a file."""
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close()
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
"""
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
"""
Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file.
"""
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Move file. With a few extra options.
"""
if make_parents:
make_parent_dirs(dest_path)
move_to_backup(dest_path, backup_suffix=backup_suffix)
shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
"""
rmtree fails on files or symlinks. This removes the target, whatever it is.
"""
# TODO: Could add an rsync-based delete, as in
# https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
if ignore_errors and not os.path.exists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
else:
os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
"""
This is ugly and will only work on POSIX, but the built-in Python os.chmod support
is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
both of which are slow for large trees. So just shell out.
"""
popenargs = ["chmod"]
if recursive:
popenargs.append("-R")
popenargs.append(mode_expression)
popenargs.append(path)
subprocess.check_call(popenargs)
def file_sha1(path):
"""
Compute SHA1 hash of a file.
"""
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
block = f.read(2 ** 10)
if not block:
break
sha1.update(block)
return sha1.hexdigest()
|
jlevy/strif | strif.py | make_parent_dirs | python | def make_parent_dirs(path, mode=0o777):
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path | Ensure parent directories of a file are created as needed. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L185-L192 | [
"def make_all_dirs(path, mode=0o777):\n \"\"\"\n Ensure local dir, with all its parent dirs, are created.\n Unlike os.makedirs(), will not fail if the path already exists.\n \"\"\"\n # Avoid races inherent to doing this in two steps (check then create).\n # Python 3 has exist_ok but the approach below works for Python 2+3.\n # https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python\n try:\n os.makedirs(path, mode=mode)\n except OSError as e:\n if e.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n return path\n"
] | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
"""
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx
"""
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
"""
Abbreviate a string, adding an indicator like an ellipsis if required.
"""
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
"""
Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 not to truncate items.
"""
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
"""
Expand a template string like "blah blah $FOO blah" using given value mapping.
"""
if template_str is None:
return None
else:
if transformer is None:
transformer = lambda v: v
try:
# Don't bother iterating items for Python 2+3 compatibility.
transformed_value_map = {k: transformer(value_map[k]) for k in value_map}
return Template(template_str).substitute(transformed_value_map)
except Exception as e:
raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
"""
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
to ensure shell safety.
"""
return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
"""
Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
"""
return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
"""
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
"""
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
"""
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
"""
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
...
"""
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
if dir and make_parents:
make_all_dirs(dir)
if is_dir:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
result = path
else:
(fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
result = (fd, path)
def clean():
try:
rmtree_or_file(path, ignore_errors=True)
except OSError:
pass
if always_clean:
try:
yield result
finally:
clean()
else:
yield result
clean()
def read_string_from_file(path, encoding="utf8"):
"""
Read entire contents of file into a string.
"""
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
"""
Write entire file with given string contents, atomically. Keeps backup by default.
"""
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string)
def set_file_mtime(path, mtime, atime=None):
"""Set access and modification times on a file."""
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close()
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
"""
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
"""
Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file.
"""
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Move file. With a few extra options.
"""
if make_parents:
make_parent_dirs(dest_path)
move_to_backup(dest_path, backup_suffix=backup_suffix)
shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
"""
rmtree fails on files or symlinks. This removes the target, whatever it is.
"""
# TODO: Could add an rsync-based delete, as in
# https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
if ignore_errors and not os.path.exists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
else:
os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
"""
This is ugly and will only work on POSIX, but the built-in Python os.chmod support
is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
both of which are slow for large trees. So just shell out.
"""
popenargs = ["chmod"]
if recursive:
popenargs.append("-R")
popenargs.append(mode_expression)
popenargs.append(path)
subprocess.check_call(popenargs)
def file_sha1(path):
"""
Compute SHA1 hash of a file.
"""
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
block = f.read(2 ** 10)
if not block:
break
sha1.update(block)
return sha1.hexdigest()
|
jlevy/strif | strif.py | atomic_output_file | python | def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path) | A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L196-L221 | [
"def new_uid(bits=64):\n \"\"\"\n A random alphanumeric value with at least the specified bits of randomness. We use base 36,\n i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.\n \"\"\"\n return \"\".join(_RANDOM.sample(\"0123456789abcdefghijklmnopqrstuvwxyz\",\n int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16\n",
"def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):\n \"\"\"\n Move the given file or directory to the same name, with a backup suffix.\n If backup_suffix not supplied, move it to the extension \".bak\".\n NB: If backup_suffix is supplied and is None, don't do anything.\n \"\"\"\n if backup_suffix and os.path.exists(path):\n backup_path = path + backup_suffix\n # Some messy corner cases need to be handled for existing backups.\n # TODO: Note if this is a directory, and we do this twice at once, there is a potential race\n # that could leave one backup inside the other.\n if os.path.islink(backup_path):\n os.unlink(backup_path)\n elif os.path.isdir(backup_path):\n shutil.rmtree(backup_path)\n shutil.move(path, backup_path)\n",
"def make_parent_dirs(path, mode=0o777):\n \"\"\"\n Ensure parent directories of a file are created as needed.\n \"\"\"\n parent = os.path.dirname(path)\n if parent:\n make_all_dirs(parent, mode)\n return path\n"
] | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
"""
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx
"""
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
"""
Abbreviate a string, adding an indicator like an ellipsis if required.
"""
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
"""
Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 not to truncate items.
"""
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
"""
Expand a template string like "blah blah $FOO blah" using given value mapping.
"""
if template_str is None:
return None
else:
if transformer is None:
transformer = lambda v: v
try:
# Don't bother iterating items for Python 2+3 compatibility.
transformed_value_map = {k: transformer(value_map[k]) for k in value_map}
return Template(template_str).substitute(transformed_value_map)
except Exception as e:
raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
"""
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
to ensure shell safety.
"""
return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
"""
Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
"""
return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
"""
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
"""
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
"""
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
"""
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
return path
def make_parent_dirs(path, mode=0o777):
"""
Ensure parent directories of a file are created as needed.
"""
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path
@contextmanager
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
...
"""
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
if dir and make_parents:
make_all_dirs(dir)
if is_dir:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
result = path
else:
(fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
result = (fd, path)
def clean():
try:
rmtree_or_file(path, ignore_errors=True)
except OSError:
pass
if always_clean:
try:
yield result
finally:
clean()
else:
yield result
clean()
def read_string_from_file(path, encoding="utf8"):
"""
Read entire contents of file into a string.
"""
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
"""
Write entire file with given string contents, atomically. Keeps backup by default.
"""
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string)
def set_file_mtime(path, mtime, atime=None):
"""Set access and modification times on a file."""
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close()
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
"""
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
"""
Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file.
"""
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Move file. With a few extra options.
"""
if make_parents:
make_parent_dirs(dest_path)
move_to_backup(dest_path, backup_suffix=backup_suffix)
shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
"""
rmtree fails on files or symlinks. This removes the target, whatever it is.
"""
# TODO: Could add an rsync-based delete, as in
# https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
if ignore_errors and not os.path.exists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
else:
os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
"""
This is ugly and will only work on POSIX, but the built-in Python os.chmod support
is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
both of which are slow for large trees. So just shell out.
"""
popenargs = ["chmod"]
if recursive:
popenargs.append("-R")
popenargs.append(mode_expression)
popenargs.append(path)
subprocess.check_call(popenargs)
def file_sha1(path):
"""
Compute SHA1 hash of a file.
"""
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
block = f.read(2 ** 10)
if not block:
break
sha1.update(block)
return sha1.hexdigest()
|
jlevy/strif | strif.py | temp_output_file | python | def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean) | A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
... | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L224-L234 | null | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
"""
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx
"""
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
    """
    Abbreviate a string, adding an indicator like an ellipsis if required.
    """
    # Nothing to do for empty/None input, no limit, or strings already short enough.
    if not string or not max_len or len(string) <= max_len:
        return string
    # If the limit is so small the indicator itself wouldn't fit, hard-truncate.
    if max_len <= len(indicator):
        return string[:max_len]
    return string[:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
    """
    Abbreviate a list, truncating each element and adding an indicator at the end if the
    whole list was truncated. Set item_max_len to None or 0 not to truncate items.
    """
    if not items:
        return items
    # Stringify and truncate the elements we keep; note the indicator is only
    # appended when elements were actually dropped.
    kept = [abbreviate_str("%s" % element, max_len=item_max_len)
            for element in items[:max_items]]
    if len(items) > max_items:
        kept.append(indicator)
    return joiner.join(kept)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
    """
    Expand a template string like "blah blah $FOO blah" using given value mapping.
    """
    # None in, None out.
    if template_str is None:
        return None
    if transformer is None:
        transformer = lambda v: v
    try:
        # Iterate keys directly (not items()) for Python 2+3 compatibility.
        transformed = dict((key, transformer(value_map[key])) for key in value_map)
        return Template(template_str).substitute(transformed)
    except Exception as e:
        raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
    """
    Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
    to ensure shell safety.
    """
    # pipes.quote escapes each substituted value so it is safe to embed in a shell command.
    # NOTE(review): the pipes module is deprecated and removed in Python 3.13; shlex.quote
    # is the modern equivalent, but pipes is kept here for the library's Python 2.7 support.
    return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
    """
    Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
    """
    # shlex.split tokenizes like a POSIX shell; each token is then variable-expanded,
    # so the result can be passed to subprocess without shell=True.
    return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
    """
    Move the given file or directory to the same name, with a backup suffix.
    If backup_suffix not supplied, move it to the extension ".bak".
    NB: If backup_suffix is supplied and is None, don't do anything.

    :param path: file or directory to move aside; a silent no-op if it does not exist.
    :param backup_suffix: appended to ``path`` to form the backup name.
    """
    if backup_suffix and os.path.exists(path):
        backup_path = path + backup_suffix
        # Some messy corner cases need to be handled for existing backups.
        # TODO: Note if this is a directory, and we do this twice at once, there is a potential race
        # that could leave one backup inside the other.
        # A stale symlink backup is unlinked; a stale directory backup must be removed
        # recursively, or shutil.move would move the new backup INSIDE it.
        if os.path.islink(backup_path):
            os.unlink(backup_path)
        elif os.path.isdir(backup_path):
            shutil.rmtree(backup_path)
        shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
    """
    Ensure local dir, with all its parent dirs, are created.
    Unlike os.makedirs(), will not fail if the path already exists.
    """
    # Create-then-check (rather than check-then-create) avoids the race where another
    # process creates the directory between the two steps. Python 3 has exist_ok,
    # but this form works on Python 2 and 3.
    # https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
    try:
        os.makedirs(path, mode=mode)
    except OSError as e:
        # Only swallow "already exists, and it really is a directory".
        if e.errno != errno.EEXIST or not os.path.isdir(path):
            raise
    return path
def make_parent_dirs(path, mode=0o777):
    """
    Ensure parent directories of a file are created as needed.

    :param path: a file path whose parent directories should exist afterwards.
    :param mode: permissions for any directories created.
    :return: ``path``, unchanged, for convenient chaining.
    """
    parent = os.path.dirname(path)
    # A bare filename has no parent component, so there is nothing to create.
    if parent:
        make_all_dirs(parent, mode)
    return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
    """
    A context manager for convenience in writing a file or directory in an atomic way. Set up
    a temporary name, then rename it after the operation is done, optionally making a backup of
    the previous file or directory, if present.

    :param dest_path: final destination; the caller writes to the yielded temp path instead.
    :param make_parents: if true, create missing parent directories for the temp path.
    :param backup_suffix: if set, move any existing ``dest_path`` aside under this suffix.
    :param suffix: temp-name pattern; must contain one ``%s``, filled with a random uid.
    :raises IOError: if the caller never created the temp path.
    """
    if dest_path == os.devnull:
        # Handle the (probably rare) case of writing to /dev/null.
        yield dest_path
    else:
        # e.g. "out.txt" -> "out.txt.partial.<uid>": unique, so concurrent writers don't collide.
        tmp_path = ("%s" + suffix) % (dest_path, new_uid())
        if make_parents:
            make_parent_dirs(tmp_path)
        yield tmp_path
        # Note this is not in a finally block, so that result won't be renamed to final location
        # in case of abnormal exit.
        if not os.path.exists(tmp_path):
            raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
        if backup_suffix:
            move_to_backup(dest_path, backup_suffix=backup_suffix)
        # If the target already exists, and is a directory, it has to be removed.
        if os.path.isdir(dest_path):
            shutil.rmtree(dest_path)
        shutil.move(tmp_path, dest_path)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
    """
    A context manager for convenience in creating a temporary directory,
    which is deleted when exiting the context.
    Usage:
      with temp_output_dir() as dirname:
        ...

    :param prefix: prefix for the temporary directory's name.
    :param suffix: suffix for the temporary directory's name.
    :param dir: parent directory to create it in (system default if None).
    :param make_parents: if true, create ``dir`` and its parents first.
    :param always_clean: if true, delete the temp dir even when the body raises.
    """
    # Delegates to the shared helper; is_dir=True means just the path is yielded.
    return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
                        always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
    # Shared implementation behind the temp-dir/temp-file context managers.
    # Yields a bare path for directories, or a (fd, path) pair for files.
    if dir and make_parents:
        make_all_dirs(dir)
    if is_dir:
        path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
        result = path
    else:
        (fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
        result = (fd, path)
    def clean():
        # Best-effort removal; never let cleanup failures mask the body's result.
        try:
            rmtree_or_file(path, ignore_errors=True)
        except OSError:
            pass
    if always_clean:
        try:
            yield result
        finally:
            clean()
    else:
        # Without always_clean, cleanup is skipped if the body raises, so the
        # partial output is left behind (e.g. for inspection/debugging).
        yield result
        clean()
def read_string_from_file(path, encoding="utf8"):
    """
    Read entire contents of file into a string.

    :param path: file to read.
    :param encoding: text encoding to decode with (utf8 by default).
    """
    # codecs.open decodes transparently on both Python 2 and 3.
    with codecs.open(path, "rb", encoding=encoding) as stream:
        return stream.read()
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
    """
    Write entire file with given string contents, atomically. Keeps backup by default.

    :param path: destination file path.
    :param string: full text contents to write.
    :param make_parents: if true, create missing parent directories.
    :param backup_suffix: suffix for a backup of any previous file; None disables the backup.
    :param encoding: text encoding used for the write.
    """
    # atomic_output_file writes to a temp name and renames into place on success,
    # so readers never observe a partially written file.
    with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
        with codecs.open(tmp_path, "wb", encoding=encoding) as f:
            f.write(string)
def set_file_mtime(path, mtime, atime=None):
    """Set access and modification times on a file."""
    # Access time defaults to the modification time when omitted.
    access_time = atime or mtime
    # Open in append mode first so the file exists before its times are changed.
    with open(path, 'a'):
        os.utime(path, (access_time, mtime))
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
    """
    Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.

    :param make_parents: if true, create missing parent directories of the destination.
    :param backup_suffix: if set, keep any existing destination under this suffix.
    """
    with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
        shutil.copyfile(source_path, tmp_path)
        # Carry over the source's mtime so the copy looks unchanged to timestamp-based tools.
        set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
    """
    Copy a file or directory recursively, and atomically, renaming file or top-level dir when done.
    Unlike shutil.copytree, this will not fail on a file.

    :param symlinks: passed through to shutil.copytree (copy links as links when true).
    """
    if os.path.isdir(source_path):
        with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
            shutil.copytree(source_path, tmp_path, symlinks=symlinks)
    else:
        # Plain files go through the single-file atomic copy path.
        copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
    """
    Move file. With a few extra options.

    :param make_parents: if true, create missing parent directories of the destination.
    :param backup_suffix: if set, keep any existing destination under this suffix
        (no-op when the destination does not exist).
    """
    if make_parents:
        make_parent_dirs(dest_path)
    move_to_backup(dest_path, backup_suffix=backup_suffix)
    shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
    """
    rmtree fails on files or symlinks. This removes the target, whatever it is.
    """
    # TODO: Could add an rsync-based delete, as in
    # https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
    if ignore_errors and not os.path.exists(path):
        return
    # Symlinks to directories must be unlinked, not recursed into.
    is_real_dir = os.path.isdir(path) and not os.path.islink(path)
    if is_real_dir:
        shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
    else:
        os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
    """
    This is ugly and will only work on POSIX, but the built-in Python os.chmod support
    is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
    both of which are slow for large trees. So just shell out.

    :param path: file or directory whose mode to change.
    :param mode_expression: any expression the chmod(1) binary accepts, e.g. "a+rX".
    :param recursive: if true, pass -R to chmod.
    :raises subprocess.CalledProcessError: if the chmod binary exits nonzero.
    """
    # Build an argv list (shell=False semantics), so path/expression need no quoting.
    popenargs = ["chmod"]
    if recursive:
        popenargs.append("-R")
    popenargs.append(mode_expression)
    popenargs.append(path)
    subprocess.check_call(popenargs)
def file_sha1(path):
    """
    Compute SHA1 hash of a file.

    :param path: file to hash.
    :return: hex digest string of the file's contents.
    """
    digest = hashlib.sha1()
    with open(path, "rb") as stream:
        # Read in 1 KiB chunks until EOF so arbitrarily large files don't load into memory.
        for block in iter(lambda: stream.read(1024), b""):
            digest.update(block)
    return digest.hexdigest()
|
jlevy/strif | strif.py | temp_output_dir | python | def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean) | A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
... | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L237-L247 | null | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
"""
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx
"""
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
"""
Abbreviate a string, adding an indicator like an ellipsis if required.
"""
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
"""
Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 not to truncate items.
"""
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
"""
Expand a template string like "blah blah $FOO blah" using given value mapping.
"""
if template_str is None:
return None
else:
if transformer is None:
transformer = lambda v: v
try:
# Don't bother iterating items for Python 2+3 compatibility.
transformed_value_map = {k: transformer(value_map[k]) for k in value_map}
return Template(template_str).substitute(transformed_value_map)
except Exception as e:
raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
"""
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
to ensure shell safety.
"""
return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
"""
Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
"""
return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
"""
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
"""
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
"""
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
"""
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
return path
def make_parent_dirs(path, mode=0o777):
"""
Ensure parent directories of a file are created as needed.
"""
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
if dir and make_parents:
make_all_dirs(dir)
if is_dir:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
result = path
else:
(fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
result = (fd, path)
def clean():
try:
rmtree_or_file(path, ignore_errors=True)
except OSError:
pass
if always_clean:
try:
yield result
finally:
clean()
else:
yield result
clean()
def read_string_from_file(path, encoding="utf8"):
"""
Read entire contents of file into a string.
"""
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
"""
Write entire file with given string contents, atomically. Keeps backup by default.
"""
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string)
def set_file_mtime(path, mtime, atime=None):
"""Set access and modification times on a file."""
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close()
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
"""
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
"""
Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file.
"""
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Move file. With a few extra options.
"""
if make_parents:
make_parent_dirs(dest_path)
move_to_backup(dest_path, backup_suffix=backup_suffix)
shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
"""
rmtree fails on files or symlinks. This removes the target, whatever it is.
"""
# TODO: Could add an rsync-based delete, as in
# https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
if ignore_errors and not os.path.exists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
else:
os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
"""
This is ugly and will only work on POSIX, but the built-in Python os.chmod support
is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
both of which are slow for large trees. So just shell out.
"""
popenargs = ["chmod"]
if recursive:
popenargs.append("-R")
popenargs.append(mode_expression)
popenargs.append(path)
subprocess.check_call(popenargs)
def file_sha1(path):
"""
Compute SHA1 hash of a file.
"""
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
block = f.read(2 ** 10)
if not block:
break
sha1.update(block)
return sha1.hexdigest()
|
jlevy/strif | strif.py | read_string_from_file | python | def read_string_from_file(path, encoding="utf8"):
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value | Read entire contents of file into a string. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L278-L284 | null | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
"""
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx
"""
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
"""
Abbreviate a string, adding an indicator like an ellipsis if required.
"""
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
"""
Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 not to truncate items.
"""
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
"""
Expand a template string like "blah blah $FOO blah" using given value mapping.
"""
if template_str is None:
return None
else:
if transformer is None:
transformer = lambda v: v
try:
# Don't bother iterating items for Python 2+3 compatibility.
transformed_value_map = {k: transformer(value_map[k]) for k in value_map}
return Template(template_str).substitute(transformed_value_map)
except Exception as e:
raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
"""
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
to ensure shell safety.
"""
return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
"""
Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
"""
return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
"""
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
"""
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
"""
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
"""
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
return path
def make_parent_dirs(path, mode=0o777):
"""
Ensure parent directories of a file are created as needed.
"""
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
...
"""
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
if dir and make_parents:
make_all_dirs(dir)
if is_dir:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
result = path
else:
(fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
result = (fd, path)
def clean():
try:
rmtree_or_file(path, ignore_errors=True)
except OSError:
pass
if always_clean:
try:
yield result
finally:
clean()
else:
yield result
clean()
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
"""
Write entire file with given string contents, atomically. Keeps backup by default.
"""
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string)
def set_file_mtime(path, mtime, atime=None):
"""Set access and modification times on a file."""
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close()
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
"""
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
"""
Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file.
"""
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Move file. With a few extra options.
"""
if make_parents:
make_parent_dirs(dest_path)
move_to_backup(dest_path, backup_suffix=backup_suffix)
shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
"""
rmtree fails on files or symlinks. This removes the target, whatever it is.
"""
# TODO: Could add an rsync-based delete, as in
# https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
if ignore_errors and not os.path.exists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
else:
os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
"""
This is ugly and will only work on POSIX, but the built-in Python os.chmod support
is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
both of which are slow for large trees. So just shell out.
"""
popenargs = ["chmod"]
if recursive:
popenargs.append("-R")
popenargs.append(mode_expression)
popenargs.append(path)
subprocess.check_call(popenargs)
def file_sha1(path):
"""
Compute SHA1 hash of a file.
"""
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
block = f.read(2 ** 10)
if not block:
break
sha1.update(block)
return sha1.hexdigest()
|
jlevy/strif | strif.py | write_string_to_file | python | def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string) | Write entire file with given string contents, atomically. Keeps backup by default. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L287-L293 | null | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
"""
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx
"""
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
"""
Abbreviate a string, adding an indicator like an ellipsis if required.
"""
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
"""
Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 not to truncate items.
"""
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
"""
Expand a template string like "blah blah $FOO blah" using given value mapping.
"""
if template_str is None:
return None
else:
if transformer is None:
transformer = lambda v: v
try:
# Don't bother iterating items for Python 2+3 compatibility.
transformed_value_map = {k: transformer(value_map[k]) for k in value_map}
return Template(template_str).substitute(transformed_value_map)
except Exception as e:
raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
"""
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
to ensure shell safety.
"""
return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
"""
Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
"""
return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
"""
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
"""
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
"""
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
"""
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
return path
def make_parent_dirs(path, mode=0o777):
"""
Ensure parent directories of a file are created as needed.
"""
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
...
"""
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
if dir and make_parents:
make_all_dirs(dir)
if is_dir:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
result = path
else:
(fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
result = (fd, path)
def clean():
try:
rmtree_or_file(path, ignore_errors=True)
except OSError:
pass
if always_clean:
try:
yield result
finally:
clean()
else:
yield result
clean()
def read_string_from_file(path, encoding="utf8"):
"""
Read entire contents of file into a string.
"""
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value
def set_file_mtime(path, mtime, atime=None):
"""Set access and modification times on a file."""
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close()
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
"""
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
"""
Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file.
"""
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Move file. With a few extra options.
"""
if make_parents:
make_parent_dirs(dest_path)
move_to_backup(dest_path, backup_suffix=backup_suffix)
shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
"""
rmtree fails on files or symlinks. This removes the target, whatever it is.
"""
# TODO: Could add an rsync-based delete, as in
# https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
if ignore_errors and not os.path.exists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
else:
os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
"""
This is ugly and will only work on POSIX, but the built-in Python os.chmod support
is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
both of which are slow for large trees. So just shell out.
"""
popenargs = ["chmod"]
if recursive:
popenargs.append("-R")
popenargs.append(mode_expression)
popenargs.append(path)
subprocess.check_call(popenargs)
def file_sha1(path):
"""
Compute SHA1 hash of a file.
"""
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
block = f.read(2 ** 10)
if not block:
break
sha1.update(block)
return sha1.hexdigest()
|
jlevy/strif | strif.py | set_file_mtime | python | def set_file_mtime(path, mtime, atime=None):
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close() | Set access and modification times on a file. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L296-L304 | null | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
"""
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx
"""
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
"""
Abbreviate a string, adding an indicator like an ellipsis if required.
"""
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
"""
Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 not to truncate items.
"""
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
"""
Expand a template string like "blah blah $FOO blah" using given value mapping.
"""
if template_str is None:
return None
else:
if transformer is None:
transformer = lambda v: v
try:
# Don't bother iterating items for Python 2+3 compatibility.
transformed_value_map = {k: transformer(value_map[k]) for k in value_map}
return Template(template_str).substitute(transformed_value_map)
except Exception as e:
raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
"""
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
to ensure shell safety.
"""
return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
"""
Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
"""
return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
"""
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
"""
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
"""
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
"""
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
return path
def make_parent_dirs(path, mode=0o777):
"""
Ensure parent directories of a file are created as needed.
"""
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
...
"""
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
if dir and make_parents:
make_all_dirs(dir)
if is_dir:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
result = path
else:
(fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
result = (fd, path)
def clean():
try:
rmtree_or_file(path, ignore_errors=True)
except OSError:
pass
if always_clean:
try:
yield result
finally:
clean()
else:
yield result
clean()
def read_string_from_file(path, encoding="utf8"):
"""
Read entire contents of file into a string.
"""
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
"""
Write entire file with given string contents, atomically. Keeps backup by default.
"""
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string)
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
"""
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
"""
Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file.
"""
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Move file. With a few extra options.
"""
if make_parents:
make_parent_dirs(dest_path)
move_to_backup(dest_path, backup_suffix=backup_suffix)
shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
"""
rmtree fails on files or symlinks. This removes the target, whatever it is.
"""
# TODO: Could add an rsync-based delete, as in
# https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
if ignore_errors and not os.path.exists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
else:
os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
"""
This is ugly and will only work on POSIX, but the built-in Python os.chmod support
is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
both of which are slow for large trees. So just shell out.
"""
popenargs = ["chmod"]
if recursive:
popenargs.append("-R")
popenargs.append(mode_expression)
popenargs.append(path)
subprocess.check_call(popenargs)
def file_sha1(path):
"""
Compute SHA1 hash of a file.
"""
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
block = f.read(2 ** 10)
if not block:
break
sha1.update(block)
return sha1.hexdigest()
|
jlevy/strif | strif.py | copyfile_atomic | python | def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path)) | Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L307-L313 | [
"def set_file_mtime(path, mtime, atime=None):\n \"\"\"Set access and modification times on a file.\"\"\"\n if not atime:\n atime = mtime\n f = open(path, 'a')\n try:\n os.utime(path, (atime, mtime))\n finally:\n f.close()\n"
] | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
"""
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx
"""
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
"""
Abbreviate a string, adding an indicator like an ellipsis if required.
"""
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
"""
Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 not to truncate items.
"""
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
"""
Expand a template string like "blah blah $FOO blah" using given value mapping.
"""
if template_str is None:
return None
else:
if transformer is None:
transformer = lambda v: v
try:
# Don't bother iterating items for Python 2+3 compatibility.
transformed_value_map = {k: transformer(value_map[k]) for k in value_map}
return Template(template_str).substitute(transformed_value_map)
except Exception as e:
raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
"""
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
to ensure shell safety.
"""
return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
"""
Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
"""
return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
"""
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
"""
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
"""
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
"""
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
return path
def make_parent_dirs(path, mode=0o777):
"""
Ensure parent directories of a file are created as needed.
"""
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
...
"""
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
if dir and make_parents:
make_all_dirs(dir)
if is_dir:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
result = path
else:
(fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
result = (fd, path)
def clean():
try:
rmtree_or_file(path, ignore_errors=True)
except OSError:
pass
if always_clean:
try:
yield result
finally:
clean()
else:
yield result
clean()
def read_string_from_file(path, encoding="utf8"):
"""
Read entire contents of file into a string.
"""
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
"""
Write entire file with given string contents, atomically. Keeps backup by default.
"""
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string)
def set_file_mtime(path, mtime, atime=None):
"""Set access and modification times on a file."""
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close()
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
"""
Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file.
"""
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Move file. With a few extra options.
"""
if make_parents:
make_parent_dirs(dest_path)
move_to_backup(dest_path, backup_suffix=backup_suffix)
shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
"""
rmtree fails on files or symlinks. This removes the target, whatever it is.
"""
# TODO: Could add an rsync-based delete, as in
# https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
if ignore_errors and not os.path.exists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
else:
os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
"""
This is ugly and will only work on POSIX, but the built-in Python os.chmod support
is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
both of which are slow for large trees. So just shell out.
"""
popenargs = ["chmod"]
if recursive:
popenargs.append("-R")
popenargs.append(mode_expression)
popenargs.append(path)
subprocess.check_call(popenargs)
def file_sha1(path):
"""
Compute SHA1 hash of a file.
"""
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
block = f.read(2 ** 10)
if not block:
break
sha1.update(block)
return sha1.hexdigest()
|
jlevy/strif | strif.py | copytree_atomic | python | def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix) | Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L316-L325 | [
"def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):\n \"\"\"\n Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.\n \"\"\"\n with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:\n shutil.copyfile(source_path, tmp_path)\n set_file_mtime(tmp_path, os.path.getmtime(source_path))\n"
] | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
    """
    Shallow-merge zero or more dicts into a single new dict. Values from
    dicts later in the argument list take precedence over earlier ones.
    The inputs are never modified.

    Mainly helpful before Python 3.5's `{**a, **b}` unpacking.
    https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
    https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
    """
    merged = {}
    for mapping in dict_list:
        merged.update(mapping)
    return merged
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
    """
    A random lowercase alphanumeric (base 36) identifier with at least the
    requested bits of randomness. Base 36 keeps it safe for filenames even
    on case-insensitive filesystems.

    Fix: the previous implementation used _RANDOM.sample(), which draws
    *without* replacement — characters could never repeat, the length was
    capped at 36, and the actual entropy was below the requested bits.
    Drawing each character independently restores the intended entropy.
    """
    alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"
    # Each base-36 character carries log2(36) ~= 5.17 bits of entropy.
    length = int(bits / 5.16) + 1
    return "".join(_RANDOM.choice(alphabet) for _ in range(length))
def iso_timestamp():
    """
    ISO timestamp. With the Z for usual clarity.
    Example: 2015-09-12T08:41:12.397217Z

    NOTE(review): datetime.now() is local time, but a trailing "Z"
    conventionally denotes UTC; parsers treating this as UTC will be offset
    by the local timezone. Kept as-is since existing callers may depend on
    local-time values — confirm before changing to utcnow().
    """
    return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
    """
    A unique id that begins with a compacted ISO timestamp (punctuation
    stripped, the fractional-seconds dot replaced by "Z-") followed by bits
    of randomness. Sorts nicely by creation time while still being unique.
    Example: 20150912T084555Z-378465-43vtwbx
    """
    stamp = re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-")
    return "%s-%s" % (stamp, new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
    """
    Truncate a string to at most max_len characters, appending an indicator
    (ellipsis by default) when truncation happens. Falsy input or a falsy
    max_len is returned unchanged; when max_len cannot even fit the
    indicator, the string is hard-truncated with no indicator.
    """
    if not string or not max_len or len(string) <= max_len:
        return string
    if max_len <= len(indicator):
        return string[:max_len]
    return string[:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
    """
    Join a list into a single abbreviated string: keep at most max_items
    elements, truncate each element's string form to item_max_len (set to
    None or 0 to disable truncation), and append the indicator when any
    elements were dropped. Falsy input is returned unchanged.
    """
    if not items:
        return items
    pieces = []
    for item in items[:max_items]:
        pieces.append(abbreviate_str("%s" % item, max_len=item_max_len))
    if len(items) > max_items:
        pieces.append(indicator)
    return joiner.join(pieces)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
    """
    Expand a template string like "blah blah $FOO blah" using the given
    value mapping. Each value may optionally be passed through a transformer
    function before substitution. Returns None for a None template. Raises
    ValueError on a missing or malformed variable reference.
    """
    if template_str is None:
        return None
    def identity(v):
        return v
    transform = transformer if transformer is not None else identity
    try:
        # Build the substitution map eagerly; iterating keys (not items)
        # keeps Python 2+3 compatibility.
        substitutions = dict((key, transform(value_map[key])) for key in value_map)
        return Template(template_str).substitute(substitutions)
    except Exception as e:
        raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
    """
    Expand a shell template string like "cp $SOURCE $TARGET/blah", quoting
    each substituted value with pipes.quote so the result is safe to hand
    to a shell.

    NOTE: pipes.quote is deprecated in Python 3 (shlex.quote is the modern
    equivalent); kept here for Python 2 compatibility.
    """
    return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
    """
    Expand a template like "cp $SOURCE $TARGET/blah" into a list of
    arguments suitable for subprocess.Popen: the template is shell-tokenized
    first, then each token has its variables expanded.
    """
    tokens = shlex.split(template)
    return [expand_variables(token, values) for token in tokens]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
    """
    Move the given file or directory aside to the same name plus a backup
    suffix (".bak" by default), replacing any previous backup.

    NB: If backup_suffix is explicitly passed as None (or any falsy value),
    this is a no-op. A nonexistent path is also a no-op.
    """
    if backup_suffix and os.path.exists(path):
        backup_path = path + backup_suffix
        # Clear any pre-existing backup first: a symlink must be unlinked
        # (rmtree would fail or follow it) and a directory must be rmtree'd
        # (otherwise the move below would nest the new backup inside it).
        # TODO: Note if this is a directory, and we do this twice at once, there is a potential race
        # that could leave one backup inside the other.
        if os.path.islink(backup_path):
            os.unlink(backup_path)
        elif os.path.isdir(backup_path):
            shutil.rmtree(backup_path)
        shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
    """
    Create a directory along with any missing parents, like `mkdir -p`.
    Unlike os.makedirs(), succeeds silently when the path already exists as
    a directory. Returns the path.
    """
    # Create first and handle the "already exists" error afterwards:
    # checking then creating in two separate steps would be racy.
    # (Python 3 has exist_ok, but this form works on both Python 2 and 3.)
    # https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
    try:
        os.makedirs(path, mode=mode)
    except OSError as exc:
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
    return path
def make_parent_dirs(path, mode=0o777):
    """
    Create any missing parent directories of the given file path, then
    return the path unchanged. A bare filename with no directory component
    is a no-op.
    """
    parent_dir = os.path.dirname(path)
    if parent_dir:
        make_all_dirs(parent_dir, mode)
    return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
    """
    A context manager for writing a file or directory atomically: yields a
    uniquely named temporary path next to dest_path, then renames it into
    place when the block completes, optionally backing up any previous file
    or directory first. On abnormal exit the temporary path is left behind
    and dest_path is untouched.
    """
    if dest_path == os.devnull:
        # Handle the (probably rare) case of writing to /dev/null.
        yield dest_path
    else:
        # Unique temp name in the same directory, so the final move is a
        # same-filesystem rename.
        tmp_path = ("%s" + suffix) % (dest_path, new_uid())
        if make_parents:
            make_parent_dirs(tmp_path)
        yield tmp_path
        # Note this is not in a finally block, so that result won't be renamed to final location
        # in case of abnormal exit.
        if not os.path.exists(tmp_path):
            raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
        if backup_suffix:
            move_to_backup(dest_path, backup_suffix=backup_suffix)
        # If the target already exists, and is a directory, it has to be removed.
        if os.path.isdir(dest_path):
            shutil.rmtree(dest_path)
        shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
    """
    Context manager yielding a (fd, path) pair for a fresh temporary file.
    The file is deleted on normal exit; after an exception it is deleted
    only when always_clean is set.

    Usage:
      with temp_output_file() as (fd, path):
        ...
    """
    return _temp_output(
        False,
        prefix=prefix,
        suffix=suffix,
        dir=dir,
        make_parents=make_parents,
        always_clean=always_clean,
    )
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
    """
    Context manager yielding the path of a fresh temporary directory.
    The directory is deleted on normal exit; after an exception it is
    deleted only when always_clean is set.

    Usage:
      with temp_output_dir() as dirname:
        ...
    """
    return _temp_output(
        True,
        prefix=prefix,
        suffix=suffix,
        dir=dir,
        make_parents=make_parents,
        always_clean=always_clean,
    )
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
    # Shared implementation behind temp_output_file/temp_output_dir:
    # yields a temp-dir path, or a (fd, path) pair for a temp file.
    if dir and make_parents:
        make_all_dirs(dir)
    if is_dir:
        path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
        result = path
    else:
        (fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
        result = (fd, path)
    def clean():
        # Best-effort removal; swallow OS errors so cleanup never masks
        # an exception from the caller's block.
        try:
            rmtree_or_file(path, ignore_errors=True)
        except OSError:
            pass
    if always_clean:
        # finally guarantees cleanup even if the with-body raises.
        try:
            yield result
        finally:
            clean()
    else:
        # Cleanup only on normal exit; on error the temp output is left
        # behind (useful for inspecting partial results).
        yield result
        clean()
def read_string_from_file(path, encoding="utf8"):
    """
    Read the entire contents of a file and return it as a single (unicode)
    string, decoded with the given encoding.
    """
    with codecs.open(path, "rb", encoding=encoding) as f:
        return f.read()
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
    """
    Atomically replace the file at path with the given string contents,
    encoded with the given encoding. By default the previous file (if any)
    is kept as a ".bak" backup.
    """
    with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path, \
            codecs.open(tmp_path, "wb", encoding=encoding) as out:
        out.write(string)
def set_file_mtime(path, mtime, atime=None):
    """
    Set access and modification times on a file. If atime is falsy it
    defaults to mtime.

    The file is opened in append mode first, which creates it empty if it
    does not already exist.
    """
    if not atime:
        atime = mtime
    # Context manager instead of manual open/try/finally/close, so the
    # handle is always released even if os.utime raises.
    with open(path, 'a'):
        os.utime(path, (atime, mtime))
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
    """
    Copy a file on the local filesystem atomically: data is written to a
    temporary file next to dest_path and renamed into place, so a partial
    copy is never visible at dest_path. The source's modification time is
    preserved on the copy.
    """
    with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
        shutil.copyfile(source_path, tmp_path)
        set_file_mtime(tmp_path, os.path.getmtime(source_path))
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
    """
    Move a file, optionally creating the destination's parent directories
    and keeping a backup (dest_path + backup_suffix) of any file being
    replaced. With the default backup_suffix=None, no backup is made.
    """
    if make_parents:
        make_parent_dirs(dest_path)
    move_to_backup(dest_path, backup_suffix=backup_suffix)
    shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
    """
    Remove a path whatever it is: directory trees are removed recursively,
    while plain files and symlinks are unlinked (shutil.rmtree alone fails
    on those). With ignore_errors, a missing path is silently ignored.
    """
    # TODO: Could add an rsync-based delete, as in
    # https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
    if ignore_errors and not os.path.exists(path):
        return
    is_real_dir = os.path.isdir(path) and not os.path.islink(path)
    if is_real_dir:
        shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
    else:
        os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
    """
    Change permissions on a file or directory by shelling out to the POSIX
    `chmod` command. The built-in os.chmod is minimal: it supports neither
    fast recursive chmod nor "+X"-style symbolic mode expressions, so we
    delegate to the native tool instead. POSIX only.
    """
    if recursive:
        cmd = ["chmod", "-R", mode_expression, path]
    else:
        cmd = ["chmod", mode_expression, path]
    subprocess.check_call(cmd)
def file_sha1(path):
    """
    Compute the SHA1 hash of a file, reading it in chunks so arbitrarily
    large files are hashed in constant memory.

    Returns the digest as a lowercase hex string.
    """
    sha1 = hashlib.sha1()
    with open(path, "rb") as f:
        # 64KB chunks: far fewer syscalls than the previous 1KB reads on
        # large files, while memory use stays trivial. iter() with a sentinel
        # replaces the manual while/break loop.
        for block in iter(lambda: f.read(64 * 1024), b""):
            sha1.update(block)
    return sha1.hexdigest()
|
jlevy/strif | strif.py | movefile | python | def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
if make_parents:
make_parent_dirs(dest_path)
move_to_backup(dest_path, backup_suffix=backup_suffix)
shutil.move(source_path, dest_path) | Move file. With a few extra options. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L328-L335 | [
"def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):\n \"\"\"\n Move the given file or directory to the same name, with a backup suffix.\n If backup_suffix not supplied, move it to the extension \".bak\".\n NB: If backup_suffix is supplied and is None, don't do anything.\n \"\"\"\n if backup_suffix and os.path.exists(path):\n backup_path = path + backup_suffix\n # Some messy corner cases need to be handled for existing backups.\n # TODO: Note if this is a directory, and we do this twice at once, there is a potential race\n # that could leave one backup inside the other.\n if os.path.islink(backup_path):\n os.unlink(backup_path)\n elif os.path.isdir(backup_path):\n shutil.rmtree(backup_path)\n shutil.move(path, backup_path)\n",
"def make_parent_dirs(path, mode=0o777):\n \"\"\"\n Ensure parent directories of a file are created as needed.\n \"\"\"\n parent = os.path.dirname(path)\n if parent:\n make_all_dirs(parent, mode)\n return path\n"
] | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
"""
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx
"""
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
"""
Abbreviate a string, adding an indicator like an ellipsis if required.
"""
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
"""
Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 not to truncate items.
"""
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
"""
Expand a template string like "blah blah $FOO blah" using given value mapping.
"""
if template_str is None:
return None
else:
if transformer is None:
transformer = lambda v: v
try:
# Don't bother iterating items for Python 2+3 compatibility.
transformed_value_map = {k: transformer(value_map[k]) for k in value_map}
return Template(template_str).substitute(transformed_value_map)
except Exception as e:
raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
"""
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
to ensure shell safety.
"""
return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
"""
Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
"""
return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
"""
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
"""
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
"""
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
"""
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
return path
def make_parent_dirs(path, mode=0o777):
"""
Ensure parent directories of a file are created as needed.
"""
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
...
"""
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
if dir and make_parents:
make_all_dirs(dir)
if is_dir:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
result = path
else:
(fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
result = (fd, path)
def clean():
try:
rmtree_or_file(path, ignore_errors=True)
except OSError:
pass
if always_clean:
try:
yield result
finally:
clean()
else:
yield result
clean()
def read_string_from_file(path, encoding="utf8"):
"""
Read entire contents of file into a string.
"""
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
"""
Write entire file with given string contents, atomically. Keeps backup by default.
"""
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string)
def set_file_mtime(path, mtime, atime=None):
"""Set access and modification times on a file."""
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close()
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
"""
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
    """
    Copy a file or directory recursively, and atomically, renaming the file
    or top-level dir into place when the copy is complete.
    Unlike shutil.copytree, this will not fail when source_path is a plain file.
    """
    if os.path.isdir(source_path):
        with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
            shutil.copytree(source_path, tmp_path, symlinks=symlinks)
    else:
        copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
"""
rmtree fails on files or symlinks. This removes the target, whatever it is.
"""
# TODO: Could add an rsync-based delete, as in
# https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
if ignore_errors and not os.path.exists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
else:
os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
"""
This is ugly and will only work on POSIX, but the built-in Python os.chmod support
is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
both of which are slow for large trees. So just shell out.
"""
popenargs = ["chmod"]
if recursive:
popenargs.append("-R")
popenargs.append(mode_expression)
popenargs.append(path)
subprocess.check_call(popenargs)
def file_sha1(path):
"""
Compute SHA1 hash of a file.
"""
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
block = f.read(2 ** 10)
if not block:
break
sha1.update(block)
return sha1.hexdigest()
|
jlevy/strif | strif.py | rmtree_or_file | python | def rmtree_or_file(path, ignore_errors=False, onerror=None):
# TODO: Could add an rsync-based delete, as in
# https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
if ignore_errors and not os.path.exists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
else:
os.unlink(path) | rmtree fails on files or symlinks. This removes the target, whatever it is. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L338-L349 | null | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
"""
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx
"""
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
"""
Abbreviate a string, adding an indicator like an ellipsis if required.
"""
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
"""
Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 not to truncate items.
"""
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
"""
Expand a template string like "blah blah $FOO blah" using given value mapping.
"""
if template_str is None:
return None
else:
if transformer is None:
transformer = lambda v: v
try:
# Don't bother iterating items for Python 2+3 compatibility.
transformed_value_map = {k: transformer(value_map[k]) for k in value_map}
return Template(template_str).substitute(transformed_value_map)
except Exception as e:
raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
"""
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
to ensure shell safety.
"""
return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
"""
Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
"""
return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
"""
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
"""
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
"""
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
"""
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
return path
def make_parent_dirs(path, mode=0o777):
"""
Ensure parent directories of a file are created as needed.
"""
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
...
"""
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
if dir and make_parents:
make_all_dirs(dir)
if is_dir:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
result = path
else:
(fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
result = (fd, path)
def clean():
try:
rmtree_or_file(path, ignore_errors=True)
except OSError:
pass
if always_clean:
try:
yield result
finally:
clean()
else:
yield result
clean()
def read_string_from_file(path, encoding="utf8"):
"""
Read entire contents of file into a string.
"""
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
"""
Write entire file with given string contents, atomically. Keeps backup by default.
"""
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string)
def set_file_mtime(path, mtime, atime=None):
"""Set access and modification times on a file."""
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close()
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
"""
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
"""
Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file.
"""
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
    """
    Move a file, with a couple of conveniences: optionally create the
    destination's parent directories first, and optionally keep a backup of
    any file previously at `dest_path` (pass a `backup_suffix` to enable).
    """
    if make_parents:
        make_parent_dirs(dest_path)
    # No-op when backup_suffix is None; otherwise renames the old destination
    # out of the way before the move.
    move_to_backup(dest_path, backup_suffix=backup_suffix)
    shutil.move(source_path, dest_path)
def chmod_native(path, mode_expression, recursive=False):
    """
    Run the system `chmod` on a path. POSIX-only, but unlike Python's minimal
    os.chmod this supports symbolic expressions like "+X" as well as fast
    recursive application, both of which matter on large trees.
    """
    command = ["chmod", "-R"] if recursive else ["chmod"]
    command += [mode_expression, path]
    subprocess.check_call(command)
def file_sha1(path):
    """
    Compute the SHA1 hash of a file's contents.

    :param path: file to hash
    :return: lowercase hex digest string
    """
    sha1 = hashlib.sha1()
    with open(path, "rb") as f:
        # Read in 64 KB chunks: the original 1 KB chunk made hashing large
        # files needlessly slow, while 64 KB keeps memory use constant.
        # read() returns b"" exactly at EOF, which is the iter() sentinel.
        for block in iter(lambda: f.read(65536), b""):
            sha1.update(block)
    return sha1.hexdigest()
|
jlevy/strif | strif.py | chmod_native | python | def chmod_native(path, mode_expression, recursive=False):
popenargs = ["chmod"]
if recursive:
popenargs.append("-R")
popenargs.append(mode_expression)
popenargs.append(path)
subprocess.check_call(popenargs) | This is ugly and will only work on POSIX, but the built-in Python os.chmod support
is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
both of which are slow for large trees. So just shell out. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L352-L363 | null | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
    """
    Shallow-merge zero or more dicts into a new dict. When the same key
    appears in several dicts, the value from the dict latest in the argument
    list wins. Mainly helpful before Python 3.5's `{**a, **b}` unpacking.
    https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
    https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
    """
    merged = {}
    for mapping in dict_list:
        merged.update(mapping)
    return merged
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
    """
    Return a random lowercase alphanumeric string with at least the requested
    bits of randomness. Base 36 (not case sensitive), so the result is safe to
    use in filenames even on case-insensitive filesystems.

    :param bits: minimum bits of entropy in the returned id
    """
    alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"
    # Each base-36 character carries log2(36) ~= 5.17 bits of entropy, so this
    # length always meets or exceeds the requested bits.
    length = int(bits / 5.16) + 1
    # Draw characters independently (with replacement). The previous
    # implementation used _RANDOM.sample(), which draws *without* replacement:
    # that silently delivered slightly less entropy than requested and raised
    # ValueError whenever more than 36 characters were needed (bits > ~180).
    return "".join(_RANDOM.choice(alphabet) for _ in range(length))
def iso_timestamp():
    """
    Current time as an ISO 8601 timestamp in UTC, with the trailing "Z" for
    usual clarity.
    Example: 2015-09-12T08:41:12.397217Z
    """
    # Use UTC: "Z" designates Zulu/UTC time, so appending it to the local-time
    # value of datetime.now() (as the previous code did) produced a mislabeled
    # timestamp on any host whose local zone is not UTC.
    return datetime.utcnow().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
    """
    A unique id that begins with an ISO timestamp (including fractional
    seconds) followed by random bits, so ids sort nicely by time while still
    being unique.
    Example: 20150912T084555Z-378465-43vtwbx

    :param bits: bits of randomness in the trailing random component
    """
    # Raw string for the regex: the original '[^\w.]' relied on Python passing
    # the unrecognized \w escape through, which is deprecated in modern Python.
    compact_time = re.sub(r'[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-")
    return "%s-%s" % (compact_time, new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
    """
    Truncate a string to at most `max_len` characters, appending `indicator`
    (an ellipsis by default) when truncation occurred and there is room for
    it. Falsy strings and a falsy max_len are returned unchanged.
    """
    if not string or not max_len or len(string) <= max_len:
        return string
    if max_len <= len(indicator):
        # No room for the indicator itself; hard truncate.
        return string[:max_len]
    return string[:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
    """
    Render a list as a string, keeping at most `max_items` elements and
    truncating each element to `item_max_len` characters (pass None or 0 to
    leave items untouched). Appends `indicator` when the list was cut short.
    Falsy input is returned as-is.
    """
    if not items:
        return items
    rendered = [abbreviate_str("%s" % item, max_len=item_max_len)
                for item in items[:max_items]]
    if len(items) > max_items:
        rendered.append(indicator)
    return joiner.join(rendered)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
    """
    Expand a template string like "blah blah $FOO blah" using the given
    mapping of variable names to values. Each value may first be passed
    through `transformer`. Returns None for a None template; raises ValueError
    when expansion fails (e.g. a referenced variable is missing).
    """
    if template_str is None:
        return None
    if transformer is None:
        transformer = lambda value: value
    try:
        # Index by key rather than iterating items, for Python 2+3 compatibility.
        transformed = {key: transformer(value_map[key]) for key in value_map}
        return Template(template_str).substitute(transformed)
    except Exception as e:
        raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
    """
    Like expand_variables() but for shell command templates such as
    "cp $SOURCE $TARGET/blah": every substituted value is shell-quoted so the
    expanded command stays shell-safe.
    """
    return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
    """
    Split a template like "cp $SOURCE $TARGET/blah" into shell words and
    expand each word, yielding an argument list suitable for popen-style APIs.
    """
    words = shlex.split(template)
    return [expand_variables(word, values) for word in words]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
    """
    Rename the given file or directory to the same name plus a backup suffix
    (".bak" by default). Does nothing when `path` does not exist, or when
    backup_suffix is explicitly passed as None/empty.
    """
    if not backup_suffix or not os.path.exists(path):
        return
    backup_path = path + backup_suffix
    # An existing backup must be cleared out first; symlinks and directories
    # each need their own removal call.
    # TODO: For directories there is a potential race if two processes back up
    # at once: one backup could end up nested inside the other.
    if os.path.islink(backup_path):
        os.unlink(backup_path)
    elif os.path.isdir(backup_path):
        shutil.rmtree(backup_path)
    shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
    """
    Create a local directory along with any missing parents, returning `path`.
    Unlike os.makedirs(), succeeds quietly when the directory already exists.
    """
    # Create first and inspect the error afterwards (EAFP): checking and then
    # creating in two steps would be racy. Python 3 has exist_ok, but this
    # pattern also works on Python 2.
    # https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
    try:
        os.makedirs(path, mode=mode)
    except OSError as e:
        already_there = e.errno == errno.EEXIST and os.path.isdir(path)
        if not already_there:
            raise
    return path
def make_parent_dirs(path, mode=0o777):
    """
    Create any missing parent directories of the given file path, returning
    the path unchanged. A bare filename (no directory part) is a no-op.
    """
    parent_dir = os.path.dirname(path)
    if parent_dir:
        make_all_dirs(parent_dir, mode)
    return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
    """
    A context manager for convenience in writing a file or directory in an atomic way. Set up
    a temporary name, then rename it after the operation is done, optionally making a backup of
    the previous file or directory, if present.

    :param dest_path: final destination; the caller writes to the yielded temporary path instead
    :param make_parents: create missing parent directories for the temporary path
    :param backup_suffix: if set, keep any previous file/directory under this suffix
    :param suffix: pattern for the temporary name; its "%s" is filled with a random uid
        so concurrent writers do not collide
    :raises IOError: if the caller never created the temporary file
    """
    if dest_path == os.devnull:
        # Handle the (probably rare) case of writing to /dev/null.
        yield dest_path
    else:
        # Temporary path lives next to the destination so the final move is a
        # same-filesystem rename.
        tmp_path = ("%s" + suffix) % (dest_path, new_uid())
        if make_parents:
            make_parent_dirs(tmp_path)
        yield tmp_path
        # Note this is not in a finally block, so that result won't be renamed to final location
        # in case of abnormal exit.
        if not os.path.exists(tmp_path):
            raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
        if backup_suffix:
            move_to_backup(dest_path, backup_suffix=backup_suffix)
        # If the target already exists, and is a directory, it has to be removed
        # (shutil.move would otherwise move *into* it rather than replace it).
        if os.path.isdir(dest_path):
            shutil.rmtree(dest_path)
        shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
    """
    Context manager yielding an (fd, path) pair for a fresh temporary file,
    which is deleted when the context exits.
    Usage:
        with temp_output_file() as (fd, path):
            ...
    """
    return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir,
                        make_parents=make_parents, always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
    """
    Context manager yielding the path of a fresh temporary directory, which is
    deleted when the context exits.
    Usage:
        with temp_output_dir() as dirname:
            ...
    """
    return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir,
                        make_parents=make_parents, always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
    """
    Shared implementation behind temp_output_file() and temp_output_dir().
    Yields the mkdtemp path for a directory, or the (fd, path) pair from
    mkstemp for a file, and removes the temporary output on exit.
    """
    if dir and make_parents:
        make_all_dirs(dir)
    if is_dir:
        path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
        result = path
    else:
        # NOTE(review): the mkstemp fd is handed to the caller and never
        # closed here — presumably the caller is expected to close it; confirm.
        (fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
        result = (fd, path)
    def clean():
        # Best-effort removal; works for both files and directories.
        try:
            rmtree_or_file(path, ignore_errors=True)
        except OSError:
            pass
    if always_clean:
        try:
            yield result
        finally:
            clean()
    else:
        # Not wrapped in try/finally: when always_clean is False, an abnormal
        # exit deliberately leaves the temporary output in place.
        yield result
        clean()
def read_string_from_file(path, encoding="utf8"):
"""
Read entire contents of file into a string.
"""
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
"""
Write entire file with given string contents, atomically. Keeps backup by default.
"""
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string)
def set_file_mtime(path, mtime, atime=None):
"""Set access and modification times on a file."""
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close()
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
"""
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
"""
Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file.
"""
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Move file. With a few extra options.
"""
if make_parents:
make_parent_dirs(dest_path)
move_to_backup(dest_path, backup_suffix=backup_suffix)
shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
    """
    Remove a path of any kind: real directories are removed recursively, while
    plain files and symlinks are unlinked — shutil.rmtree alone fails on those.
    """
    # TODO: Could add an rsync-based delete, as in
    # https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
    if ignore_errors and not os.path.exists(path):
        return
    is_real_dir = os.path.isdir(path) and not os.path.islink(path)
    if is_real_dir:
        shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
    else:
        os.unlink(path)
def file_sha1(path):
"""
Compute SHA1 hash of a file.
"""
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
block = f.read(2 ** 10)
if not block:
break
sha1.update(block)
return sha1.hexdigest()
|
jlevy/strif | strif.py | file_sha1 | python | def file_sha1(path):
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
block = f.read(2 ** 10)
if not block:
break
sha1.update(block)
return sha1.hexdigest() | Compute SHA1 hash of a file. | train | https://github.com/jlevy/strif/blob/5a066f7a075ca822da59d665cfe88f0afd39a793/strif.py#L366-L377 | null | """
Strif is a tiny (<1000 loc) library of string- and file-related utilities for Python 2.7 and 3.
More information: https://github.com/jlevy/strif
"""
from string import Template
import re
import os
import errno
import random
import shutil
import shlex
import pipes
import tempfile
import hashlib
import codecs
from contextlib import contextmanager
from datetime import datetime
__author__ = 'jlevy'
VERSION = "0.2.2"
DESCRIPTION = "Tiny, useful lib for strings and files"
LONG_DESCRIPTION = __doc__
# The subprocess module has known threading issues, so prefer subprocess32.
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
# A pre-opened handle to /dev/null.
DEV_NULL = open(os.devnull, 'wb')
BACKUP_SUFFIX = ".bak"
_RANDOM = random.SystemRandom()
_RANDOM.seed()
def dict_merge(*dict_list):
"""
Given zero or more dicts, shallow copy and merge them into a new dict, with
precedence to dictionary values later in the dict list.
Helpful mainly before Python 3.5.
https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
https://docs.python.org/dev/whatsnew/3.5.html#pep-448-additional-unpacking-generalizations
"""
result = {}
for d in dict_list:
result.update(d)
return result
#
# ---- Identifiers and abbreviations ----
def new_uid(bits=64):
"""
A random alphanumeric value with at least the specified bits of randomness. We use base 36,
i.e. not case sensitive. Note this makes it suitable for filenames even on case-insensitive disks.
"""
return "".join(_RANDOM.sample("0123456789abcdefghijklmnopqrstuvwxyz",
int(bits / 5.16) + 1)) # log(26 + 10)/log(2) = 5.16
def iso_timestamp():
"""
ISO timestamp. With the Z for usual clarity.
Example: 2015-09-12T08:41:12.397217Z
"""
return datetime.now().isoformat() + 'Z'
def new_timestamped_uid(bits=32):
"""
A unique id that begins with an ISO timestamp followed by fractions of seconds and bits of
randomness. The advantage of this is it sorts nicely by time, while still being unique.
Example: 20150912T084555Z-378465-43vtwbx
"""
return "%s-%s" % (re.sub('[^\w.]', '', datetime.now().isoformat()).replace(".", "Z-"), new_uid(bits))
def abbreviate_str(string, max_len=80, indicator="..."):
"""
Abbreviate a string, adding an indicator like an ellipsis if required.
"""
if not string or not max_len or len(string) <= max_len:
return string
elif max_len <= len(indicator):
return string[0:max_len]
else:
return string[0:max_len - len(indicator)] + indicator
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
"""
Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 not to truncate items.
"""
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened)
#
# ---- Templating ----
def expand_variables(template_str, value_map, transformer=None):
"""
Expand a template string like "blah blah $FOO blah" using given value mapping.
"""
if template_str is None:
return None
else:
if transformer is None:
transformer = lambda v: v
try:
# Don't bother iterating items for Python 2+3 compatibility.
transformed_value_map = {k: transformer(value_map[k]) for k in value_map}
return Template(template_str).substitute(transformed_value_map)
except Exception as e:
raise ValueError("could not expand variable names in command '%s': %s" % (template_str, e))
def shell_expand_variables(template_str, value_map):
"""
Expand a shell template string like "cp $SOURCE $TARGET/blah", also quoting values as needed
to ensure shell safety.
"""
return expand_variables(template_str, value_map, transformer=pipes.quote)
def shell_expand_to_popen(template, values):
"""
Expand a template like "cp $SOURCE $TARGET/blah" into a list of popen arguments.
"""
return [expand_variables(item, values) for item in shlex.split(template)]
#
# ---- File operations ----
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
"""
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
"""
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path)
def make_all_dirs(path, mode=0o777):
"""
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
"""
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
return path
def make_parent_dirs(path, mode=0o777):
"""
Ensure parent directories of a file are created as needed.
"""
parent = os.path.dirname(path)
if parent:
make_all_dirs(parent, mode)
return path
@contextmanager
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path)
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary directory,
which is deleted when exiting the context.
Usage:
with temp_output_dir() as dirname:
...
"""
return _temp_output(True, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean)
@contextmanager
def _temp_output(is_dir, prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
if dir and make_parents:
make_all_dirs(dir)
if is_dir:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
result = path
else:
(fd, path) = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
result = (fd, path)
def clean():
try:
rmtree_or_file(path, ignore_errors=True)
except OSError:
pass
if always_clean:
try:
yield result
finally:
clean()
else:
yield result
clean()
def read_string_from_file(path, encoding="utf8"):
"""
Read entire contents of file into a string.
"""
with codecs.open(path, "rb", encoding=encoding) as f:
value = f.read()
return value
def write_string_to_file(path, string, make_parents=False, backup_suffix=BACKUP_SUFFIX, encoding="utf8"):
"""
Write entire file with given string contents, atomically. Keeps backup by default.
"""
with atomic_output_file(path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
with codecs.open(tmp_path, "wb", encoding=encoding) as f:
f.write(string)
def set_file_mtime(path, mtime, atime=None):
"""Set access and modification times on a file."""
if not atime:
atime = mtime
f = open(path, 'a')
try:
os.utime(path, (atime, mtime))
finally:
f.close()
def copyfile_atomic(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Copy file on local filesystem in an atomic way, so partial copies never exist. Preserves timestamps.
"""
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copyfile(source_path, tmp_path)
set_file_mtime(tmp_path, os.path.getmtime(source_path))
def copytree_atomic(source_path, dest_path, make_parents=False, backup_suffix=None, symlinks=False):
"""
Copy a file or directory recursively, and atomically, reanaming file or top-level dir when done.
Unlike shutil.copytree, this will not fail on a file.
"""
if os.path.isdir(source_path):
with atomic_output_file(dest_path, make_parents=make_parents, backup_suffix=backup_suffix) as tmp_path:
shutil.copytree(source_path, tmp_path, symlinks=symlinks)
else:
copyfile_atomic(source_path, dest_path, make_parents=make_parents, backup_suffix=backup_suffix)
def movefile(source_path, dest_path, make_parents=False, backup_suffix=None):
"""
Move file. With a few extra options.
"""
if make_parents:
make_parent_dirs(dest_path)
move_to_backup(dest_path, backup_suffix=backup_suffix)
shutil.move(source_path, dest_path)
def rmtree_or_file(path, ignore_errors=False, onerror=None):
"""
rmtree fails on files or symlinks. This removes the target, whatever it is.
"""
# TODO: Could add an rsync-based delete, as in
# https://github.com/vivlabs/instaclone/blob/master/instaclone/instaclone.py#L127-L143
if ignore_errors and not os.path.exists(path):
return
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
else:
os.unlink(path)
def chmod_native(path, mode_expression, recursive=False):
"""
This is ugly and will only work on POSIX, but the built-in Python os.chmod support
is very minimal, and neither supports fast recursive chmod nor "+X" type expressions,
both of which are slow for large trees. So just shell out.
"""
popenargs = ["chmod"]
if recursive:
popenargs.append("-R")
popenargs.append(mode_expression)
popenargs.append(path)
subprocess.check_call(popenargs)
|
edx/completion | completion/api/permissions.py | IsUserInUrl.has_permission | python | def has_permission(self, request, view):
url_username = request.parser_context.get('kwargs', {}).get('username', '')
if request.user.username.lower() != url_username.lower():
if request.user.is_staff:
return False # staff gets 403
raise Http404()
return True | Returns true if the current request is by the user themselves.
Note: a 404 is returned for non-staff instead of a 403. This is to prevent
users from being able to detect the existence of accounts. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/api/permissions.py#L35-L47 | null | class IsUserInUrl(BasePermission):
"""
Permission that checks to see if the request user matches the user in the URL.
"""
|
edx/completion | completion/fields.py | BigAutoField.db_type | python | def db_type(self, connection):
conn_module = type(connection).__module__
if "mysql" in conn_module:
return "bigint AUTO_INCREMENT"
elif "postgres" in conn_module:
return "bigserial"
return super(BigAutoField, self).db_type(connection) | The type of the field to insert into the database. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/fields.py#L19-L28 | null | class BigAutoField(models.AutoField):
"""
DO NOT USE - EXISTS FOR BACKWARDS COMPATIBILITY ONLY.
AutoField that uses BigIntegers.
This exists in Django as of version 1.10.
"""
def rel_db_type(self, connection): # pylint: disable=unused-argument
"""
The type to be used by relations pointing to this field.
Not used until Django 1.10.
"""
return "bigint"
|
edx/completion | completion/models.py | BlockCompletionManager.submit_completion | python | def submit_completion(self, user, course_key, block_key, completion):
# Raise ValueError to match normal django semantics for wrong type of field.
if not isinstance(course_key, CourseKey):
raise ValueError(
"course_key must be an instance of `opaque_keys.edx.keys.CourseKey`. Got {}".format(type(course_key))
)
try:
block_type = block_key.block_type
except AttributeError:
raise ValueError(
"block_key must be an instance of `opaque_keys.edx.keys.UsageKey`. Got {}".format(type(block_key))
)
if waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
try:
with transaction.atomic():
obj, is_new = self.get_or_create( # pylint: disable=unpacking-non-sequence
user=user,
course_key=course_key,
block_key=block_key,
defaults={
'completion': completion,
'block_type': block_type,
},
)
except IntegrityError:
# The completion was created concurrently by another process
log.info(
"An IntegrityError was raised when trying to create a BlockCompletion for %s:%s:%s. "
"Falling back to get().",
user,
course_key,
block_key,
)
obj = self.get(
user=user,
course_key=course_key,
block_key=block_key,
)
is_new = False
if not is_new and obj.completion != completion:
obj.completion = completion
obj.full_clean()
obj.save(update_fields={'completion', 'modified'})
else:
# If the feature is not enabled, this method should not be called.
# Error out with a RuntimeError.
raise RuntimeError(
"BlockCompletion.objects.submit_completion should not be \
called when the feature is disabled."
)
return obj, is_new | Update the completion value for the specified record.
Parameters:
* user (django.contrib.auth.models.User): The user for whom the
completion is being submitted.
* course_key (opaque_keys.edx.keys.CourseKey): The course in
which the submitted block is found.
* block_key (opaque_keys.edx.keys.UsageKey): The block that has had
its completion changed.
* completion (float in range [0.0, 1.0]): The fractional completion
value of the block (0.0 = incomplete, 1.0 = complete).
Return Value:
(BlockCompletion, bool): A tuple comprising the created or updated
BlockCompletion object and a boolean value indicating whether the
object was newly created by this call.
Raises:
ValueError:
If the wrong type is passed for one of the parameters.
django.core.exceptions.ValidationError:
If a float is passed that is not between 0.0 and 1.0.
django.db.DatabaseError:
If there was a problem getting, creating, or updating the
BlockCompletion record in the database.
This will also be a more specific error, as described here:
https://docs.djangoproject.com/en/1.11/ref/exceptions/#database-exceptions.
IntegrityError and OperationalError are relatively common
subclasses. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/models.py#L46-L132 | [
"def waffle():\n \"\"\"\n Returns the namespaced, cached, audited Waffle class for completion.\n \"\"\"\n return WaffleSwitchNamespace(name=WAFFLE_NAMESPACE, log_prefix='completion: ')\n"
class BlockCompletionManager(models.Manager):
    """
    Custom manager for BlockCompletion model.
    Adds submit_completion and submit_batch_completion methods.
    """

    # The atomic() decorator wraps the whole batch in one transaction: either
    # every completion in `blocks` is recorded, or none are.
    @transaction.atomic()
    def submit_batch_completion(self, user, course_key, blocks):
        """
        Performs a batch insertion of completion objects.

        Parameters:
            * user (django.contrib.auth.models.User): The user for whom the
              completions are being submitted.
            * course_key (opaque_keys.edx.keys.CourseKey): The course in
              which the submitted blocks are found.
            * blocks: A list of tuples of UsageKey to float completion values.
              (float in range [0.0, 1.0]): The fractional completion
              value of the block (0.0 = incomplete, 1.0 = complete).

        Return Value:
            Dict of (BlockCompletion, bool): A dictionary with a
            BlockCompletion object key and a value of bool. The boolean value
            indicates whether the object was newly created by this call.

        Raises:
            ValueError:
                If the wrong type is passed for one of the parameters.
            django.core.exceptions.ValidationError:
                If a float is passed that is not between 0.0 and 1.0.
            django.db.DatabaseError:
                If there was a problem getting, creating, or updating the
                BlockCompletion record in the database.
        """
        block_completions = {}
        # Delegate each entry to submit_completion, which handles validation
        # and get-or-create; collect whether each record was newly created.
        for block, completion in blocks:
            (block_completion, is_new) = self.submit_completion(user, course_key, block, completion)
            block_completions[block_completion] = is_new
        return block_completions
|
edx/completion | completion/models.py | BlockCompletionManager.submit_batch_completion | python | def submit_batch_completion(self, user, course_key, blocks):
block_completions = {}
for block, completion in blocks:
(block_completion, is_new) = self.submit_completion(user, course_key, block, completion)
block_completions[block_completion] = is_new
return block_completions | Performs a batch insertion of completion objects.
Parameters:
* user (django.contrib.auth.models.User): The user for whom the
completions are being submitted.
* course_key (opaque_keys.edx.keys.CourseKey): The course in
which the submitted blocks are found.
* blocks: A list of tuples of UsageKey to float completion values.
(float in range [0.0, 1.0]): The fractional completion
value of the block (0.0 = incomplete, 1.0 = complete).
Return Value:
Dict of (BlockCompletion, bool): A dictionary with a
BlockCompletion object key and a value of bool. The boolean value
indicates whether the object was newly created by this call.
Raises:
ValueError:
If the wrong type is passed for one of the parameters.
django.core.exceptions.ValidationError:
If a float is passed that is not between 0.0 and 1.0.
django.db.DatabaseError:
If there was a problem getting, creating, or updating the
BlockCompletion record in the database. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/models.py#L135-L169 | [
"def submit_completion(self, user, course_key, block_key, completion):\n \"\"\"\n Update the completion value for the specified record.\n\n Parameters:\n * user (django.contrib.auth.models.User): The user for whom the\n completion is being submitted.\n * course_key (opaque_keys.edx.keys.CourseKey): The course in\n which the submitted block is found.\n * block_key (opaque_keys.edx.keys.UsageKey): The block that has had\n its completion changed.\n * completion (float in range [0.0, 1.0]): The fractional completion\n value of the block (0.0 = incomplete, 1.0 = complete).\n\n Return Value:\n (BlockCompletion, bool): A tuple comprising the created or updated\n BlockCompletion object and a boolean value indicating whether the\n object was newly created by this call.\n\n Raises:\n\n ValueError:\n If the wrong type is passed for one of the parameters.\n\n django.core.exceptions.ValidationError:\n If a float is passed that is not between 0.0 and 1.0.\n\n django.db.DatabaseError:\n If there was a problem getting, creating, or updating the\n BlockCompletion record in the database.\n\n This will also be a more specific error, as described here:\n https://docs.djangoproject.com/en/1.11/ref/exceptions/#database-exceptions.\n IntegrityError and OperationalError are relatively common\n subclasses.\n \"\"\"\n\n # Raise ValueError to match normal django semantics for wrong type of field.\n if not isinstance(course_key, CourseKey):\n raise ValueError(\n \"course_key must be an instance of `opaque_keys.edx.keys.CourseKey`. Got {}\".format(type(course_key))\n )\n try:\n block_type = block_key.block_type\n except AttributeError:\n raise ValueError(\n \"block_key must be an instance of `opaque_keys.edx.keys.UsageKey`. 
Got {}\".format(type(block_key))\n )\n if waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):\n try:\n with transaction.atomic():\n obj, is_new = self.get_or_create( # pylint: disable=unpacking-non-sequence\n user=user,\n course_key=course_key,\n block_key=block_key,\n defaults={\n 'completion': completion,\n 'block_type': block_type,\n },\n )\n except IntegrityError:\n # The completion was created concurrently by another process\n log.info(\n \"An IntegrityError was raised when trying to create a BlockCompletion for %s:%s:%s. \"\n \"Falling back to get().\",\n user,\n course_key,\n block_key,\n )\n obj = self.get(\n user=user,\n course_key=course_key,\n block_key=block_key,\n )\n is_new = False\n if not is_new and obj.completion != completion:\n obj.completion = completion\n obj.full_clean()\n obj.save(update_fields={'completion', 'modified'})\n else:\n # If the feature is not enabled, this method should not be called.\n # Error out with a RuntimeError.\n raise RuntimeError(\n \"BlockCompletion.objects.submit_completion should not be \\\n called when the feature is disabled.\"\n )\n return obj, is_new\n"
] | class BlockCompletionManager(models.Manager):
"""
Custom manager for BlockCompletion model.
Adds submit_completion and submit_batch_completion methods.
"""
def submit_completion(self, user, course_key, block_key, completion):
"""
Update the completion value for the specified record.
Parameters:
* user (django.contrib.auth.models.User): The user for whom the
completion is being submitted.
* course_key (opaque_keys.edx.keys.CourseKey): The course in
which the submitted block is found.
* block_key (opaque_keys.edx.keys.UsageKey): The block that has had
its completion changed.
* completion (float in range [0.0, 1.0]): The fractional completion
value of the block (0.0 = incomplete, 1.0 = complete).
Return Value:
(BlockCompletion, bool): A tuple comprising the created or updated
BlockCompletion object and a boolean value indicating whether the
object was newly created by this call.
Raises:
ValueError:
If the wrong type is passed for one of the parameters.
django.core.exceptions.ValidationError:
If a float is passed that is not between 0.0 and 1.0.
django.db.DatabaseError:
If there was a problem getting, creating, or updating the
BlockCompletion record in the database.
This will also be a more specific error, as described here:
https://docs.djangoproject.com/en/1.11/ref/exceptions/#database-exceptions.
IntegrityError and OperationalError are relatively common
subclasses.
"""
# Raise ValueError to match normal django semantics for wrong type of field.
if not isinstance(course_key, CourseKey):
raise ValueError(
"course_key must be an instance of `opaque_keys.edx.keys.CourseKey`. Got {}".format(type(course_key))
)
try:
block_type = block_key.block_type
except AttributeError:
raise ValueError(
"block_key must be an instance of `opaque_keys.edx.keys.UsageKey`. Got {}".format(type(block_key))
)
if waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
try:
with transaction.atomic():
obj, is_new = self.get_or_create( # pylint: disable=unpacking-non-sequence
user=user,
course_key=course_key,
block_key=block_key,
defaults={
'completion': completion,
'block_type': block_type,
},
)
except IntegrityError:
# The completion was created concurrently by another process
log.info(
"An IntegrityError was raised when trying to create a BlockCompletion for %s:%s:%s. "
"Falling back to get().",
user,
course_key,
block_key,
)
obj = self.get(
user=user,
course_key=course_key,
block_key=block_key,
)
is_new = False
if not is_new and obj.completion != completion:
obj.completion = completion
obj.full_clean()
obj.save(update_fields={'completion', 'modified'})
else:
# If the feature is not enabled, this method should not be called.
# Error out with a RuntimeError.
raise RuntimeError(
"BlockCompletion.objects.submit_completion should not be \
called when the feature is disabled."
)
return obj, is_new
@transaction.atomic()
|
edx/completion | completion/models.py | BlockCompletion.full_block_key | python | def full_block_key(self):
if self.block_key.run is None:
# pylint: disable=unexpected-keyword-arg, no-value-for-parameter
return self.block_key.replace(course_key=self.course_key)
return self.block_key | Returns the "correct" usage key value with the run filled in. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/models.py#L202-L209 | null | class BlockCompletion(TimeStampedModel, models.Model):
"""
Track completion of completable blocks.
A completion is unique for each (user, course_key, block_key).
The block_type field is included separately from the block_key to
facilitate distinct aggregations of the completion of particular types of
block.
The completion value is stored as a float in the range [0.0, 1.0], and all
calculations are performed on this float, though current practice is to
only track binary completion, where 1.0 indicates that the block is
complete, and 0.0 indicates that the block is incomplete.
"""
id = BigAutoField(primary_key=True) # pylint: disable=invalid-name
user = models.ForeignKey(User, on_delete=models.CASCADE)
course_key = CourseKeyField(max_length=255)
# note: this usage key may not have the run filled in for
# old mongo courses. Use the full_block_key property
# instead when you want to use/compare the usage_key.
block_key = UsageKeyField(max_length=255)
block_type = models.CharField(max_length=64)
completion = models.FloatField(validators=[validate_percent])
objects = BlockCompletionManager()
@property
@classmethod
def get_course_completions(cls, user, course_key):
"""
Returns a dictionary mapping BlockKeys to completion values for all
BlockCompletion records for the given user and course_key.
Return value:
dict[BlockKey] = float
"""
user_course_completions = cls.user_course_completion_queryset(user, course_key)
return cls.completion_by_block_key(user_course_completions)
@classmethod
def user_course_completion_queryset(cls, user, course_key):
"""
Returns a Queryset of completions for a given user and course_key.
"""
return cls.objects.filter(user=user, course_key=course_key)
@classmethod
def latest_blocks_completed_all_courses(cls, user):
"""
Returns a dictionary mapping course_keys to a tuple containing
the block_key and modified time of the most recently modified
completion for the course.
Return value:
{course_key: (modified_date, block_key)}
"""
# Per the Django docs, dictionary params are not supported with the SQLite backend;
# with this backend, you must pass parameters as a list. We use SQLite for unit tests,
# so the same parameter is included twice in the parameter list below, rather than
# including it in a dictionary once.
latest_completions_by_course = cls.objects.raw(
'''
SELECT
cbc.id AS id,
cbc.course_key AS course_key,
cbc.block_key AS block_key,
cbc.modified AS modified
FROM
completion_blockcompletion cbc
JOIN (
SELECT
course_key,
MAX(modified) AS modified
FROM
completion_blockcompletion
WHERE
user_id = %s
GROUP BY
course_key
) latest
ON
cbc.course_key = latest.course_key AND
cbc.modified = latest.modified
WHERE
user_id = %s
;
''',
[user.id, user.id]
)
try:
return {
completion.course_key: (completion.modified, completion.block_key)
for completion in latest_completions_by_course
}
except KeyError:
# Iteration of the queryset above will always fail
# with a KeyError if the queryset is empty
return {}
@classmethod
def get_latest_block_completed(cls, user, course_key):
"""
Returns a BlockCompletion Object for the last modified user/course_key mapping,
or None if no such BlockCompletion exists.
Return value:
obj: block completion
"""
try:
latest_block_completion = cls.user_course_completion_queryset(user, course_key).latest()
except cls.DoesNotExist:
return
return latest_block_completion
@staticmethod
def completion_by_block_key(completion_iterable):
"""
Return value:
A dict mapping the full block key of a completion record to the completion value
for each BlockCompletion object given in completion_iterable. Each BlockKey is
corrected to have the run field filled in via the BlockCompletion.course_key field.
"""
return {completion.full_block_key: completion.completion for completion in completion_iterable}
class Meta(object):
index_together = [
('course_key', 'block_type', 'user'),
('user', 'course_key', 'modified'),
]
unique_together = [
('course_key', 'block_key', 'user')
]
get_latest_by = 'modified'
def __unicode__(self):
return 'BlockCompletion: {username}, {course_key}, {block_key}: {completion}'.format(
username=self.user.username,
course_key=self.course_key,
block_key=self.block_key,
completion=self.completion,
)
|
edx/completion | completion/models.py | BlockCompletion.get_course_completions | python | def get_course_completions(cls, user, course_key):
user_course_completions = cls.user_course_completion_queryset(user, course_key)
return cls.completion_by_block_key(user_course_completions) | Returns a dictionary mapping BlockKeys to completion values for all
BlockCompletion records for the given user and course_key.
Return value:
dict[BlockKey] = float | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/models.py#L212-L221 | null | class BlockCompletion(TimeStampedModel, models.Model):
"""
Track completion of completable blocks.
A completion is unique for each (user, course_key, block_key).
The block_type field is included separately from the block_key to
facilitate distinct aggregations of the completion of particular types of
block.
The completion value is stored as a float in the range [0.0, 1.0], and all
calculations are performed on this float, though current practice is to
only track binary completion, where 1.0 indicates that the block is
complete, and 0.0 indicates that the block is incomplete.
"""
id = BigAutoField(primary_key=True) # pylint: disable=invalid-name
user = models.ForeignKey(User, on_delete=models.CASCADE)
course_key = CourseKeyField(max_length=255)
# note: this usage key may not have the run filled in for
# old mongo courses. Use the full_block_key property
# instead when you want to use/compare the usage_key.
block_key = UsageKeyField(max_length=255)
block_type = models.CharField(max_length=64)
completion = models.FloatField(validators=[validate_percent])
objects = BlockCompletionManager()
@property
def full_block_key(self):
"""
Returns the "correct" usage key value with the run filled in.
"""
if self.block_key.run is None:
# pylint: disable=unexpected-keyword-arg, no-value-for-parameter
return self.block_key.replace(course_key=self.course_key)
return self.block_key
@classmethod
@classmethod
def user_course_completion_queryset(cls, user, course_key):
"""
Returns a Queryset of completions for a given user and course_key.
"""
return cls.objects.filter(user=user, course_key=course_key)
@classmethod
def latest_blocks_completed_all_courses(cls, user):
"""
Returns a dictionary mapping course_keys to a tuple containing
the block_key and modified time of the most recently modified
completion for the course.
Return value:
{course_key: (modified_date, block_key)}
"""
# Per the Django docs, dictionary params are not supported with the SQLite backend;
# with this backend, you must pass parameters as a list. We use SQLite for unit tests,
# so the same parameter is included twice in the parameter list below, rather than
# including it in a dictionary once.
latest_completions_by_course = cls.objects.raw(
'''
SELECT
cbc.id AS id,
cbc.course_key AS course_key,
cbc.block_key AS block_key,
cbc.modified AS modified
FROM
completion_blockcompletion cbc
JOIN (
SELECT
course_key,
MAX(modified) AS modified
FROM
completion_blockcompletion
WHERE
user_id = %s
GROUP BY
course_key
) latest
ON
cbc.course_key = latest.course_key AND
cbc.modified = latest.modified
WHERE
user_id = %s
;
''',
[user.id, user.id]
)
try:
return {
completion.course_key: (completion.modified, completion.block_key)
for completion in latest_completions_by_course
}
except KeyError:
# Iteration of the queryset above will always fail
# with a KeyError if the queryset is empty
return {}
@classmethod
def get_latest_block_completed(cls, user, course_key):
"""
Returns a BlockCompletion Object for the last modified user/course_key mapping,
or None if no such BlockCompletion exists.
Return value:
obj: block completion
"""
try:
latest_block_completion = cls.user_course_completion_queryset(user, course_key).latest()
except cls.DoesNotExist:
return
return latest_block_completion
@staticmethod
def completion_by_block_key(completion_iterable):
"""
Return value:
A dict mapping the full block key of a completion record to the completion value
for each BlockCompletion object given in completion_iterable. Each BlockKey is
corrected to have the run field filled in via the BlockCompletion.course_key field.
"""
return {completion.full_block_key: completion.completion for completion in completion_iterable}
class Meta(object):
index_together = [
('course_key', 'block_type', 'user'),
('user', 'course_key', 'modified'),
]
unique_together = [
('course_key', 'block_key', 'user')
]
get_latest_by = 'modified'
def __unicode__(self):
return 'BlockCompletion: {username}, {course_key}, {block_key}: {completion}'.format(
username=self.user.username,
course_key=self.course_key,
block_key=self.block_key,
completion=self.completion,
)
|
edx/completion | completion/models.py | BlockCompletion.user_course_completion_queryset | python | def user_course_completion_queryset(cls, user, course_key):
return cls.objects.filter(user=user, course_key=course_key) | Returns a Queryset of completions for a given user and course_key. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/models.py#L224-L228 | null | class BlockCompletion(TimeStampedModel, models.Model):
"""
Track completion of completable blocks.
A completion is unique for each (user, course_key, block_key).
The block_type field is included separately from the block_key to
facilitate distinct aggregations of the completion of particular types of
block.
The completion value is stored as a float in the range [0.0, 1.0], and all
calculations are performed on this float, though current practice is to
only track binary completion, where 1.0 indicates that the block is
complete, and 0.0 indicates that the block is incomplete.
"""
id = BigAutoField(primary_key=True) # pylint: disable=invalid-name
user = models.ForeignKey(User, on_delete=models.CASCADE)
course_key = CourseKeyField(max_length=255)
# note: this usage key may not have the run filled in for
# old mongo courses. Use the full_block_key property
# instead when you want to use/compare the usage_key.
block_key = UsageKeyField(max_length=255)
block_type = models.CharField(max_length=64)
completion = models.FloatField(validators=[validate_percent])
objects = BlockCompletionManager()
@property
def full_block_key(self):
"""
Returns the "correct" usage key value with the run filled in.
"""
if self.block_key.run is None:
# pylint: disable=unexpected-keyword-arg, no-value-for-parameter
return self.block_key.replace(course_key=self.course_key)
return self.block_key
@classmethod
def get_course_completions(cls, user, course_key):
"""
Returns a dictionary mapping BlockKeys to completion values for all
BlockCompletion records for the given user and course_key.
Return value:
dict[BlockKey] = float
"""
user_course_completions = cls.user_course_completion_queryset(user, course_key)
return cls.completion_by_block_key(user_course_completions)
@classmethod
@classmethod
def latest_blocks_completed_all_courses(cls, user):
"""
Returns a dictionary mapping course_keys to a tuple containing
the block_key and modified time of the most recently modified
completion for the course.
Return value:
{course_key: (modified_date, block_key)}
"""
# Per the Django docs, dictionary params are not supported with the SQLite backend;
# with this backend, you must pass parameters as a list. We use SQLite for unit tests,
# so the same parameter is included twice in the parameter list below, rather than
# including it in a dictionary once.
latest_completions_by_course = cls.objects.raw(
'''
SELECT
cbc.id AS id,
cbc.course_key AS course_key,
cbc.block_key AS block_key,
cbc.modified AS modified
FROM
completion_blockcompletion cbc
JOIN (
SELECT
course_key,
MAX(modified) AS modified
FROM
completion_blockcompletion
WHERE
user_id = %s
GROUP BY
course_key
) latest
ON
cbc.course_key = latest.course_key AND
cbc.modified = latest.modified
WHERE
user_id = %s
;
''',
[user.id, user.id]
)
try:
return {
completion.course_key: (completion.modified, completion.block_key)
for completion in latest_completions_by_course
}
except KeyError:
# Iteration of the queryset above will always fail
# with a KeyError if the queryset is empty
return {}
@classmethod
def get_latest_block_completed(cls, user, course_key):
"""
Returns a BlockCompletion Object for the last modified user/course_key mapping,
or None if no such BlockCompletion exists.
Return value:
obj: block completion
"""
try:
latest_block_completion = cls.user_course_completion_queryset(user, course_key).latest()
except cls.DoesNotExist:
return
return latest_block_completion
@staticmethod
def completion_by_block_key(completion_iterable):
"""
Return value:
A dict mapping the full block key of a completion record to the completion value
for each BlockCompletion object given in completion_iterable. Each BlockKey is
corrected to have the run field filled in via the BlockCompletion.course_key field.
"""
return {completion.full_block_key: completion.completion for completion in completion_iterable}
class Meta(object):
index_together = [
('course_key', 'block_type', 'user'),
('user', 'course_key', 'modified'),
]
unique_together = [
('course_key', 'block_key', 'user')
]
get_latest_by = 'modified'
def __unicode__(self):
return 'BlockCompletion: {username}, {course_key}, {block_key}: {completion}'.format(
username=self.user.username,
course_key=self.course_key,
block_key=self.block_key,
completion=self.completion,
)
|
edx/completion | completion/handlers.py | scorable_block_completion | python | def scorable_block_completion(sender, **kwargs): # pylint: disable=unused-argument
if not waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
return
course_key = CourseKey.from_string(kwargs['course_id'])
block_key = UsageKey.from_string(kwargs['usage_id'])
block_cls = XBlock.load_class(block_key.block_type)
if XBlockCompletionMode.get_mode(block_cls) != XBlockCompletionMode.COMPLETABLE:
return
if getattr(block_cls, 'has_custom_completion', False):
return
user = User.objects.get(id=kwargs['user_id'])
if kwargs.get('score_deleted'):
completion = 0.0
else:
completion = 1.0
if not kwargs.get('grader_response'):
BlockCompletion.objects.submit_completion(
user=user,
course_key=course_key,
block_key=block_key,
completion=completion,
) | When a problem is scored, submit a new BlockCompletion for that block. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/handlers.py#L15-L39 | [
"def waffle():\n \"\"\"\n Returns the namespaced, cached, audited Waffle class for completion.\n \"\"\"\n return WaffleSwitchNamespace(name=WAFFLE_NAMESPACE, log_prefix='completion: ')\n"
] | """
Signal handlers to trigger completion updates.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from django.contrib.auth.models import User
from opaque_keys.edx.keys import CourseKey, UsageKey
from xblock.completable import XBlockCompletionMode
from xblock.core import XBlock
from . import waffle
from .models import BlockCompletion
|
edx/completion | completion/api/v1/views.py | CompletionBatchView._validate_and_parse | python | def _validate_and_parse(self, batch_object):
if not waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
raise ValidationError(
_("BlockCompletion.objects.submit_batch_completion should not be called when the feature is disabled.")
)
for key in self.REQUIRED_KEYS:
if key not in batch_object:
raise ValidationError(_("Key '{key}' not found.").format(key=key))
username = batch_object['username']
user = User.objects.get(username=username)
course_key_obj = self._validate_and_parse_course_key(batch_object['course_key'])
if not CourseEnrollment.is_enrolled(user, course_key_obj):
raise ValidationError(_('User is not enrolled in course.'))
blocks = batch_object['blocks']
block_objs = []
for block_key in blocks:
block_key_obj = self._validate_and_parse_block_key(block_key, course_key_obj)
completion = float(blocks[block_key])
block_objs.append((block_key_obj, completion))
return user, course_key_obj, block_objs | Performs validation on the batch object to make sure it is in the proper format.
Parameters:
* batch_object: The data provided to a POST. The expected format is the following:
{
"username": "username",
"course_key": "course-key",
"blocks": {
"block_key1": 0.0,
"block_key2": 1.0,
"block_key3": 1.0,
}
}
Return Value:
* tuple: (User, CourseKey, List of tuples (UsageKey, completion_float)
Raises:
django.core.exceptions.ValidationError:
If any aspect of validation fails a ValidationError is raised.
ObjectDoesNotExist:
If a database object cannot be found an ObjectDoesNotExist is raised. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/api/v1/views.py#L55-L107 | null | class CompletionBatchView(APIView):
"""
Handles API requests to submit batch completions.
"""
authentication_classes = (
JwtAuthentication, OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser,
)
permission_classes = (permissions.IsAuthenticated, IsStaffOrOwner,)
REQUIRED_KEYS = ['username', 'course_key', 'blocks']
def _validate_and_parse_course_key(self, course_key):
"""
Returns a validated parsed CourseKey deserialized from the given course_key.
"""
try:
return CourseKey.from_string(course_key)
except InvalidKeyError:
raise ValidationError(_("Invalid course key: {}").format(course_key))
def _validate_and_parse_block_key(self, block_key, course_key_obj):
"""
Returns a validated, parsed UsageKey deserialized from the given block_key.
"""
try:
block_key_obj = UsageKey.from_string(block_key)
except InvalidKeyError:
raise ValidationError(_("Invalid block key: {}").format(block_key))
if block_key_obj.run is None:
expected_matching_course_key = course_key_obj.replace(run=None)
else:
expected_matching_course_key = course_key_obj
if block_key_obj.course_key != expected_matching_course_key:
raise ValidationError(
_("Block with key: '{key}' is not in course {course}").format(key=block_key, course=course_key_obj)
)
return block_key_obj
def post(self, request, *args, **kwargs): # pylint: disable=unused-argument
"""
Inserts a batch of completions.
REST Endpoint Format:
{
"username": "username",
"course_key": "course-key",
"blocks": {
"block_key1": 0.0,
"block_key2": 1.0,
"block_key3": 1.0,
}
}
**Returns**
A Response object, with an appropriate status code.
If successful, status code is 200.
{
"detail" : _("ok")
}
Otherwise, a 400 or 404 may be returned, and the "detail" content will explain the error.
"""
batch_object = request.data or {}
try:
user, course_key, blocks = self._validate_and_parse(batch_object)
BlockCompletion.objects.submit_batch_completion(user, course_key, blocks)
except ValidationError as exc:
return Response({
"detail": _(' ').join(text_type(msg) for msg in exc.messages),
}, status=status.HTTP_400_BAD_REQUEST)
except ValueError as exc:
return Response({
"detail": text_type(exc),
}, status=status.HTTP_400_BAD_REQUEST)
except ObjectDoesNotExist as exc:
return Response({
"detail": text_type(exc),
}, status=status.HTTP_404_NOT_FOUND)
except DatabaseError as exc:
return Response({
"detail": text_type(exc),
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response({"detail": _("ok")}, status=status.HTTP_200_OK)
|
edx/completion | completion/api/v1/views.py | CompletionBatchView._validate_and_parse_course_key | python | def _validate_and_parse_course_key(self, course_key):
try:
return CourseKey.from_string(course_key)
except InvalidKeyError:
raise ValidationError(_("Invalid course key: {}").format(course_key)) | Returns a validated parsed CourseKey deserialized from the given course_key. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/api/v1/views.py#L109-L116 | null | class CompletionBatchView(APIView):
"""
Handles API requests to submit batch completions.
"""
authentication_classes = (
JwtAuthentication, OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser,
)
permission_classes = (permissions.IsAuthenticated, IsStaffOrOwner,)
REQUIRED_KEYS = ['username', 'course_key', 'blocks']
def _validate_and_parse(self, batch_object):
"""
Performs validation on the batch object to make sure it is in the proper format.
Parameters:
* batch_object: The data provided to a POST. The expected format is the following:
{
"username": "username",
"course_key": "course-key",
"blocks": {
"block_key1": 0.0,
"block_key2": 1.0,
"block_key3": 1.0,
}
}
Return Value:
* tuple: (User, CourseKey, List of tuples (UsageKey, completion_float)
Raises:
django.core.exceptions.ValidationError:
If any aspect of validation fails a ValidationError is raised.
ObjectDoesNotExist:
If a database object cannot be found an ObjectDoesNotExist is raised.
"""
if not waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
raise ValidationError(
_("BlockCompletion.objects.submit_batch_completion should not be called when the feature is disabled.")
)
for key in self.REQUIRED_KEYS:
if key not in batch_object:
raise ValidationError(_("Key '{key}' not found.").format(key=key))
username = batch_object['username']
user = User.objects.get(username=username)
course_key_obj = self._validate_and_parse_course_key(batch_object['course_key'])
if not CourseEnrollment.is_enrolled(user, course_key_obj):
raise ValidationError(_('User is not enrolled in course.'))
blocks = batch_object['blocks']
block_objs = []
for block_key in blocks:
block_key_obj = self._validate_and_parse_block_key(block_key, course_key_obj)
completion = float(blocks[block_key])
block_objs.append((block_key_obj, completion))
return user, course_key_obj, block_objs
def _validate_and_parse_block_key(self, block_key, course_key_obj):
"""
Returns a validated, parsed UsageKey deserialized from the given block_key.
"""
try:
block_key_obj = UsageKey.from_string(block_key)
except InvalidKeyError:
raise ValidationError(_("Invalid block key: {}").format(block_key))
if block_key_obj.run is None:
expected_matching_course_key = course_key_obj.replace(run=None)
else:
expected_matching_course_key = course_key_obj
if block_key_obj.course_key != expected_matching_course_key:
raise ValidationError(
_("Block with key: '{key}' is not in course {course}").format(key=block_key, course=course_key_obj)
)
return block_key_obj
def post(self, request, *args, **kwargs): # pylint: disable=unused-argument
"""
Inserts a batch of completions.
REST Endpoint Format:
{
"username": "username",
"course_key": "course-key",
"blocks": {
"block_key1": 0.0,
"block_key2": 1.0,
"block_key3": 1.0,
}
}
**Returns**
A Response object, with an appropriate status code.
If successful, status code is 200.
{
"detail" : _("ok")
}
Otherwise, a 400 or 404 may be returned, and the "detail" content will explain the error.
"""
batch_object = request.data or {}
try:
user, course_key, blocks = self._validate_and_parse(batch_object)
BlockCompletion.objects.submit_batch_completion(user, course_key, blocks)
except ValidationError as exc:
return Response({
"detail": _(' ').join(text_type(msg) for msg in exc.messages),
}, status=status.HTTP_400_BAD_REQUEST)
except ValueError as exc:
return Response({
"detail": text_type(exc),
}, status=status.HTTP_400_BAD_REQUEST)
except ObjectDoesNotExist as exc:
return Response({
"detail": text_type(exc),
}, status=status.HTTP_404_NOT_FOUND)
except DatabaseError as exc:
return Response({
"detail": text_type(exc),
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response({"detail": _("ok")}, status=status.HTTP_200_OK)
|
edx/completion | completion/api/v1/views.py | CompletionBatchView._validate_and_parse_block_key | python | def _validate_and_parse_block_key(self, block_key, course_key_obj):
try:
block_key_obj = UsageKey.from_string(block_key)
except InvalidKeyError:
raise ValidationError(_("Invalid block key: {}").format(block_key))
if block_key_obj.run is None:
expected_matching_course_key = course_key_obj.replace(run=None)
else:
expected_matching_course_key = course_key_obj
if block_key_obj.course_key != expected_matching_course_key:
raise ValidationError(
_("Block with key: '{key}' is not in course {course}").format(key=block_key, course=course_key_obj)
)
return block_key_obj | Returns a validated, parsed UsageKey deserialized from the given block_key. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/api/v1/views.py#L118-L137 | null | class CompletionBatchView(APIView):
"""
Handles API requests to submit batch completions.
"""
authentication_classes = (
JwtAuthentication, OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser,
)
permission_classes = (permissions.IsAuthenticated, IsStaffOrOwner,)
REQUIRED_KEYS = ['username', 'course_key', 'blocks']
def _validate_and_parse(self, batch_object):
"""
Performs validation on the batch object to make sure it is in the proper format.
Parameters:
* batch_object: The data provided to a POST. The expected format is the following:
{
"username": "username",
"course_key": "course-key",
"blocks": {
"block_key1": 0.0,
"block_key2": 1.0,
"block_key3": 1.0,
}
}
Return Value:
* tuple: (User, CourseKey, List of tuples (UsageKey, completion_float)
Raises:
django.core.exceptions.ValidationError:
If any aspect of validation fails a ValidationError is raised.
ObjectDoesNotExist:
If a database object cannot be found an ObjectDoesNotExist is raised.
"""
if not waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
raise ValidationError(
_("BlockCompletion.objects.submit_batch_completion should not be called when the feature is disabled.")
)
for key in self.REQUIRED_KEYS:
if key not in batch_object:
raise ValidationError(_("Key '{key}' not found.").format(key=key))
username = batch_object['username']
user = User.objects.get(username=username)
course_key_obj = self._validate_and_parse_course_key(batch_object['course_key'])
if not CourseEnrollment.is_enrolled(user, course_key_obj):
raise ValidationError(_('User is not enrolled in course.'))
blocks = batch_object['blocks']
block_objs = []
for block_key in blocks:
block_key_obj = self._validate_and_parse_block_key(block_key, course_key_obj)
completion = float(blocks[block_key])
block_objs.append((block_key_obj, completion))
return user, course_key_obj, block_objs
def _validate_and_parse_course_key(self, course_key):
"""
Returns a validated parsed CourseKey deserialized from the given course_key.
"""
try:
return CourseKey.from_string(course_key)
except InvalidKeyError:
raise ValidationError(_("Invalid course key: {}").format(course_key))
def post(self, request, *args, **kwargs): # pylint: disable=unused-argument
"""
Inserts a batch of completions.
REST Endpoint Format:
{
"username": "username",
"course_key": "course-key",
"blocks": {
"block_key1": 0.0,
"block_key2": 1.0,
"block_key3": 1.0,
}
}
**Returns**
A Response object, with an appropriate status code.
If successful, status code is 200.
{
"detail" : _("ok")
}
Otherwise, a 400 or 404 may be returned, and the "detail" content will explain the error.
"""
batch_object = request.data or {}
try:
user, course_key, blocks = self._validate_and_parse(batch_object)
BlockCompletion.objects.submit_batch_completion(user, course_key, blocks)
except ValidationError as exc:
return Response({
"detail": _(' ').join(text_type(msg) for msg in exc.messages),
}, status=status.HTTP_400_BAD_REQUEST)
except ValueError as exc:
return Response({
"detail": text_type(exc),
}, status=status.HTTP_400_BAD_REQUEST)
except ObjectDoesNotExist as exc:
return Response({
"detail": text_type(exc),
}, status=status.HTTP_404_NOT_FOUND)
except DatabaseError as exc:
return Response({
"detail": text_type(exc),
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response({"detail": _("ok")}, status=status.HTTP_200_OK)
|
edx/completion | completion/api/v1/views.py | CompletionBatchView.post | python | def post(self, request, *args, **kwargs): # pylint: disable=unused-argument
batch_object = request.data or {}
try:
user, course_key, blocks = self._validate_and_parse(batch_object)
BlockCompletion.objects.submit_batch_completion(user, course_key, blocks)
except ValidationError as exc:
return Response({
"detail": _(' ').join(text_type(msg) for msg in exc.messages),
}, status=status.HTTP_400_BAD_REQUEST)
except ValueError as exc:
return Response({
"detail": text_type(exc),
}, status=status.HTTP_400_BAD_REQUEST)
except ObjectDoesNotExist as exc:
return Response({
"detail": text_type(exc),
}, status=status.HTTP_404_NOT_FOUND)
except DatabaseError as exc:
return Response({
"detail": text_type(exc),
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response({"detail": _("ok")}, status=status.HTTP_200_OK) | Inserts a batch of completions.
REST Endpoint Format:
{
"username": "username",
"course_key": "course-key",
"blocks": {
"block_key1": 0.0,
"block_key2": 1.0,
"block_key3": 1.0,
}
}
**Returns**
A Response object, with an appropriate status code.
If successful, status code is 200.
{
"detail" : _("ok")
}
Otherwise, a 400 or 404 may be returned, and the "detail" content will explain the error. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/api/v1/views.py#L139-L187 | null | class CompletionBatchView(APIView):
"""
Handles API requests to submit batch completions.
"""
authentication_classes = (
JwtAuthentication, OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser,
)
permission_classes = (permissions.IsAuthenticated, IsStaffOrOwner,)
REQUIRED_KEYS = ['username', 'course_key', 'blocks']
def _validate_and_parse(self, batch_object):
"""
Performs validation on the batch object to make sure it is in the proper format.
Parameters:
* batch_object: The data provided to a POST. The expected format is the following:
{
"username": "username",
"course_key": "course-key",
"blocks": {
"block_key1": 0.0,
"block_key2": 1.0,
"block_key3": 1.0,
}
}
Return Value:
* tuple: (User, CourseKey, List of tuples (UsageKey, completion_float)
Raises:
django.core.exceptions.ValidationError:
If any aspect of validation fails a ValidationError is raised.
ObjectDoesNotExist:
If a database object cannot be found an ObjectDoesNotExist is raised.
"""
if not waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
raise ValidationError(
_("BlockCompletion.objects.submit_batch_completion should not be called when the feature is disabled.")
)
for key in self.REQUIRED_KEYS:
if key not in batch_object:
raise ValidationError(_("Key '{key}' not found.").format(key=key))
username = batch_object['username']
user = User.objects.get(username=username)
course_key_obj = self._validate_and_parse_course_key(batch_object['course_key'])
if not CourseEnrollment.is_enrolled(user, course_key_obj):
raise ValidationError(_('User is not enrolled in course.'))
blocks = batch_object['blocks']
block_objs = []
for block_key in blocks:
block_key_obj = self._validate_and_parse_block_key(block_key, course_key_obj)
completion = float(blocks[block_key])
block_objs.append((block_key_obj, completion))
return user, course_key_obj, block_objs
def _validate_and_parse_course_key(self, course_key):
"""
Returns a validated parsed CourseKey deserialized from the given course_key.
"""
try:
return CourseKey.from_string(course_key)
except InvalidKeyError:
raise ValidationError(_("Invalid course key: {}").format(course_key))
def _validate_and_parse_block_key(self, block_key, course_key_obj):
"""
Returns a validated, parsed UsageKey deserialized from the given block_key.
"""
try:
block_key_obj = UsageKey.from_string(block_key)
except InvalidKeyError:
raise ValidationError(_("Invalid block key: {}").format(block_key))
if block_key_obj.run is None:
expected_matching_course_key = course_key_obj.replace(run=None)
else:
expected_matching_course_key = course_key_obj
if block_key_obj.course_key != expected_matching_course_key:
raise ValidationError(
_("Block with key: '{key}' is not in course {course}").format(key=block_key, course=course_key_obj)
)
return block_key_obj
|
edx/completion | completion/api/v1/views.py | SubsectionCompletionView.get | python | def get(self, request, username, course_key, subsection_id):
def get_completion(course_completions, all_blocks, block_id):
"""
Recursively get the aggregate completion for a subsection,
given the subsection block and a list of all blocks.
Parameters:
course_completions: a dictionary of completion values by block IDs
all_blocks: a dictionary of the block structure for a subsection
block_id: an ID of a block for which to get completion
"""
block = all_blocks.get(block_id)
child_ids = block.get('children', [])
if not child_ids:
return course_completions.get(block.serializer.instance, 0)
completion = 0
total_children = 0
for child_id in child_ids:
completion += get_completion(course_completions, all_blocks, child_id)
total_children += 1
return int(completion == total_children)
user_id = User.objects.get(username=username).id
block_types_filter = [
'course',
'chapter',
'sequential',
'vertical',
'html',
'problem',
'video',
'discussion',
'drag-and-drop-v2'
]
blocks = get_blocks(
request,
UsageKey.from_string(subsection_id),
nav_depth=2,
requested_fields=[
'children'
],
block_types_filter=block_types_filter
)
course_completions = BlockCompletion.get_course_completions(user_id, CourseKey.from_string(course_key))
aggregated_completion = get_completion(course_completions, blocks['blocks'], blocks['root'])
return Response({"completion": aggregated_completion}, status=status.HTTP_200_OK) | Returns completion for a (user, subsection, course). | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/api/v1/views.py#L199-L252 | [
"def get_completion(course_completions, all_blocks, block_id):\n \"\"\"\n Recursively get the aggregate completion for a subsection,\n given the subsection block and a list of all blocks.\n\n Parameters:\n course_completions: a dictionary of completion values by block IDs\n all_blocks: a dictionary of the block structure for a subsection\n block_id: an ID of a block for which to get completion\n \"\"\"\n block = all_blocks.get(block_id)\n child_ids = block.get('children', [])\n if not child_ids:\n return course_completions.get(block.serializer.instance, 0)\n\n completion = 0\n total_children = 0\n for child_id in child_ids:\n completion += get_completion(course_completions, all_blocks, child_id)\n total_children += 1\n\n return int(completion == total_children)\n"
] | class SubsectionCompletionView(APIView):
"""
Handles API endpoints for the milestones experiments.
TODO: EDUCATOR-2358 Remove this class after the
milestones experiment is no longer running.
"""
authentication_classes = (JwtAuthentication, SessionAuthenticationAllowInactiveUser,)
permission_classes = (permissions.IsAuthenticated, IsUserInUrl)
|
edx/completion | completion/services.py | CompletionService.get_completions | python | def get_completions(self, candidates):
queryset = BlockCompletion.user_course_completion_queryset(self._user, self._course_key).filter(
block_key__in=candidates
)
completions = BlockCompletion.completion_by_block_key(queryset)
candidates_with_runs = [candidate.replace(course_key=self._course_key) for candidate in candidates]
for candidate in candidates_with_runs:
if candidate not in completions:
completions[candidate] = 0.0
return completions | Given an iterable collection of block_keys in the course, returns a
mapping of the block_keys to the present completion values of their
associated blocks.
If a completion is not found for a given block in the current course,
0.0 is returned. The service does not attempt to verify that the block
exists within the course.
Parameters:
candidates: collection of BlockKeys within the current course.
Note: Usage keys may not have the course run filled in for old mongo courses.
This method checks for completion records against a set of BlockKey candidates with the course run
filled in from self._course_key.
Return value:
dict[BlockKey] -> float: Mapping blocks to their completion value. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/services.py#L41-L70 | null | class CompletionService(object):
"""
Service for handling completions for a user within a course.
Exposes
* self.completion_tracking_enabled() -> bool
* self.get_completions(candidates)
* self.vertical_is_complete(vertical_item)
Constructor takes a user object and course_key as arguments.
"""
def __init__(self, user, course_key):
self._user = user
self._course_key = course_key
def completion_tracking_enabled(self):
"""
Exposes ENABLE_COMPLETION_TRACKING waffle switch to XModule runtime
Return value:
bool -> True if completion tracking is enabled.
"""
return waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING)
def vertical_is_complete(self, item):
"""
Calculates and returns whether a particular vertical is complete.
The logic in this method is temporary, and will go away once the
completion API is able to store a first-order notion of completeness
for parent blocks (right now it just stores completion for leaves-
problems, HTML, video, etc.).
"""
if item.location.block_type != 'vertical':
raise ValueError('The passed in xblock is not a vertical type!')
if not self.completion_tracking_enabled():
return None
# this is temporary local logic and will be removed when the whole course tree is included in completion
child_locations = [
child.location for child in item.get_children() if child.location.block_type != 'discussion'
]
completions = self.get_completions(child_locations)
for child_location in child_locations:
if completions[child_location] < 1.0:
return False
return True
def get_complete_on_view_delay_ms(self):
"""
Do not mark blocks complete-on-view until they have been visible for
the returned amount of time, in milliseconds. Defaults to 5000.
"""
return getattr(settings, 'COMPLETION_BY_VIEWING_DELAY_MS', 5000)
def can_mark_block_complete_on_view(self, block):
"""
Returns True if the xblock can be marked complete on view.
This is true of any non-customized, non-scorable, completable block.
"""
return (
XBlockCompletionMode.get_mode(block) == XBlockCompletionMode.COMPLETABLE
and not getattr(block, 'has_custom_completion', False)
and not getattr(block, 'has_score', False)
)
def blocks_to_mark_complete_on_view(self, blocks):
"""
Returns a set of blocks which should be marked complete on view and haven't been yet.
"""
blocks = {block for block in blocks if self.can_mark_block_complete_on_view(block)}
completions = self.get_completions({block.location for block in blocks})
return {block for block in blocks if completions.get(block.location, 0) < 1.0}
def submit_group_completion(self, block_key, completion, users=None, user_ids=None):
"""
Submit a completion for a group of users.
Arguments:
block_key (opaque_key.edx.keys.UsageKey): The block to submit completions for.
completion (float): A value in the range [0.0, 1.0]
users ([django.contrib.auth.models.User]): An optional iterable of Users that completed the block.
user_ids ([int]): An optional iterable of ids of Users that completed the block.
Returns a list of (BlockCompletion, bool) where the boolean indicates
whether the given BlockCompletion was newly created.
"""
if users is None:
users = []
if user_ids is None:
user_ids = []
more_users = User.objects.filter(id__in=user_ids)
if len(more_users) < len(user_ids):
found_ids = {u.id for u in more_users}
not_found_ids = [pk for pk in user_ids if pk not in found_ids]
raise User.DoesNotExist("User not found with id(s): {}".format(not_found_ids))
users.extend(more_users)
submitted = []
for user in users:
submitted.append(BlockCompletion.objects.submit_completion(
user=user,
course_key=self._course_key,
block_key=block_key,
completion=completion
))
return submitted
def submit_completion(self, block_key, completion):
"""
Submit a completion for the service user and course.
Returns a (BlockCompletion, bool) where the boolean indicates
whether the given BlockCompletion was newly created.
"""
return BlockCompletion.objects.submit_completion(
user=self._user,
course_key=self._course_key,
block_key=block_key,
completion=completion
)
|
edx/completion | completion/services.py | CompletionService.vertical_is_complete | python | def vertical_is_complete(self, item):
if item.location.block_type != 'vertical':
raise ValueError('The passed in xblock is not a vertical type!')
if not self.completion_tracking_enabled():
return None
# this is temporary local logic and will be removed when the whole course tree is included in completion
child_locations = [
child.location for child in item.get_children() if child.location.block_type != 'discussion'
]
completions = self.get_completions(child_locations)
for child_location in child_locations:
if completions[child_location] < 1.0:
return False
return True | Calculates and returns whether a particular vertical is complete.
The logic in this method is temporary, and will go away once the
completion API is able to store a first-order notion of completeness
for parent blocks (right now it just stores completion for leaves-
problems, HTML, video, etc.). | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/services.py#L72-L94 | [
"def completion_tracking_enabled(self):\n \"\"\"\n Exposes ENABLE_COMPLETION_TRACKING waffle switch to XModule runtime\n\n Return value:\n\n bool -> True if completion tracking is enabled.\n \"\"\"\n return waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING)\n"
] | class CompletionService(object):
"""
Service for handling completions for a user within a course.
Exposes
* self.completion_tracking_enabled() -> bool
* self.get_completions(candidates)
* self.vertical_is_complete(vertical_item)
Constructor takes a user object and course_key as arguments.
"""
def __init__(self, user, course_key):
self._user = user
self._course_key = course_key
def completion_tracking_enabled(self):
"""
Exposes ENABLE_COMPLETION_TRACKING waffle switch to XModule runtime
Return value:
bool -> True if completion tracking is enabled.
"""
return waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING)
def get_completions(self, candidates):
"""
Given an iterable collection of block_keys in the course, returns a
mapping of the block_keys to the present completion values of their
associated blocks.
If a completion is not found for a given block in the current course,
0.0 is returned. The service does not attempt to verify that the block
exists within the course.
Parameters:
candidates: collection of BlockKeys within the current course.
Note: Usage keys may not have the course run filled in for old mongo courses.
This method checks for completion records against a set of BlockKey candidates with the course run
filled in from self._course_key.
Return value:
dict[BlockKey] -> float: Mapping blocks to their completion value.
"""
queryset = BlockCompletion.user_course_completion_queryset(self._user, self._course_key).filter(
block_key__in=candidates
)
completions = BlockCompletion.completion_by_block_key(queryset)
candidates_with_runs = [candidate.replace(course_key=self._course_key) for candidate in candidates]
for candidate in candidates_with_runs:
if candidate not in completions:
completions[candidate] = 0.0
return completions
def get_complete_on_view_delay_ms(self):
"""
Do not mark blocks complete-on-view until they have been visible for
the returned amount of time, in milliseconds. Defaults to 5000.
"""
return getattr(settings, 'COMPLETION_BY_VIEWING_DELAY_MS', 5000)
def can_mark_block_complete_on_view(self, block):
"""
Returns True if the xblock can be marked complete on view.
This is true of any non-customized, non-scorable, completable block.
"""
return (
XBlockCompletionMode.get_mode(block) == XBlockCompletionMode.COMPLETABLE
and not getattr(block, 'has_custom_completion', False)
and not getattr(block, 'has_score', False)
)
def blocks_to_mark_complete_on_view(self, blocks):
"""
Returns a set of blocks which should be marked complete on view and haven't been yet.
"""
blocks = {block for block in blocks if self.can_mark_block_complete_on_view(block)}
completions = self.get_completions({block.location for block in blocks})
return {block for block in blocks if completions.get(block.location, 0) < 1.0}
def submit_group_completion(self, block_key, completion, users=None, user_ids=None):
"""
Submit a completion for a group of users.
Arguments:
block_key (opaque_key.edx.keys.UsageKey): The block to submit completions for.
completion (float): A value in the range [0.0, 1.0]
users ([django.contrib.auth.models.User]): An optional iterable of Users that completed the block.
user_ids ([int]): An optional iterable of ids of Users that completed the block.
Returns a list of (BlockCompletion, bool) where the boolean indicates
whether the given BlockCompletion was newly created.
"""
if users is None:
users = []
if user_ids is None:
user_ids = []
more_users = User.objects.filter(id__in=user_ids)
if len(more_users) < len(user_ids):
found_ids = {u.id for u in more_users}
not_found_ids = [pk for pk in user_ids if pk not in found_ids]
raise User.DoesNotExist("User not found with id(s): {}".format(not_found_ids))
users.extend(more_users)
submitted = []
for user in users:
submitted.append(BlockCompletion.objects.submit_completion(
user=user,
course_key=self._course_key,
block_key=block_key,
completion=completion
))
return submitted
def submit_completion(self, block_key, completion):
"""
Submit a completion for the service user and course.
Returns a (BlockCompletion, bool) where the boolean indicates
whether the given BlockCompletion was newly created.
"""
return BlockCompletion.objects.submit_completion(
user=self._user,
course_key=self._course_key,
block_key=block_key,
completion=completion
)
|
edx/completion | completion/services.py | CompletionService.can_mark_block_complete_on_view | python | def can_mark_block_complete_on_view(self, block):
return (
XBlockCompletionMode.get_mode(block) == XBlockCompletionMode.COMPLETABLE
and not getattr(block, 'has_custom_completion', False)
and not getattr(block, 'has_score', False)
) | Returns True if the xblock can be marked complete on view.
This is true of any non-customized, non-scorable, completable block. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/services.py#L103-L112 | null | class CompletionService(object):
"""
Service for handling completions for a user within a course.
Exposes
* self.completion_tracking_enabled() -> bool
* self.get_completions(candidates)
* self.vertical_is_complete(vertical_item)
Constructor takes a user object and course_key as arguments.
"""
def __init__(self, user, course_key):
self._user = user
self._course_key = course_key
def completion_tracking_enabled(self):
"""
Exposes ENABLE_COMPLETION_TRACKING waffle switch to XModule runtime
Return value:
bool -> True if completion tracking is enabled.
"""
return waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING)
def get_completions(self, candidates):
"""
Given an iterable collection of block_keys in the course, returns a
mapping of the block_keys to the present completion values of their
associated blocks.
If a completion is not found for a given block in the current course,
0.0 is returned. The service does not attempt to verify that the block
exists within the course.
Parameters:
candidates: collection of BlockKeys within the current course.
Note: Usage keys may not have the course run filled in for old mongo courses.
This method checks for completion records against a set of BlockKey candidates with the course run
filled in from self._course_key.
Return value:
dict[BlockKey] -> float: Mapping blocks to their completion value.
"""
queryset = BlockCompletion.user_course_completion_queryset(self._user, self._course_key).filter(
block_key__in=candidates
)
completions = BlockCompletion.completion_by_block_key(queryset)
candidates_with_runs = [candidate.replace(course_key=self._course_key) for candidate in candidates]
for candidate in candidates_with_runs:
if candidate not in completions:
completions[candidate] = 0.0
return completions
def vertical_is_complete(self, item):
"""
Calculates and returns whether a particular vertical is complete.
The logic in this method is temporary, and will go away once the
completion API is able to store a first-order notion of completeness
for parent blocks (right now it just stores completion for leaves-
problems, HTML, video, etc.).
"""
if item.location.block_type != 'vertical':
raise ValueError('The passed in xblock is not a vertical type!')
if not self.completion_tracking_enabled():
return None
# this is temporary local logic and will be removed when the whole course tree is included in completion
child_locations = [
child.location for child in item.get_children() if child.location.block_type != 'discussion'
]
completions = self.get_completions(child_locations)
for child_location in child_locations:
if completions[child_location] < 1.0:
return False
return True
def get_complete_on_view_delay_ms(self):
"""
Do not mark blocks complete-on-view until they have been visible for
the returned amount of time, in milliseconds. Defaults to 5000.
"""
return getattr(settings, 'COMPLETION_BY_VIEWING_DELAY_MS', 5000)
def blocks_to_mark_complete_on_view(self, blocks):
"""
Returns a set of blocks which should be marked complete on view and haven't been yet.
"""
blocks = {block for block in blocks if self.can_mark_block_complete_on_view(block)}
completions = self.get_completions({block.location for block in blocks})
return {block for block in blocks if completions.get(block.location, 0) < 1.0}
def submit_group_completion(self, block_key, completion, users=None, user_ids=None):
"""
Submit a completion for a group of users.
Arguments:
block_key (opaque_key.edx.keys.UsageKey): The block to submit completions for.
completion (float): A value in the range [0.0, 1.0]
users ([django.contrib.auth.models.User]): An optional iterable of Users that completed the block.
user_ids ([int]): An optional iterable of ids of Users that completed the block.
Returns a list of (BlockCompletion, bool) where the boolean indicates
whether the given BlockCompletion was newly created.
"""
if users is None:
users = []
if user_ids is None:
user_ids = []
more_users = User.objects.filter(id__in=user_ids)
if len(more_users) < len(user_ids):
found_ids = {u.id for u in more_users}
not_found_ids = [pk for pk in user_ids if pk not in found_ids]
raise User.DoesNotExist("User not found with id(s): {}".format(not_found_ids))
users.extend(more_users)
submitted = []
for user in users:
submitted.append(BlockCompletion.objects.submit_completion(
user=user,
course_key=self._course_key,
block_key=block_key,
completion=completion
))
return submitted
def submit_completion(self, block_key, completion):
"""
Submit a completion for the service user and course.
Returns a (BlockCompletion, bool) where the boolean indicates
whether the given BlockCompletion was newly created.
"""
return BlockCompletion.objects.submit_completion(
user=self._user,
course_key=self._course_key,
block_key=block_key,
completion=completion
)
|
edx/completion | completion/services.py | CompletionService.blocks_to_mark_complete_on_view | python | def blocks_to_mark_complete_on_view(self, blocks):
blocks = {block for block in blocks if self.can_mark_block_complete_on_view(block)}
completions = self.get_completions({block.location for block in blocks})
return {block for block in blocks if completions.get(block.location, 0) < 1.0} | Returns a set of blocks which should be marked complete on view and haven't been yet. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/services.py#L114-L120 | [
"def get_completions(self, candidates):\n \"\"\"\n Given an iterable collection of block_keys in the course, returns a\n mapping of the block_keys to the present completion values of their\n associated blocks.\n\n If a completion is not found for a given block in the current course,\n 0.0 is returned. The service does not attempt to verify that the block\n exists within the course.\n\n Parameters:\n\n candidates: collection of BlockKeys within the current course.\n Note: Usage keys may not have the course run filled in for old mongo courses.\n This method checks for completion records against a set of BlockKey candidates with the course run\n filled in from self._course_key.\n\n Return value:\n\n dict[BlockKey] -> float: Mapping blocks to their completion value.\n \"\"\"\n queryset = BlockCompletion.user_course_completion_queryset(self._user, self._course_key).filter(\n block_key__in=candidates\n )\n completions = BlockCompletion.completion_by_block_key(queryset)\n candidates_with_runs = [candidate.replace(course_key=self._course_key) for candidate in candidates]\n for candidate in candidates_with_runs:\n if candidate not in completions:\n completions[candidate] = 0.0\n return completions\n"
] | class CompletionService(object):
"""
Service for handling completions for a user within a course.
Exposes
* self.completion_tracking_enabled() -> bool
* self.get_completions(candidates)
* self.vertical_is_complete(vertical_item)
Constructor takes a user object and course_key as arguments.
"""
def __init__(self, user, course_key):
self._user = user
self._course_key = course_key
def completion_tracking_enabled(self):
"""
Exposes ENABLE_COMPLETION_TRACKING waffle switch to XModule runtime
Return value:
bool -> True if completion tracking is enabled.
"""
return waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING)
def get_completions(self, candidates):
"""
Given an iterable collection of block_keys in the course, returns a
mapping of the block_keys to the present completion values of their
associated blocks.
If a completion is not found for a given block in the current course,
0.0 is returned. The service does not attempt to verify that the block
exists within the course.
Parameters:
candidates: collection of BlockKeys within the current course.
Note: Usage keys may not have the course run filled in for old mongo courses.
This method checks for completion records against a set of BlockKey candidates with the course run
filled in from self._course_key.
Return value:
dict[BlockKey] -> float: Mapping blocks to their completion value.
"""
queryset = BlockCompletion.user_course_completion_queryset(self._user, self._course_key).filter(
block_key__in=candidates
)
completions = BlockCompletion.completion_by_block_key(queryset)
candidates_with_runs = [candidate.replace(course_key=self._course_key) for candidate in candidates]
for candidate in candidates_with_runs:
if candidate not in completions:
completions[candidate] = 0.0
return completions
def vertical_is_complete(self, item):
"""
Calculates and returns whether a particular vertical is complete.
The logic in this method is temporary, and will go away once the
completion API is able to store a first-order notion of completeness
for parent blocks (right now it just stores completion for leaves-
problems, HTML, video, etc.).
"""
if item.location.block_type != 'vertical':
raise ValueError('The passed in xblock is not a vertical type!')
if not self.completion_tracking_enabled():
return None
# this is temporary local logic and will be removed when the whole course tree is included in completion
child_locations = [
child.location for child in item.get_children() if child.location.block_type != 'discussion'
]
completions = self.get_completions(child_locations)
for child_location in child_locations:
if completions[child_location] < 1.0:
return False
return True
def get_complete_on_view_delay_ms(self):
"""
Do not mark blocks complete-on-view until they have been visible for
the returned amount of time, in milliseconds. Defaults to 5000.
"""
return getattr(settings, 'COMPLETION_BY_VIEWING_DELAY_MS', 5000)
def can_mark_block_complete_on_view(self, block):
"""
Returns True if the xblock can be marked complete on view.
This is true of any non-customized, non-scorable, completable block.
"""
return (
XBlockCompletionMode.get_mode(block) == XBlockCompletionMode.COMPLETABLE
and not getattr(block, 'has_custom_completion', False)
and not getattr(block, 'has_score', False)
)
def submit_group_completion(self, block_key, completion, users=None, user_ids=None):
"""
Submit a completion for a group of users.
Arguments:
block_key (opaque_key.edx.keys.UsageKey): The block to submit completions for.
completion (float): A value in the range [0.0, 1.0]
users ([django.contrib.auth.models.User]): An optional iterable of Users that completed the block.
user_ids ([int]): An optional iterable of ids of Users that completed the block.
Returns a list of (BlockCompletion, bool) where the boolean indicates
whether the given BlockCompletion was newly created.
"""
if users is None:
users = []
if user_ids is None:
user_ids = []
more_users = User.objects.filter(id__in=user_ids)
if len(more_users) < len(user_ids):
found_ids = {u.id for u in more_users}
not_found_ids = [pk for pk in user_ids if pk not in found_ids]
raise User.DoesNotExist("User not found with id(s): {}".format(not_found_ids))
users.extend(more_users)
submitted = []
for user in users:
submitted.append(BlockCompletion.objects.submit_completion(
user=user,
course_key=self._course_key,
block_key=block_key,
completion=completion
))
return submitted
def submit_completion(self, block_key, completion):
"""
Submit a completion for the service user and course.
Returns a (BlockCompletion, bool) where the boolean indicates
whether the given BlockCompletion was newly created.
"""
return BlockCompletion.objects.submit_completion(
user=self._user,
course_key=self._course_key,
block_key=block_key,
completion=completion
)
|
edx/completion | completion/services.py | CompletionService.submit_group_completion | python | def submit_group_completion(self, block_key, completion, users=None, user_ids=None):
if users is None:
users = []
if user_ids is None:
user_ids = []
more_users = User.objects.filter(id__in=user_ids)
if len(more_users) < len(user_ids):
found_ids = {u.id for u in more_users}
not_found_ids = [pk for pk in user_ids if pk not in found_ids]
raise User.DoesNotExist("User not found with id(s): {}".format(not_found_ids))
users.extend(more_users)
submitted = []
for user in users:
submitted.append(BlockCompletion.objects.submit_completion(
user=user,
course_key=self._course_key,
block_key=block_key,
completion=completion
))
return submitted | Submit a completion for a group of users.
Arguments:
block_key (opaque_key.edx.keys.UsageKey): The block to submit completions for.
completion (float): A value in the range [0.0, 1.0]
users ([django.contrib.auth.models.User]): An optional iterable of Users that completed the block.
user_ids ([int]): An optional iterable of ids of Users that completed the block.
Returns a list of (BlockCompletion, bool) where the boolean indicates
whether the given BlockCompletion was newly created. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/services.py#L122-L155 | null | class CompletionService(object):
"""
Service for handling completions for a user within a course.
Exposes
* self.completion_tracking_enabled() -> bool
* self.get_completions(candidates)
* self.vertical_is_complete(vertical_item)
Constructor takes a user object and course_key as arguments.
"""
def __init__(self, user, course_key):
self._user = user
self._course_key = course_key
def completion_tracking_enabled(self):
"""
Exposes ENABLE_COMPLETION_TRACKING waffle switch to XModule runtime
Return value:
bool -> True if completion tracking is enabled.
"""
return waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING)
def get_completions(self, candidates):
"""
Given an iterable collection of block_keys in the course, returns a
mapping of the block_keys to the present completion values of their
associated blocks.
If a completion is not found for a given block in the current course,
0.0 is returned. The service does not attempt to verify that the block
exists within the course.
Parameters:
candidates: collection of BlockKeys within the current course.
Note: Usage keys may not have the course run filled in for old mongo courses.
This method checks for completion records against a set of BlockKey candidates with the course run
filled in from self._course_key.
Return value:
dict[BlockKey] -> float: Mapping blocks to their completion value.
"""
queryset = BlockCompletion.user_course_completion_queryset(self._user, self._course_key).filter(
block_key__in=candidates
)
completions = BlockCompletion.completion_by_block_key(queryset)
candidates_with_runs = [candidate.replace(course_key=self._course_key) for candidate in candidates]
for candidate in candidates_with_runs:
if candidate not in completions:
completions[candidate] = 0.0
return completions
def vertical_is_complete(self, item):
"""
Calculates and returns whether a particular vertical is complete.
The logic in this method is temporary, and will go away once the
completion API is able to store a first-order notion of completeness
for parent blocks (right now it just stores completion for leaves-
problems, HTML, video, etc.).
"""
if item.location.block_type != 'vertical':
raise ValueError('The passed in xblock is not a vertical type!')
if not self.completion_tracking_enabled():
return None
# this is temporary local logic and will be removed when the whole course tree is included in completion
child_locations = [
child.location for child in item.get_children() if child.location.block_type != 'discussion'
]
completions = self.get_completions(child_locations)
for child_location in child_locations:
if completions[child_location] < 1.0:
return False
return True
def get_complete_on_view_delay_ms(self):
"""
Do not mark blocks complete-on-view until they have been visible for
the returned amount of time, in milliseconds. Defaults to 5000.
"""
return getattr(settings, 'COMPLETION_BY_VIEWING_DELAY_MS', 5000)
def can_mark_block_complete_on_view(self, block):
"""
Returns True if the xblock can be marked complete on view.
This is true of any non-customized, non-scorable, completable block.
"""
return (
XBlockCompletionMode.get_mode(block) == XBlockCompletionMode.COMPLETABLE
and not getattr(block, 'has_custom_completion', False)
and not getattr(block, 'has_score', False)
)
def blocks_to_mark_complete_on_view(self, blocks):
"""
Returns a set of blocks which should be marked complete on view and haven't been yet.
"""
blocks = {block for block in blocks if self.can_mark_block_complete_on_view(block)}
completions = self.get_completions({block.location for block in blocks})
return {block for block in blocks if completions.get(block.location, 0) < 1.0}
def submit_completion(self, block_key, completion):
"""
Submit a completion for the service user and course.
Returns a (BlockCompletion, bool) where the boolean indicates
whether the given BlockCompletion was newly created.
"""
return BlockCompletion.objects.submit_completion(
user=self._user,
course_key=self._course_key,
block_key=block_key,
completion=completion
)
|
edx/completion | completion/services.py | CompletionService.submit_completion | python | def submit_completion(self, block_key, completion):
return BlockCompletion.objects.submit_completion(
user=self._user,
course_key=self._course_key,
block_key=block_key,
completion=completion
) | Submit a completion for the service user and course.
Returns a (BlockCompletion, bool) where the boolean indicates
whether the given BlockCompletion was newly created. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/services.py#L157-L169 | null | class CompletionService(object):
"""
Service for handling completions for a user within a course.
Exposes
* self.completion_tracking_enabled() -> bool
* self.get_completions(candidates)
* self.vertical_is_complete(vertical_item)
Constructor takes a user object and course_key as arguments.
"""
def __init__(self, user, course_key):
self._user = user
self._course_key = course_key
def completion_tracking_enabled(self):
"""
Exposes ENABLE_COMPLETION_TRACKING waffle switch to XModule runtime
Return value:
bool -> True if completion tracking is enabled.
"""
return waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING)
def get_completions(self, candidates):
"""
Given an iterable collection of block_keys in the course, returns a
mapping of the block_keys to the present completion values of their
associated blocks.
If a completion is not found for a given block in the current course,
0.0 is returned. The service does not attempt to verify that the block
exists within the course.
Parameters:
candidates: collection of BlockKeys within the current course.
Note: Usage keys may not have the course run filled in for old mongo courses.
This method checks for completion records against a set of BlockKey candidates with the course run
filled in from self._course_key.
Return value:
dict[BlockKey] -> float: Mapping blocks to their completion value.
"""
queryset = BlockCompletion.user_course_completion_queryset(self._user, self._course_key).filter(
block_key__in=candidates
)
completions = BlockCompletion.completion_by_block_key(queryset)
candidates_with_runs = [candidate.replace(course_key=self._course_key) for candidate in candidates]
for candidate in candidates_with_runs:
if candidate not in completions:
completions[candidate] = 0.0
return completions
def vertical_is_complete(self, item):
"""
Calculates and returns whether a particular vertical is complete.
The logic in this method is temporary, and will go away once the
completion API is able to store a first-order notion of completeness
for parent blocks (right now it just stores completion for leaves-
problems, HTML, video, etc.).
"""
if item.location.block_type != 'vertical':
raise ValueError('The passed in xblock is not a vertical type!')
if not self.completion_tracking_enabled():
return None
# this is temporary local logic and will be removed when the whole course tree is included in completion
child_locations = [
child.location for child in item.get_children() if child.location.block_type != 'discussion'
]
completions = self.get_completions(child_locations)
for child_location in child_locations:
if completions[child_location] < 1.0:
return False
return True
def get_complete_on_view_delay_ms(self):
"""
Do not mark blocks complete-on-view until they have been visible for
the returned amount of time, in milliseconds. Defaults to 5000.
"""
return getattr(settings, 'COMPLETION_BY_VIEWING_DELAY_MS', 5000)
def can_mark_block_complete_on_view(self, block):
"""
Returns True if the xblock can be marked complete on view.
This is true of any non-customized, non-scorable, completable block.
"""
return (
XBlockCompletionMode.get_mode(block) == XBlockCompletionMode.COMPLETABLE
and not getattr(block, 'has_custom_completion', False)
and not getattr(block, 'has_score', False)
)
def blocks_to_mark_complete_on_view(self, blocks):
"""
Returns a set of blocks which should be marked complete on view and haven't been yet.
"""
blocks = {block for block in blocks if self.can_mark_block_complete_on_view(block)}
completions = self.get_completions({block.location for block in blocks})
return {block for block in blocks if completions.get(block.location, 0) < 1.0}
def submit_group_completion(self, block_key, completion, users=None, user_ids=None):
"""
Submit a completion for a group of users.
Arguments:
block_key (opaque_key.edx.keys.UsageKey): The block to submit completions for.
completion (float): A value in the range [0.0, 1.0]
users ([django.contrib.auth.models.User]): An optional iterable of Users that completed the block.
user_ids ([int]): An optional iterable of ids of Users that completed the block.
Returns a list of (BlockCompletion, bool) where the boolean indicates
whether the given BlockCompletion was newly created.
"""
if users is None:
users = []
if user_ids is None:
user_ids = []
more_users = User.objects.filter(id__in=user_ids)
if len(more_users) < len(user_ids):
found_ids = {u.id for u in more_users}
not_found_ids = [pk for pk in user_ids if pk not in found_ids]
raise User.DoesNotExist("User not found with id(s): {}".format(not_found_ids))
users.extend(more_users)
submitted = []
for user in users:
submitted.append(BlockCompletion.objects.submit_completion(
user=user,
course_key=self._course_key,
block_key=block_key,
completion=completion
))
return submitted
|
edx/completion | completion/utilities.py | get_key_to_last_completed_course_block | python | def get_key_to_last_completed_course_block(user, course_key):
last_completed_block = BlockCompletion.get_latest_block_completed(user, course_key)
if last_completed_block is not None:
return last_completed_block.block_key
raise UnavailableCompletionData(course_key) | Returns the last block a "user" completed in a course (stated as "course_key").
raises UnavailableCompletionData when the user has not completed blocks in
the course.
raises UnavailableCompletionData when the visual progress waffle flag is
disabled. | train | https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/utilities.py#L13-L29 | null | """
File is the public API for BlockCompletion. It is the interface that prevents
external users from depending on the BlockCompletion model. Methods working with
the BlockCompletion model should be included here.
"""
from __future__ import unicode_literals, absolute_import
from .exceptions import UnavailableCompletionData
from .models import BlockCompletion
|
gitpython-developers/smmap | smmap/mman.py | WindowCursor._destroy | python | def _destroy(self):
self.unuse_region()
if self._rlist is not None:
# Actual client count, which doesn't include the reference kept by the manager, nor ours
# as we are about to be deleted
try:
if len(self._rlist) == 0:
# Free all resources associated with the mapped file
self._manager._fdict.pop(self._rlist.path_or_fd())
# END remove regions list from manager
except (TypeError, KeyError):
# sometimes, during shutdown, getrefcount is None. Its possible
# to re-import it, however, its probably better to just ignore
# this python problem (for now).
# The next step is to get rid of the error prone getrefcount alltogether.
pass | Destruction code to decrement counters | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/mman.py#L55-L72 | [
"def unuse_region(self):\n \"\"\"Unuse the current region. Does nothing if we have no current region\n\n **Note:** the cursor unuses the region automatically upon destruction. It is recommended\n to un-use the region once you are done reading from it in persistent cursors as it\n helps to free up resource more quickly\"\"\"\n if self._region is not None:\n self._region.increment_client_count(-1)\n self._region = None\n"
] | class WindowCursor(object):
"""
Pointer into the mapped region of the memory manager, keeping the map
alive until it is destroyed and no other client uses it.
Cursors should not be created manually, but are instead returned by the SlidingWindowMapManager
**Note:**: The current implementation is suited for static and sliding window managers, but it also means
that it must be suited for the somewhat quite different sliding manager. It could be improved, but
I see no real need to do so."""
__slots__ = (
'_manager', # the manger keeping all file regions
'_rlist', # a regions list with regions for our file
'_region', # our current class:`MapRegion` or None
'_ofs', # relative offset from the actually mapped area to our start area
'_size' # maximum size we should provide
)
def __init__(self, manager=None, regions=None):
self._manager = manager
self._rlist = regions
self._region = None
self._ofs = 0
self._size = 0
def __del__(self):
self._destroy()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._destroy()
# END exception handling
# END handle regions
def _copy_from(self, rhs):
"""Copy all data from rhs into this instance, handles usage count"""
self._manager = rhs._manager
self._rlist = type(rhs._rlist)(rhs._rlist)
self._region = rhs._region
self._ofs = rhs._ofs
self._size = rhs._size
for region in self._rlist:
region.increment_client_count()
if self._region is not None:
self._region.increment_client_count()
# END handle regions
def __copy__(self):
"""copy module interface"""
cpy = type(self)()
cpy._copy_from(self)
return cpy
#{ Interface
def assign(self, rhs):
"""Assign rhs to this instance. This is required in order to get a real copy.
Alternativly, you can copy an existing instance using the copy module"""
self._destroy()
self._copy_from(rhs)
def use_region(self, offset=0, size=0, flags=0):
"""Assure we point to a window which allows access to the given offset into the file
:param offset: absolute offset in bytes into the file
:param size: amount of bytes to map. If 0, all available bytes will be mapped
:param flags: additional flags to be given to os.open in case a file handle is initially opened
for mapping. Has no effect if a region can actually be reused.
:return: this instance - it should be queried for whether it points to a valid memory region.
This is not the case if the mapping failed because we reached the end of the file
**Note:**: The size actually mapped may be smaller than the given size. If that is the case,
either the file has reached its end, or the map was created between two existing regions"""
need_region = True
man = self._manager
fsize = self._rlist.file_size()
size = min(size or fsize, man.window_size() or fsize) # clamp size to window size
if self._region is not None:
if self._region.includes_ofs(offset):
need_region = False
else:
self.unuse_region()
# END handle existing region
# END check existing region
# offset too large ?
if offset >= fsize:
return self
# END handle offset
if need_region:
self._region = man._obtain_region(self._rlist, offset, size, flags, False)
self._region.increment_client_count()
# END need region handling
self._ofs = offset - self._region._b
self._size = min(size, self._region.ofs_end() - offset)
return self
def unuse_region(self):
"""Unuse the current region. Does nothing if we have no current region
**Note:** the cursor unuses the region automatically upon destruction. It is recommended
to un-use the region once you are done reading from it in persistent cursors as it
helps to free up resource more quickly"""
if self._region is not None:
self._region.increment_client_count(-1)
self._region = None
# note: should reset ofs and size, but we spare that for performance. Its not
# allowed to query information if we are not valid !
def buffer(self):
"""Return a buffer object which allows access to our memory region from our offset
to the window size. Please note that it might be smaller than you requested when calling use_region()
**Note:** You can only obtain a buffer if this instance is_valid() !
**Note:** buffers should not be cached passed the duration of your access as it will
prevent resources from being freed even though they might not be accounted for anymore !"""
return buffer(self._region.buffer(), self._ofs, self._size)
def map(self):
"""
:return: the underlying raw memory map. Please not that the offset and size is likely to be different
to what you set as offset and size. Use it only if you are sure about the region it maps, which is the whole
file in case of StaticWindowMapManager"""
return self._region.map()
def is_valid(self):
""":return: True if we have a valid and usable region"""
return self._region is not None
def is_associated(self):
""":return: True if we are associated with a specific file already"""
return self._rlist is not None
def ofs_begin(self):
""":return: offset to the first byte pointed to by our cursor
**Note:** only if is_valid() is True"""
return self._region._b + self._ofs
def ofs_end(self):
""":return: offset to one past the last available byte"""
# unroll method calls for performance !
return self._region._b + self._ofs + self._size
def size(self):
""":return: amount of bytes we point to"""
return self._size
def region(self):
""":return: our mapped region, or None if nothing is mapped yet
:raise AssertionError: if we have no current region. This is only useful for debugging"""
return self._region
def includes_ofs(self, ofs):
""":return: True if the given absolute offset is contained in the cursors
current region
**Note:** cursor must be valid for this to work"""
# unroll methods
return (self._region._b + self._ofs) <= ofs < (self._region._b + self._ofs + self._size)
def file_size(self):
""":return: size of the underlying file"""
return self._rlist.file_size()
def path_or_fd(self):
""":return: path or file descriptor of the underlying mapped file"""
return self._rlist.path_or_fd()
def path(self):
""":return: path of the underlying mapped file
:raise ValueError: if attached path is not a path"""
if isinstance(self._rlist.path_or_fd(), int):
raise ValueError("Path queried although mapping was applied to a file descriptor")
# END handle type
return self._rlist.path_or_fd()
def fd(self):
""":return: file descriptor used to create the underlying mapping.
**Note:** it is not required to be valid anymore
:raise ValueError: if the mapping was not created by a file descriptor"""
if isinstance(self._rlist.path_or_fd(), string_types()):
raise ValueError("File descriptor queried although mapping was generated from path")
# END handle type
return self._rlist.path_or_fd()
|
gitpython-developers/smmap | smmap/mman.py | WindowCursor._copy_from | python | def _copy_from(self, rhs):
self._manager = rhs._manager
self._rlist = type(rhs._rlist)(rhs._rlist)
self._region = rhs._region
self._ofs = rhs._ofs
self._size = rhs._size
for region in self._rlist:
region.increment_client_count()
if self._region is not None:
self._region.increment_client_count() | Copy all data from rhs into this instance, handles usage count | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/mman.py#L76-L88 | null | class WindowCursor(object):
"""
Pointer into the mapped region of the memory manager, keeping the map
alive until it is destroyed and no other client uses it.
Cursors should not be created manually, but are instead returned by the SlidingWindowMapManager
**Note:**: The current implementation is suited for static and sliding window managers, but it also means
that it must be suited for the somewhat quite different sliding manager. It could be improved, but
I see no real need to do so."""
__slots__ = (
'_manager', # the manger keeping all file regions
'_rlist', # a regions list with regions for our file
'_region', # our current class:`MapRegion` or None
'_ofs', # relative offset from the actually mapped area to our start area
'_size' # maximum size we should provide
)
def __init__(self, manager=None, regions=None):
self._manager = manager
self._rlist = regions
self._region = None
self._ofs = 0
self._size = 0
def __del__(self):
self._destroy()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._destroy()
def _destroy(self):
"""Destruction code to decrement counters"""
self.unuse_region()
if self._rlist is not None:
# Actual client count, which doesn't include the reference kept by the manager, nor ours
# as we are about to be deleted
try:
if len(self._rlist) == 0:
# Free all resources associated with the mapped file
self._manager._fdict.pop(self._rlist.path_or_fd())
# END remove regions list from manager
except (TypeError, KeyError):
# sometimes, during shutdown, getrefcount is None. Its possible
# to re-import it, however, its probably better to just ignore
# this python problem (for now).
# The next step is to get rid of the error prone getrefcount alltogether.
pass
# END exception handling
# END handle regions
# END handle regions
def __copy__(self):
"""copy module interface"""
cpy = type(self)()
cpy._copy_from(self)
return cpy
#{ Interface
def assign(self, rhs):
"""Assign rhs to this instance. This is required in order to get a real copy.
Alternativly, you can copy an existing instance using the copy module"""
self._destroy()
self._copy_from(rhs)
def use_region(self, offset=0, size=0, flags=0):
"""Assure we point to a window which allows access to the given offset into the file
:param offset: absolute offset in bytes into the file
:param size: amount of bytes to map. If 0, all available bytes will be mapped
:param flags: additional flags to be given to os.open in case a file handle is initially opened
for mapping. Has no effect if a region can actually be reused.
:return: this instance - it should be queried for whether it points to a valid memory region.
This is not the case if the mapping failed because we reached the end of the file
**Note:**: The size actually mapped may be smaller than the given size. If that is the case,
either the file has reached its end, or the map was created between two existing regions"""
need_region = True
man = self._manager
fsize = self._rlist.file_size()
size = min(size or fsize, man.window_size() or fsize) # clamp size to window size
if self._region is not None:
if self._region.includes_ofs(offset):
need_region = False
else:
self.unuse_region()
# END handle existing region
# END check existing region
# offset too large ?
if offset >= fsize:
return self
# END handle offset
if need_region:
self._region = man._obtain_region(self._rlist, offset, size, flags, False)
self._region.increment_client_count()
# END need region handling
self._ofs = offset - self._region._b
self._size = min(size, self._region.ofs_end() - offset)
return self
def unuse_region(self):
"""Unuse the current region. Does nothing if we have no current region
**Note:** the cursor unuses the region automatically upon destruction. It is recommended
to un-use the region once you are done reading from it in persistent cursors as it
helps to free up resource more quickly"""
if self._region is not None:
self._region.increment_client_count(-1)
self._region = None
# note: should reset ofs and size, but we spare that for performance. Its not
# allowed to query information if we are not valid !
def buffer(self):
"""Return a buffer object which allows access to our memory region from our offset
to the window size. Please note that it might be smaller than you requested when calling use_region()
**Note:** You can only obtain a buffer if this instance is_valid() !
**Note:** buffers should not be cached passed the duration of your access as it will
prevent resources from being freed even though they might not be accounted for anymore !"""
return buffer(self._region.buffer(), self._ofs, self._size)
def map(self):
"""
:return: the underlying raw memory map. Please not that the offset and size is likely to be different
to what you set as offset and size. Use it only if you are sure about the region it maps, which is the whole
file in case of StaticWindowMapManager"""
return self._region.map()
def is_valid(self):
""":return: True if we have a valid and usable region"""
return self._region is not None
def is_associated(self):
""":return: True if we are associated with a specific file already"""
return self._rlist is not None
def ofs_begin(self):
""":return: offset to the first byte pointed to by our cursor
**Note:** only if is_valid() is True"""
return self._region._b + self._ofs
def ofs_end(self):
""":return: offset to one past the last available byte"""
# unroll method calls for performance !
return self._region._b + self._ofs + self._size
def size(self):
""":return: amount of bytes we point to"""
return self._size
def region(self):
""":return: our mapped region, or None if nothing is mapped yet
:raise AssertionError: if we have no current region. This is only useful for debugging"""
return self._region
def includes_ofs(self, ofs):
""":return: True if the given absolute offset is contained in the cursors
current region
**Note:** cursor must be valid for this to work"""
# unroll methods
return (self._region._b + self._ofs) <= ofs < (self._region._b + self._ofs + self._size)
def file_size(self):
""":return: size of the underlying file"""
return self._rlist.file_size()
def path_or_fd(self):
""":return: path or file descriptor of the underlying mapped file"""
return self._rlist.path_or_fd()
def path(self):
""":return: path of the underlying mapped file
:raise ValueError: if attached path is not a path"""
if isinstance(self._rlist.path_or_fd(), int):
raise ValueError("Path queried although mapping was applied to a file descriptor")
# END handle type
return self._rlist.path_or_fd()
def fd(self):
""":return: file descriptor used to create the underlying mapping.
**Note:** it is not required to be valid anymore
:raise ValueError: if the mapping was not created by a file descriptor"""
if isinstance(self._rlist.path_or_fd(), string_types()):
raise ValueError("File descriptor queried although mapping was generated from path")
# END handle type
return self._rlist.path_or_fd()
|
gitpython-developers/smmap | smmap/mman.py | WindowCursor.use_region | python | def use_region(self, offset=0, size=0, flags=0):
need_region = True
man = self._manager
fsize = self._rlist.file_size()
size = min(size or fsize, man.window_size() or fsize) # clamp size to window size
if self._region is not None:
if self._region.includes_ofs(offset):
need_region = False
else:
self.unuse_region()
# END handle existing region
# END check existing region
# offset too large ?
if offset >= fsize:
return self
# END handle offset
if need_region:
self._region = man._obtain_region(self._rlist, offset, size, flags, False)
self._region.increment_client_count()
# END need region handling
self._ofs = offset - self._region._b
self._size = min(size, self._region.ofs_end() - offset)
return self | Assure we point to a window which allows access to the given offset into the file
:param offset: absolute offset in bytes into the file
:param size: amount of bytes to map. If 0, all available bytes will be mapped
:param flags: additional flags to be given to os.open in case a file handle is initially opened
for mapping. Has no effect if a region can actually be reused.
:return: this instance - it should be queried for whether it points to a valid memory region.
This is not the case if the mapping failed because we reached the end of the file
**Note:**: The size actually mapped may be smaller than the given size. If that is the case,
either the file has reached its end, or the map was created between two existing regions | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/mman.py#L104-L142 | null | class WindowCursor(object):
"""
Pointer into the mapped region of the memory manager, keeping the map
alive until it is destroyed and no other client uses it.
Cursors should not be created manually, but are instead returned by the SlidingWindowMapManager
**Note:**: The current implementation is suited for static and sliding window managers, but it also means
that it must be suited for the somewhat quite different sliding manager. It could be improved, but
I see no real need to do so."""
__slots__ = (
'_manager', # the manger keeping all file regions
'_rlist', # a regions list with regions for our file
'_region', # our current class:`MapRegion` or None
'_ofs', # relative offset from the actually mapped area to our start area
'_size' # maximum size we should provide
)
def __init__(self, manager=None, regions=None):
self._manager = manager
self._rlist = regions
self._region = None
self._ofs = 0
self._size = 0
def __del__(self):
self._destroy()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._destroy()
def _destroy(self):
"""Destruction code to decrement counters"""
self.unuse_region()
if self._rlist is not None:
# Actual client count, which doesn't include the reference kept by the manager, nor ours
# as we are about to be deleted
try:
if len(self._rlist) == 0:
# Free all resources associated with the mapped file
self._manager._fdict.pop(self._rlist.path_or_fd())
# END remove regions list from manager
except (TypeError, KeyError):
# sometimes, during shutdown, getrefcount is None. Its possible
# to re-import it, however, its probably better to just ignore
# this python problem (for now).
# The next step is to get rid of the error prone getrefcount alltogether.
pass
# END exception handling
# END handle regions
def _copy_from(self, rhs):
"""Copy all data from rhs into this instance, handles usage count"""
self._manager = rhs._manager
self._rlist = type(rhs._rlist)(rhs._rlist)
self._region = rhs._region
self._ofs = rhs._ofs
self._size = rhs._size
for region in self._rlist:
region.increment_client_count()
if self._region is not None:
self._region.increment_client_count()
# END handle regions
def __copy__(self):
"""copy module interface"""
cpy = type(self)()
cpy._copy_from(self)
return cpy
#{ Interface
def assign(self, rhs):
"""Assign rhs to this instance. This is required in order to get a real copy.
Alternativly, you can copy an existing instance using the copy module"""
self._destroy()
self._copy_from(rhs)
def unuse_region(self):
"""Unuse the current region. Does nothing if we have no current region
**Note:** the cursor unuses the region automatically upon destruction. It is recommended
to un-use the region once you are done reading from it in persistent cursors as it
helps to free up resource more quickly"""
if self._region is not None:
self._region.increment_client_count(-1)
self._region = None
# note: should reset ofs and size, but we spare that for performance. Its not
# allowed to query information if we are not valid !
def buffer(self):
"""Return a buffer object which allows access to our memory region from our offset
to the window size. Please note that it might be smaller than you requested when calling use_region()
**Note:** You can only obtain a buffer if this instance is_valid() !
**Note:** buffers should not be cached passed the duration of your access as it will
prevent resources from being freed even though they might not be accounted for anymore !"""
return buffer(self._region.buffer(), self._ofs, self._size)
def map(self):
"""
:return: the underlying raw memory map. Please not that the offset and size is likely to be different
to what you set as offset and size. Use it only if you are sure about the region it maps, which is the whole
file in case of StaticWindowMapManager"""
return self._region.map()
def is_valid(self):
""":return: True if we have a valid and usable region"""
return self._region is not None
def is_associated(self):
""":return: True if we are associated with a specific file already"""
return self._rlist is not None
def ofs_begin(self):
""":return: offset to the first byte pointed to by our cursor
**Note:** only if is_valid() is True"""
return self._region._b + self._ofs
def ofs_end(self):
""":return: offset to one past the last available byte"""
# unroll method calls for performance !
return self._region._b + self._ofs + self._size
def size(self):
""":return: amount of bytes we point to"""
return self._size
def region(self):
""":return: our mapped region, or None if nothing is mapped yet
:raise AssertionError: if we have no current region. This is only useful for debugging"""
return self._region
def includes_ofs(self, ofs):
""":return: True if the given absolute offset is contained in the cursors
current region
**Note:** cursor must be valid for this to work"""
# unroll methods
return (self._region._b + self._ofs) <= ofs < (self._region._b + self._ofs + self._size)
def file_size(self):
""":return: size of the underlying file"""
return self._rlist.file_size()
def path_or_fd(self):
""":return: path or file descriptor of the underlying mapped file"""
return self._rlist.path_or_fd()
def path(self):
""":return: path of the underlying mapped file
:raise ValueError: if attached path is not a path"""
if isinstance(self._rlist.path_or_fd(), int):
raise ValueError("Path queried although mapping was applied to a file descriptor")
# END handle type
return self._rlist.path_or_fd()
def fd(self):
""":return: file descriptor used to create the underlying mapping.
**Note:** it is not required to be valid anymore
:raise ValueError: if the mapping was not created by a file descriptor"""
if isinstance(self._rlist.path_or_fd(), string_types()):
raise ValueError("File descriptor queried although mapping was generated from path")
# END handle type
return self._rlist.path_or_fd()
|
gitpython-developers/smmap | smmap/mman.py | WindowCursor.buffer | python | def buffer(self):
return buffer(self._region.buffer(), self._ofs, self._size) | Return a buffer object which allows access to our memory region from our offset
to the window size. Please note that it might be smaller than you requested when calling use_region()
**Note:** You can only obtain a buffer if this instance is_valid() !
**Note:** buffers should not be cached passed the duration of your access as it will
prevent resources from being freed even though they might not be accounted for anymore ! | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/mman.py#L156-L164 | [
"def buffer(obj, offset, size):\n # Actually, for gitpython this is fastest ... .\n return memoryview(obj)[offset:offset+size]\n"
] | class WindowCursor(object):
"""
Pointer into the mapped region of the memory manager, keeping the map
alive until it is destroyed and no other client uses it.
Cursors should not be created manually, but are instead returned by the SlidingWindowMapManager
**Note:**: The current implementation is suited for static and sliding window managers, but it also means
that it must be suited for the somewhat quite different sliding manager. It could be improved, but
I see no real need to do so."""
__slots__ = (
'_manager', # the manger keeping all file regions
'_rlist', # a regions list with regions for our file
'_region', # our current class:`MapRegion` or None
'_ofs', # relative offset from the actually mapped area to our start area
'_size' # maximum size we should provide
)
def __init__(self, manager=None, regions=None):
self._manager = manager
self._rlist = regions
self._region = None
self._ofs = 0
self._size = 0
def __del__(self):
self._destroy()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._destroy()
def _destroy(self):
"""Destruction code to decrement counters"""
self.unuse_region()
if self._rlist is not None:
# Actual client count, which doesn't include the reference kept by the manager, nor ours
# as we are about to be deleted
try:
if len(self._rlist) == 0:
# Free all resources associated with the mapped file
self._manager._fdict.pop(self._rlist.path_or_fd())
# END remove regions list from manager
except (TypeError, KeyError):
# sometimes, during shutdown, getrefcount is None. Its possible
# to re-import it, however, its probably better to just ignore
# this python problem (for now).
# The next step is to get rid of the error prone getrefcount alltogether.
pass
# END exception handling
# END handle regions
def _copy_from(self, rhs):
"""Copy all data from rhs into this instance, handles usage count"""
self._manager = rhs._manager
self._rlist = type(rhs._rlist)(rhs._rlist)
self._region = rhs._region
self._ofs = rhs._ofs
self._size = rhs._size
for region in self._rlist:
region.increment_client_count()
if self._region is not None:
self._region.increment_client_count()
# END handle regions
def __copy__(self):
"""copy module interface"""
cpy = type(self)()
cpy._copy_from(self)
return cpy
#{ Interface
def assign(self, rhs):
"""Assign rhs to this instance. This is required in order to get a real copy.
Alternativly, you can copy an existing instance using the copy module"""
self._destroy()
self._copy_from(rhs)
def use_region(self, offset=0, size=0, flags=0):
"""Assure we point to a window which allows access to the given offset into the file
:param offset: absolute offset in bytes into the file
:param size: amount of bytes to map. If 0, all available bytes will be mapped
:param flags: additional flags to be given to os.open in case a file handle is initially opened
for mapping. Has no effect if a region can actually be reused.
:return: this instance - it should be queried for whether it points to a valid memory region.
This is not the case if the mapping failed because we reached the end of the file
**Note:**: The size actually mapped may be smaller than the given size. If that is the case,
either the file has reached its end, or the map was created between two existing regions"""
need_region = True
man = self._manager
fsize = self._rlist.file_size()
size = min(size or fsize, man.window_size() or fsize) # clamp size to window size
if self._region is not None:
if self._region.includes_ofs(offset):
need_region = False
else:
self.unuse_region()
# END handle existing region
# END check existing region
# offset too large ?
if offset >= fsize:
return self
# END handle offset
if need_region:
self._region = man._obtain_region(self._rlist, offset, size, flags, False)
self._region.increment_client_count()
# END need region handling
self._ofs = offset - self._region._b
self._size = min(size, self._region.ofs_end() - offset)
return self
def unuse_region(self):
"""Unuse the current region. Does nothing if we have no current region
**Note:** the cursor unuses the region automatically upon destruction. It is recommended
to un-use the region once you are done reading from it in persistent cursors as it
helps to free up resource more quickly"""
if self._region is not None:
self._region.increment_client_count(-1)
self._region = None
# note: should reset ofs and size, but we spare that for performance. Its not
# allowed to query information if we are not valid !
def map(self):
"""
:return: the underlying raw memory map. Please not that the offset and size is likely to be different
to what you set as offset and size. Use it only if you are sure about the region it maps, which is the whole
file in case of StaticWindowMapManager"""
return self._region.map()
def is_valid(self):
""":return: True if we have a valid and usable region"""
return self._region is not None
def is_associated(self):
""":return: True if we are associated with a specific file already"""
return self._rlist is not None
def ofs_begin(self):
""":return: offset to the first byte pointed to by our cursor
**Note:** only if is_valid() is True"""
return self._region._b + self._ofs
def ofs_end(self):
""":return: offset to one past the last available byte"""
# unroll method calls for performance !
return self._region._b + self._ofs + self._size
def size(self):
""":return: amount of bytes we point to"""
return self._size
def region(self):
""":return: our mapped region, or None if nothing is mapped yet
:raise AssertionError: if we have no current region. This is only useful for debugging"""
return self._region
def includes_ofs(self, ofs):
""":return: True if the given absolute offset is contained in the cursors
current region
**Note:** cursor must be valid for this to work"""
# unroll methods
return (self._region._b + self._ofs) <= ofs < (self._region._b + self._ofs + self._size)
def file_size(self):
""":return: size of the underlying file"""
return self._rlist.file_size()
def path_or_fd(self):
""":return: path or file descriptor of the underlying mapped file"""
return self._rlist.path_or_fd()
def path(self):
""":return: path of the underlying mapped file
:raise ValueError: if attached path is not a path"""
if isinstance(self._rlist.path_or_fd(), int):
raise ValueError("Path queried although mapping was applied to a file descriptor")
# END handle type
return self._rlist.path_or_fd()
def fd(self):
""":return: file descriptor used to create the underlying mapping.
**Note:** it is not required to be valid anymore
:raise ValueError: if the mapping was not created by a file descriptor"""
if isinstance(self._rlist.path_or_fd(), string_types()):
raise ValueError("File descriptor queried although mapping was generated from path")
# END handle type
return self._rlist.path_or_fd()
|
gitpython-developers/smmap | smmap/mman.py | WindowCursor.includes_ofs | python | def includes_ofs(self, ofs):
# unroll methods
return (self._region._b + self._ofs) <= ofs < (self._region._b + self._ofs + self._size) | :return: True if the given absolute offset is contained in the cursors
current region
**Note:** cursor must be valid for this to work | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/mman.py#L201-L207 | null | class WindowCursor(object):
"""
Pointer into the mapped region of the memory manager, keeping the map
alive until it is destroyed and no other client uses it.
Cursors should not be created manually, but are instead returned by the SlidingWindowMapManager
**Note:**: The current implementation is suited for static and sliding window managers, but it also means
that it must be suited for the somewhat quite different sliding manager. It could be improved, but
I see no real need to do so."""
__slots__ = (
'_manager', # the manger keeping all file regions
'_rlist', # a regions list with regions for our file
'_region', # our current class:`MapRegion` or None
'_ofs', # relative offset from the actually mapped area to our start area
'_size' # maximum size we should provide
)
def __init__(self, manager=None, regions=None):
self._manager = manager
self._rlist = regions
self._region = None
self._ofs = 0
self._size = 0
def __del__(self):
self._destroy()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._destroy()
def _destroy(self):
"""Destruction code to decrement counters"""
self.unuse_region()
if self._rlist is not None:
# Actual client count, which doesn't include the reference kept by the manager, nor ours
# as we are about to be deleted
try:
if len(self._rlist) == 0:
# Free all resources associated with the mapped file
self._manager._fdict.pop(self._rlist.path_or_fd())
# END remove regions list from manager
except (TypeError, KeyError):
# sometimes, during shutdown, getrefcount is None. Its possible
# to re-import it, however, its probably better to just ignore
# this python problem (for now).
# The next step is to get rid of the error prone getrefcount alltogether.
pass
# END exception handling
# END handle regions
def _copy_from(self, rhs):
"""Copy all data from rhs into this instance, handles usage count"""
self._manager = rhs._manager
self._rlist = type(rhs._rlist)(rhs._rlist)
self._region = rhs._region
self._ofs = rhs._ofs
self._size = rhs._size
for region in self._rlist:
region.increment_client_count()
if self._region is not None:
self._region.increment_client_count()
# END handle regions
def __copy__(self):
"""copy module interface"""
cpy = type(self)()
cpy._copy_from(self)
return cpy
#{ Interface
def assign(self, rhs):
"""Assign rhs to this instance. This is required in order to get a real copy.
Alternativly, you can copy an existing instance using the copy module"""
self._destroy()
self._copy_from(rhs)
def use_region(self, offset=0, size=0, flags=0):
"""Assure we point to a window which allows access to the given offset into the file
:param offset: absolute offset in bytes into the file
:param size: amount of bytes to map. If 0, all available bytes will be mapped
:param flags: additional flags to be given to os.open in case a file handle is initially opened
for mapping. Has no effect if a region can actually be reused.
:return: this instance - it should be queried for whether it points to a valid memory region.
This is not the case if the mapping failed because we reached the end of the file
**Note:**: The size actually mapped may be smaller than the given size. If that is the case,
either the file has reached its end, or the map was created between two existing regions"""
need_region = True
man = self._manager
fsize = self._rlist.file_size()
size = min(size or fsize, man.window_size() or fsize) # clamp size to window size
if self._region is not None:
if self._region.includes_ofs(offset):
need_region = False
else:
self.unuse_region()
# END handle existing region
# END check existing region
# offset too large ?
if offset >= fsize:
return self
# END handle offset
if need_region:
self._region = man._obtain_region(self._rlist, offset, size, flags, False)
self._region.increment_client_count()
# END need region handling
self._ofs = offset - self._region._b
self._size = min(size, self._region.ofs_end() - offset)
return self
def unuse_region(self):
"""Unuse the current region. Does nothing if we have no current region
**Note:** the cursor unuses the region automatically upon destruction. It is recommended
to un-use the region once you are done reading from it in persistent cursors as it
helps to free up resource more quickly"""
if self._region is not None:
self._region.increment_client_count(-1)
self._region = None
# note: should reset ofs and size, but we spare that for performance. Its not
# allowed to query information if we are not valid !
def buffer(self):
"""Return a buffer object which allows access to our memory region from our offset
to the window size. Please note that it might be smaller than you requested when calling use_region()
**Note:** You can only obtain a buffer if this instance is_valid() !
**Note:** buffers should not be cached passed the duration of your access as it will
prevent resources from being freed even though they might not be accounted for anymore !"""
return buffer(self._region.buffer(), self._ofs, self._size)
def map(self):
"""
:return: the underlying raw memory map. Please not that the offset and size is likely to be different
to what you set as offset and size. Use it only if you are sure about the region it maps, which is the whole
file in case of StaticWindowMapManager"""
return self._region.map()
def is_valid(self):
""":return: True if we have a valid and usable region"""
return self._region is not None
def is_associated(self):
""":return: True if we are associated with a specific file already"""
return self._rlist is not None
def ofs_begin(self):
""":return: offset to the first byte pointed to by our cursor
**Note:** only if is_valid() is True"""
return self._region._b + self._ofs
def ofs_end(self):
""":return: offset to one past the last available byte"""
# unroll method calls for performance !
return self._region._b + self._ofs + self._size
def size(self):
""":return: amount of bytes we point to"""
return self._size
def region(self):
""":return: our mapped region, or None if nothing is mapped yet
:raise AssertionError: if we have no current region. This is only useful for debugging"""
return self._region
def file_size(self):
""":return: size of the underlying file"""
return self._rlist.file_size()
def path_or_fd(self):
""":return: path or file descriptor of the underlying mapped file"""
return self._rlist.path_or_fd()
def path(self):
""":return: path of the underlying mapped file
:raise ValueError: if attached path is not a path"""
if isinstance(self._rlist.path_or_fd(), int):
raise ValueError("Path queried although mapping was applied to a file descriptor")
# END handle type
return self._rlist.path_or_fd()
def fd(self):
""":return: file descriptor used to create the underlying mapping.
**Note:** it is not required to be valid anymore
:raise ValueError: if the mapping was not created by a file descriptor"""
if isinstance(self._rlist.path_or_fd(), string_types()):
raise ValueError("File descriptor queried although mapping was generated from path")
# END handle type
return self._rlist.path_or_fd()
|
gitpython-developers/smmap | smmap/mman.py | WindowCursor.path | python | def path(self):
if isinstance(self._rlist.path_or_fd(), int):
raise ValueError("Path queried although mapping was applied to a file descriptor")
# END handle type
return self._rlist.path_or_fd() | :return: path of the underlying mapped file
:raise ValueError: if attached path is not a path | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/mman.py#L217-L223 | null | class WindowCursor(object):
"""
Pointer into the mapped region of the memory manager, keeping the map
alive until it is destroyed and no other client uses it.
Cursors should not be created manually, but are instead returned by the SlidingWindowMapManager
**Note:**: The current implementation is suited for static and sliding window managers, but it also means
that it must be suited for the somewhat quite different sliding manager. It could be improved, but
I see no real need to do so."""
__slots__ = (
'_manager', # the manger keeping all file regions
'_rlist', # a regions list with regions for our file
'_region', # our current class:`MapRegion` or None
'_ofs', # relative offset from the actually mapped area to our start area
'_size' # maximum size we should provide
)
def __init__(self, manager=None, regions=None):
self._manager = manager
self._rlist = regions
self._region = None
self._ofs = 0
self._size = 0
def __del__(self):
self._destroy()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._destroy()
def _destroy(self):
"""Destruction code to decrement counters"""
self.unuse_region()
if self._rlist is not None:
# Actual client count, which doesn't include the reference kept by the manager, nor ours
# as we are about to be deleted
try:
if len(self._rlist) == 0:
# Free all resources associated with the mapped file
self._manager._fdict.pop(self._rlist.path_or_fd())
# END remove regions list from manager
except (TypeError, KeyError):
# sometimes, during shutdown, getrefcount is None. Its possible
# to re-import it, however, its probably better to just ignore
# this python problem (for now).
# The next step is to get rid of the error prone getrefcount alltogether.
pass
# END exception handling
# END handle regions
def _copy_from(self, rhs):
"""Copy all data from rhs into this instance, handles usage count"""
self._manager = rhs._manager
self._rlist = type(rhs._rlist)(rhs._rlist)
self._region = rhs._region
self._ofs = rhs._ofs
self._size = rhs._size
for region in self._rlist:
region.increment_client_count()
if self._region is not None:
self._region.increment_client_count()
# END handle regions
def __copy__(self):
"""copy module interface"""
cpy = type(self)()
cpy._copy_from(self)
return cpy
#{ Interface
def assign(self, rhs):
"""Assign rhs to this instance. This is required in order to get a real copy.
Alternativly, you can copy an existing instance using the copy module"""
self._destroy()
self._copy_from(rhs)
def use_region(self, offset=0, size=0, flags=0):
"""Assure we point to a window which allows access to the given offset into the file
:param offset: absolute offset in bytes into the file
:param size: amount of bytes to map. If 0, all available bytes will be mapped
:param flags: additional flags to be given to os.open in case a file handle is initially opened
for mapping. Has no effect if a region can actually be reused.
:return: this instance - it should be queried for whether it points to a valid memory region.
This is not the case if the mapping failed because we reached the end of the file
**Note:**: The size actually mapped may be smaller than the given size. If that is the case,
either the file has reached its end, or the map was created between two existing regions"""
need_region = True
man = self._manager
fsize = self._rlist.file_size()
size = min(size or fsize, man.window_size() or fsize) # clamp size to window size
if self._region is not None:
if self._region.includes_ofs(offset):
need_region = False
else:
self.unuse_region()
# END handle existing region
# END check existing region
# offset too large ?
if offset >= fsize:
return self
# END handle offset
if need_region:
self._region = man._obtain_region(self._rlist, offset, size, flags, False)
self._region.increment_client_count()
# END need region handling
self._ofs = offset - self._region._b
self._size = min(size, self._region.ofs_end() - offset)
return self
def unuse_region(self):
"""Unuse the current region. Does nothing if we have no current region
**Note:** the cursor unuses the region automatically upon destruction. It is recommended
to un-use the region once you are done reading from it in persistent cursors as it
helps to free up resource more quickly"""
if self._region is not None:
self._region.increment_client_count(-1)
self._region = None
# note: should reset ofs and size, but we spare that for performance. Its not
# allowed to query information if we are not valid !
def buffer(self):
"""Return a buffer object which allows access to our memory region from our offset
to the window size. Please note that it might be smaller than you requested when calling use_region()
**Note:** You can only obtain a buffer if this instance is_valid() !
**Note:** buffers should not be cached passed the duration of your access as it will
prevent resources from being freed even though they might not be accounted for anymore !"""
return buffer(self._region.buffer(), self._ofs, self._size)
def map(self):
"""
:return: the underlying raw memory map. Please not that the offset and size is likely to be different
to what you set as offset and size. Use it only if you are sure about the region it maps, which is the whole
file in case of StaticWindowMapManager"""
return self._region.map()
def is_valid(self):
""":return: True if we have a valid and usable region"""
return self._region is not None
def is_associated(self):
""":return: True if we are associated with a specific file already"""
return self._rlist is not None
def ofs_begin(self):
""":return: offset to the first byte pointed to by our cursor
**Note:** only if is_valid() is True"""
return self._region._b + self._ofs
def ofs_end(self):
""":return: offset to one past the last available byte"""
# unroll method calls for performance !
return self._region._b + self._ofs + self._size
def size(self):
""":return: amount of bytes we point to"""
return self._size
def region(self):
""":return: our mapped region, or None if nothing is mapped yet
:raise AssertionError: if we have no current region. This is only useful for debugging"""
return self._region
def includes_ofs(self, ofs):
""":return: True if the given absolute offset is contained in the cursors
current region
**Note:** cursor must be valid for this to work"""
# unroll methods
return (self._region._b + self._ofs) <= ofs < (self._region._b + self._ofs + self._size)
def file_size(self):
""":return: size of the underlying file"""
return self._rlist.file_size()
def path_or_fd(self):
""":return: path or file descriptor of the underlying mapped file"""
return self._rlist.path_or_fd()
def fd(self):
""":return: file descriptor used to create the underlying mapping.
**Note:** it is not required to be valid anymore
:raise ValueError: if the mapping was not created by a file descriptor"""
if isinstance(self._rlist.path_or_fd(), string_types()):
raise ValueError("File descriptor queried although mapping was generated from path")
# END handle type
return self._rlist.path_or_fd()
|
gitpython-developers/smmap | smmap/mman.py | WindowCursor.fd | python | def fd(self):
if isinstance(self._rlist.path_or_fd(), string_types()):
raise ValueError("File descriptor queried although mapping was generated from path")
# END handle type
return self._rlist.path_or_fd() | :return: file descriptor used to create the underlying mapping.
**Note:** it is not required to be valid anymore
:raise ValueError: if the mapping was not created by a file descriptor | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/mman.py#L225-L233 | [
"def string_types():\n if sys.version_info[0] >= 3:\n return str\n else:\n return basestring\n"
] | class WindowCursor(object):
"""
Pointer into the mapped region of the memory manager, keeping the map
alive until it is destroyed and no other client uses it.
Cursors should not be created manually, but are instead returned by the SlidingWindowMapManager
**Note:**: The current implementation is suited for static and sliding window managers, but it also means
that it must be suited for the somewhat quite different sliding manager. It could be improved, but
I see no real need to do so."""
__slots__ = (
'_manager', # the manger keeping all file regions
'_rlist', # a regions list with regions for our file
'_region', # our current class:`MapRegion` or None
'_ofs', # relative offset from the actually mapped area to our start area
'_size' # maximum size we should provide
)
def __init__(self, manager=None, regions=None):
self._manager = manager
self._rlist = regions
self._region = None
self._ofs = 0
self._size = 0
def __del__(self):
self._destroy()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._destroy()
def _destroy(self):
"""Destruction code to decrement counters"""
self.unuse_region()
if self._rlist is not None:
# Actual client count, which doesn't include the reference kept by the manager, nor ours
# as we are about to be deleted
try:
if len(self._rlist) == 0:
# Free all resources associated with the mapped file
self._manager._fdict.pop(self._rlist.path_or_fd())
# END remove regions list from manager
except (TypeError, KeyError):
# sometimes, during shutdown, getrefcount is None. Its possible
# to re-import it, however, its probably better to just ignore
# this python problem (for now).
# The next step is to get rid of the error prone getrefcount alltogether.
pass
# END exception handling
# END handle regions
def _copy_from(self, rhs):
"""Copy all data from rhs into this instance, handles usage count"""
self._manager = rhs._manager
self._rlist = type(rhs._rlist)(rhs._rlist)
self._region = rhs._region
self._ofs = rhs._ofs
self._size = rhs._size
for region in self._rlist:
region.increment_client_count()
if self._region is not None:
self._region.increment_client_count()
# END handle regions
def __copy__(self):
"""copy module interface"""
cpy = type(self)()
cpy._copy_from(self)
return cpy
#{ Interface
def assign(self, rhs):
"""Assign rhs to this instance. This is required in order to get a real copy.
Alternativly, you can copy an existing instance using the copy module"""
self._destroy()
self._copy_from(rhs)
def use_region(self, offset=0, size=0, flags=0):
"""Assure we point to a window which allows access to the given offset into the file
:param offset: absolute offset in bytes into the file
:param size: amount of bytes to map. If 0, all available bytes will be mapped
:param flags: additional flags to be given to os.open in case a file handle is initially opened
for mapping. Has no effect if a region can actually be reused.
:return: this instance - it should be queried for whether it points to a valid memory region.
This is not the case if the mapping failed because we reached the end of the file
**Note:**: The size actually mapped may be smaller than the given size. If that is the case,
either the file has reached its end, or the map was created between two existing regions"""
need_region = True
man = self._manager
fsize = self._rlist.file_size()
size = min(size or fsize, man.window_size() or fsize) # clamp size to window size
if self._region is not None:
if self._region.includes_ofs(offset):
need_region = False
else:
self.unuse_region()
# END handle existing region
# END check existing region
# offset too large ?
if offset >= fsize:
return self
# END handle offset
if need_region:
self._region = man._obtain_region(self._rlist, offset, size, flags, False)
self._region.increment_client_count()
# END need region handling
self._ofs = offset - self._region._b
self._size = min(size, self._region.ofs_end() - offset)
return self
def unuse_region(self):
"""Unuse the current region. Does nothing if we have no current region
**Note:** the cursor unuses the region automatically upon destruction. It is recommended
to un-use the region once you are done reading from it in persistent cursors as it
helps to free up resource more quickly"""
if self._region is not None:
self._region.increment_client_count(-1)
self._region = None
# note: should reset ofs and size, but we spare that for performance. Its not
# allowed to query information if we are not valid !
def buffer(self):
"""Return a buffer object which allows access to our memory region from our offset
to the window size. Please note that it might be smaller than you requested when calling use_region()
**Note:** You can only obtain a buffer if this instance is_valid() !
**Note:** buffers should not be cached passed the duration of your access as it will
prevent resources from being freed even though they might not be accounted for anymore !"""
return buffer(self._region.buffer(), self._ofs, self._size)
def map(self):
"""
:return: the underlying raw memory map. Please not that the offset and size is likely to be different
to what you set as offset and size. Use it only if you are sure about the region it maps, which is the whole
file in case of StaticWindowMapManager"""
return self._region.map()
def is_valid(self):
""":return: True if we have a valid and usable region"""
return self._region is not None
def is_associated(self):
""":return: True if we are associated with a specific file already"""
return self._rlist is not None
def ofs_begin(self):
""":return: offset to the first byte pointed to by our cursor
**Note:** only if is_valid() is True"""
return self._region._b + self._ofs
def ofs_end(self):
""":return: offset to one past the last available byte"""
# unroll method calls for performance !
return self._region._b + self._ofs + self._size
def size(self):
""":return: amount of bytes we point to"""
return self._size
def region(self):
""":return: our mapped region, or None if nothing is mapped yet
:raise AssertionError: if we have no current region. This is only useful for debugging"""
return self._region
def includes_ofs(self, ofs):
""":return: True if the given absolute offset is contained in the cursors
current region
**Note:** cursor must be valid for this to work"""
# unroll methods
return (self._region._b + self._ofs) <= ofs < (self._region._b + self._ofs + self._size)
def file_size(self):
""":return: size of the underlying file"""
return self._rlist.file_size()
def path_or_fd(self):
""":return: path or file descriptor of the underlying mapped file"""
return self._rlist.path_or_fd()
def path(self):
""":return: path of the underlying mapped file
:raise ValueError: if attached path is not a path"""
if isinstance(self._rlist.path_or_fd(), int):
raise ValueError("Path queried although mapping was applied to a file descriptor")
# END handle type
return self._rlist.path_or_fd()
|
gitpython-developers/smmap | smmap/mman.py | StaticWindowMapManager._collect_lru_region | python | def _collect_lru_region(self, size):
num_found = 0
while (size == 0) or (self._memory_size + size > self._max_memory_size):
lru_region = None
lru_list = None
for regions in self._fdict.values():
for region in regions:
# check client count - if it's 1, it's just us
if (region.client_count() == 1 and
(lru_region is None or region._uc < lru_region._uc)):
lru_region = region
lru_list = regions
# END update lru_region
# END for each region
# END for each regions list
if lru_region is None:
break
# END handle region not found
num_found += 1
del(lru_list[lru_list.index(lru_region)])
lru_region.increment_client_count(-1)
self._memory_size -= lru_region.size()
self._handle_count -= 1
# END while there is more memory to free
return num_found | Unmap the region which was least-recently used and has no client
:param size: size of the region we want to map next (assuming its not already mapped partially or full
if 0, we try to free any available region
:return: Amount of freed regions
.. Note::
We don't raise exceptions anymore, in order to keep the system working, allowing temporary overallocation.
If the system runs out of memory, it will tell.
.. TODO::
implement a case where all unusued regions are discarded efficiently.
Currently its only brute force | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/mman.py#L305-L344 | null | class StaticWindowMapManager(object):
"""Provides a manager which will produce single size cursors that are allowed
to always map the whole file.
Clients must be written to specifically know that they are accessing their data
through a StaticWindowMapManager, as they otherwise have to deal with their window size.
These clients would have to use a SlidingWindowMapBuffer to hide this fact.
This type will always use a maximum window size, and optimize certain methods to
accommodate this fact"""
__slots__ = [
'_fdict', # mapping of path -> StorageHelper (of some kind
'_window_size', # maximum size of a window
'_max_memory_size', # maximum amount of memory we may allocate
'_max_handle_count', # maximum amount of handles to keep open
'_memory_size', # currently allocated memory size
'_handle_count', # amount of currently allocated file handles
]
#{ Configuration
MapRegionListCls = MapRegionList
MapWindowCls = MapWindow
MapRegionCls = MapRegion
WindowCursorCls = WindowCursor
#} END configuration
_MB_in_bytes = 1024 * 1024
def __init__(self, window_size=0, max_memory_size=0, max_open_handles=sys.maxsize):
"""initialize the manager with the given parameters.
:param window_size: if -1, a default window size will be chosen depending on
the operating system's architecture. It will internally be quantified to a multiple of the page size
If 0, the window may have any size, which basically results in mapping the whole file at one
:param max_memory_size: maximum amount of memory we may map at once before releasing mapped regions.
If 0, a viable default will be set depending on the system's architecture.
It is a soft limit that is tried to be kept, but nothing bad happens if we have to over-allocate
:param max_open_handles: if not maxint, limit the amount of open file handles to the given number.
Otherwise the amount is only limited by the system itself. If a system or soft limit is hit,
the manager will free as many handles as possible"""
self._fdict = dict()
self._window_size = window_size
self._max_memory_size = max_memory_size
self._max_handle_count = max_open_handles
self._memory_size = 0
self._handle_count = 0
if window_size < 0:
coeff = 64
if is_64_bit():
coeff = 1024
# END handle arch
self._window_size = coeff * self._MB_in_bytes
# END handle max window size
if max_memory_size == 0:
coeff = 1024
if is_64_bit():
coeff = 8192
# END handle arch
self._max_memory_size = coeff * self._MB_in_bytes
# END handle max memory size
#{ Internal Methods
def _obtain_region(self, a, offset, size, flags, is_recursive):
"""Utilty to create a new region - for more information on the parameters,
see MapCursor.use_region.
:param a: A regions (a)rray
:return: The newly created region"""
if self._memory_size + size > self._max_memory_size:
self._collect_lru_region(size)
# END handle collection
r = None
if a:
assert len(a) == 1
r = a[0]
else:
try:
r = self.MapRegionCls(a.path_or_fd(), 0, sys.maxsize, flags)
except Exception:
# apparently we are out of system resources or hit a limit
# As many more operations are likely to fail in that condition (
# like reading a file from disk, etc) we free up as much as possible
# As this invalidates our insert position, we have to recurse here
if is_recursive:
# we already tried this, and still have no success in obtaining
# a mapping. This is an exception, so we propagate it
raise
# END handle existing recursion
self._collect_lru_region(0)
return self._obtain_region(a, offset, size, flags, True)
# END handle exceptions
self._handle_count += 1
self._memory_size += r.size()
a.append(r)
# END handle array
assert r.includes_ofs(offset)
return r
#}END internal methods
#{ Interface
def make_cursor(self, path_or_fd):
"""
:return: a cursor pointing to the given path or file descriptor.
It can be used to map new regions of the file into memory
**Note:** if a file descriptor is given, it is assumed to be open and valid,
but may be closed afterwards. To refer to the same file, you may reuse
your existing file descriptor, but keep in mind that new windows can only
be mapped as long as it stays valid. This is why the using actual file paths
are preferred unless you plan to keep the file descriptor open.
**Note:** file descriptors are problematic as they are not necessarily unique, as two
different files opened and closed in succession might have the same file descriptor id.
**Note:** Using file descriptors directly is faster once new windows are mapped as it
prevents the file to be opened again just for the purpose of mapping it."""
regions = self._fdict.get(path_or_fd)
if regions is None:
regions = self.MapRegionListCls(path_or_fd)
self._fdict[path_or_fd] = regions
# END obtain region for path
return self.WindowCursorCls(self, regions)
def collect(self):
"""Collect all available free-to-collect mapped regions
:return: Amount of freed handles"""
return self._collect_lru_region(0)
def num_file_handles(self):
""":return: amount of file handles in use. Each mapped region uses one file handle"""
return self._handle_count
def num_open_files(self):
"""Amount of opened files in the system"""
return reduce(lambda x, y: x + y, (1 for rlist in self._fdict.values() if len(rlist) > 0), 0)
def window_size(self):
""":return: size of each window when allocating new regions"""
return self._window_size
def mapped_memory_size(self):
""":return: amount of bytes currently mapped in total"""
return self._memory_size
def max_file_handles(self):
""":return: maximium amount of handles we may have opened"""
return self._max_handle_count
def max_mapped_memory_size(self):
""":return: maximum amount of memory we may allocate"""
return self._max_memory_size
#} END interface
#{ Special Purpose Interface
def force_map_handle_removal_win(self, base_path):
"""ONLY AVAILABLE ON WINDOWS
On windows removing files is not allowed if anybody still has it opened.
If this process is ourselves, and if the whole process uses this memory
manager (as far as the parent framework is concerned) we can enforce
closing all memory maps whose path matches the given base path to
allow the respective operation after all.
The respective system must NOT access the closed memory regions anymore !
This really may only be used if you know that the items which keep
the cursors alive will not be using it anymore. They need to be recreated !
:return: Amount of closed handles
**Note:** does nothing on non-windows platforms"""
if sys.platform != 'win32':
return
# END early bailout
num_closed = 0
for path, rlist in self._fdict.items():
if path.startswith(base_path):
for region in rlist:
region.release()
num_closed += 1
# END path matches
# END for each path
return num_closed
|
gitpython-developers/smmap | smmap/mman.py | StaticWindowMapManager._obtain_region | python | def _obtain_region(self, a, offset, size, flags, is_recursive):
if self._memory_size + size > self._max_memory_size:
self._collect_lru_region(size)
# END handle collection
r = None
if a:
assert len(a) == 1
r = a[0]
else:
try:
r = self.MapRegionCls(a.path_or_fd(), 0, sys.maxsize, flags)
except Exception:
# apparently we are out of system resources or hit a limit
# As many more operations are likely to fail in that condition (
# like reading a file from disk, etc) we free up as much as possible
# As this invalidates our insert position, we have to recurse here
if is_recursive:
# we already tried this, and still have no success in obtaining
# a mapping. This is an exception, so we propagate it
raise
# END handle existing recursion
self._collect_lru_region(0)
return self._obtain_region(a, offset, size, flags, True)
# END handle exceptions
self._handle_count += 1
self._memory_size += r.size()
a.append(r)
# END handle array
assert r.includes_ofs(offset)
return r | Utilty to create a new region - for more information on the parameters,
see MapCursor.use_region.
:param a: A regions (a)rray
:return: The newly created region | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/mman.py#L346-L382 | null | class StaticWindowMapManager(object):
"""Provides a manager which will produce single size cursors that are allowed
to always map the whole file.
Clients must be written to specifically know that they are accessing their data
through a StaticWindowMapManager, as they otherwise have to deal with their window size.
These clients would have to use a SlidingWindowMapBuffer to hide this fact.
This type will always use a maximum window size, and optimize certain methods to
accommodate this fact"""
__slots__ = [
'_fdict', # mapping of path -> StorageHelper (of some kind
'_window_size', # maximum size of a window
'_max_memory_size', # maximum amount of memory we may allocate
'_max_handle_count', # maximum amount of handles to keep open
'_memory_size', # currently allocated memory size
'_handle_count', # amount of currently allocated file handles
]
#{ Configuration
MapRegionListCls = MapRegionList
MapWindowCls = MapWindow
MapRegionCls = MapRegion
WindowCursorCls = WindowCursor
#} END configuration
_MB_in_bytes = 1024 * 1024
def __init__(self, window_size=0, max_memory_size=0, max_open_handles=sys.maxsize):
"""initialize the manager with the given parameters.
:param window_size: if -1, a default window size will be chosen depending on
the operating system's architecture. It will internally be quantified to a multiple of the page size
If 0, the window may have any size, which basically results in mapping the whole file at one
:param max_memory_size: maximum amount of memory we may map at once before releasing mapped regions.
If 0, a viable default will be set depending on the system's architecture.
It is a soft limit that is tried to be kept, but nothing bad happens if we have to over-allocate
:param max_open_handles: if not maxint, limit the amount of open file handles to the given number.
Otherwise the amount is only limited by the system itself. If a system or soft limit is hit,
the manager will free as many handles as possible"""
self._fdict = dict()
self._window_size = window_size
self._max_memory_size = max_memory_size
self._max_handle_count = max_open_handles
self._memory_size = 0
self._handle_count = 0
if window_size < 0:
coeff = 64
if is_64_bit():
coeff = 1024
# END handle arch
self._window_size = coeff * self._MB_in_bytes
# END handle max window size
if max_memory_size == 0:
coeff = 1024
if is_64_bit():
coeff = 8192
# END handle arch
self._max_memory_size = coeff * self._MB_in_bytes
# END handle max memory size
#{ Internal Methods
def _collect_lru_region(self, size):
"""Unmap the region which was least-recently used and has no client
:param size: size of the region we want to map next (assuming its not already mapped partially or full
if 0, we try to free any available region
:return: Amount of freed regions
.. Note::
We don't raise exceptions anymore, in order to keep the system working, allowing temporary overallocation.
If the system runs out of memory, it will tell.
.. TODO::
implement a case where all unusued regions are discarded efficiently.
Currently its only brute force
"""
num_found = 0
while (size == 0) or (self._memory_size + size > self._max_memory_size):
lru_region = None
lru_list = None
for regions in self._fdict.values():
for region in regions:
# check client count - if it's 1, it's just us
if (region.client_count() == 1 and
(lru_region is None or region._uc < lru_region._uc)):
lru_region = region
lru_list = regions
# END update lru_region
# END for each region
# END for each regions list
if lru_region is None:
break
# END handle region not found
num_found += 1
del(lru_list[lru_list.index(lru_region)])
lru_region.increment_client_count(-1)
self._memory_size -= lru_region.size()
self._handle_count -= 1
# END while there is more memory to free
return num_found
#}END internal methods
#{ Interface
def make_cursor(self, path_or_fd):
"""
:return: a cursor pointing to the given path or file descriptor.
It can be used to map new regions of the file into memory
**Note:** if a file descriptor is given, it is assumed to be open and valid,
but may be closed afterwards. To refer to the same file, you may reuse
your existing file descriptor, but keep in mind that new windows can only
be mapped as long as it stays valid. This is why the using actual file paths
are preferred unless you plan to keep the file descriptor open.
**Note:** file descriptors are problematic as they are not necessarily unique, as two
different files opened and closed in succession might have the same file descriptor id.
**Note:** Using file descriptors directly is faster once new windows are mapped as it
prevents the file to be opened again just for the purpose of mapping it."""
regions = self._fdict.get(path_or_fd)
if regions is None:
regions = self.MapRegionListCls(path_or_fd)
self._fdict[path_or_fd] = regions
# END obtain region for path
return self.WindowCursorCls(self, regions)
def collect(self):
"""Collect all available free-to-collect mapped regions
:return: Amount of freed handles"""
return self._collect_lru_region(0)
def num_file_handles(self):
""":return: amount of file handles in use. Each mapped region uses one file handle"""
return self._handle_count
def num_open_files(self):
"""Amount of opened files in the system"""
return reduce(lambda x, y: x + y, (1 for rlist in self._fdict.values() if len(rlist) > 0), 0)
def window_size(self):
""":return: size of each window when allocating new regions"""
return self._window_size
def mapped_memory_size(self):
""":return: amount of bytes currently mapped in total"""
return self._memory_size
def max_file_handles(self):
""":return: maximium amount of handles we may have opened"""
return self._max_handle_count
def max_mapped_memory_size(self):
""":return: maximum amount of memory we may allocate"""
return self._max_memory_size
#} END interface
#{ Special Purpose Interface
def force_map_handle_removal_win(self, base_path):
"""ONLY AVAILABLE ON WINDOWS
On windows removing files is not allowed if anybody still has it opened.
If this process is ourselves, and if the whole process uses this memory
manager (as far as the parent framework is concerned) we can enforce
closing all memory maps whose path matches the given base path to
allow the respective operation after all.
The respective system must NOT access the closed memory regions anymore !
This really may only be used if you know that the items which keep
the cursors alive will not be using it anymore. They need to be recreated !
:return: Amount of closed handles
**Note:** does nothing on non-windows platforms"""
if sys.platform != 'win32':
return
# END early bailout
num_closed = 0
for path, rlist in self._fdict.items():
if path.startswith(base_path):
for region in rlist:
region.release()
num_closed += 1
# END path matches
# END for each path
return num_closed
|
gitpython-developers/smmap | smmap/mman.py | StaticWindowMapManager.make_cursor | python | def make_cursor(self, path_or_fd):
regions = self._fdict.get(path_or_fd)
if regions is None:
regions = self.MapRegionListCls(path_or_fd)
self._fdict[path_or_fd] = regions
# END obtain region for path
return self.WindowCursorCls(self, regions) | :return: a cursor pointing to the given path or file descriptor.
It can be used to map new regions of the file into memory
**Note:** if a file descriptor is given, it is assumed to be open and valid,
but may be closed afterwards. To refer to the same file, you may reuse
your existing file descriptor, but keep in mind that new windows can only
be mapped as long as it stays valid. This is why the using actual file paths
are preferred unless you plan to keep the file descriptor open.
**Note:** file descriptors are problematic as they are not necessarily unique, as two
different files opened and closed in succession might have the same file descriptor id.
**Note:** Using file descriptors directly is faster once new windows are mapped as it
prevents the file to be opened again just for the purpose of mapping it. | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/mman.py#L387-L408 | null | class StaticWindowMapManager(object):
"""Provides a manager which will produce single size cursors that are allowed
to always map the whole file.
Clients must be written to specifically know that they are accessing their data
through a StaticWindowMapManager, as they otherwise have to deal with their window size.
These clients would have to use a SlidingWindowMapBuffer to hide this fact.
This type will always use a maximum window size, and optimize certain methods to
accommodate this fact"""
__slots__ = [
'_fdict', # mapping of path -> StorageHelper (of some kind
'_window_size', # maximum size of a window
'_max_memory_size', # maximum amount of memory we may allocate
'_max_handle_count', # maximum amount of handles to keep open
'_memory_size', # currently allocated memory size
'_handle_count', # amount of currently allocated file handles
]
#{ Configuration
MapRegionListCls = MapRegionList
MapWindowCls = MapWindow
MapRegionCls = MapRegion
WindowCursorCls = WindowCursor
#} END configuration
_MB_in_bytes = 1024 * 1024
def __init__(self, window_size=0, max_memory_size=0, max_open_handles=sys.maxsize):
"""initialize the manager with the given parameters.
:param window_size: if -1, a default window size will be chosen depending on
the operating system's architecture. It will internally be quantified to a multiple of the page size
If 0, the window may have any size, which basically results in mapping the whole file at one
:param max_memory_size: maximum amount of memory we may map at once before releasing mapped regions.
If 0, a viable default will be set depending on the system's architecture.
It is a soft limit that is tried to be kept, but nothing bad happens if we have to over-allocate
:param max_open_handles: if not maxint, limit the amount of open file handles to the given number.
Otherwise the amount is only limited by the system itself. If a system or soft limit is hit,
the manager will free as many handles as possible"""
self._fdict = dict()
self._window_size = window_size
self._max_memory_size = max_memory_size
self._max_handle_count = max_open_handles
self._memory_size = 0
self._handle_count = 0
if window_size < 0:
coeff = 64
if is_64_bit():
coeff = 1024
# END handle arch
self._window_size = coeff * self._MB_in_bytes
# END handle max window size
if max_memory_size == 0:
coeff = 1024
if is_64_bit():
coeff = 8192
# END handle arch
self._max_memory_size = coeff * self._MB_in_bytes
# END handle max memory size
#{ Internal Methods
def _collect_lru_region(self, size):
"""Unmap the region which was least-recently used and has no client
:param size: size of the region we want to map next (assuming its not already mapped partially or full
if 0, we try to free any available region
:return: Amount of freed regions
.. Note::
We don't raise exceptions anymore, in order to keep the system working, allowing temporary overallocation.
If the system runs out of memory, it will tell.
.. TODO::
implement a case where all unusued regions are discarded efficiently.
Currently its only brute force
"""
num_found = 0
while (size == 0) or (self._memory_size + size > self._max_memory_size):
lru_region = None
lru_list = None
for regions in self._fdict.values():
for region in regions:
# check client count - if it's 1, it's just us
if (region.client_count() == 1 and
(lru_region is None or region._uc < lru_region._uc)):
lru_region = region
lru_list = regions
# END update lru_region
# END for each region
# END for each regions list
if lru_region is None:
break
# END handle region not found
num_found += 1
del(lru_list[lru_list.index(lru_region)])
lru_region.increment_client_count(-1)
self._memory_size -= lru_region.size()
self._handle_count -= 1
# END while there is more memory to free
return num_found
def _obtain_region(self, a, offset, size, flags, is_recursive):
"""Utilty to create a new region - for more information on the parameters,
see MapCursor.use_region.
:param a: A regions (a)rray
:return: The newly created region"""
if self._memory_size + size > self._max_memory_size:
self._collect_lru_region(size)
# END handle collection
r = None
if a:
assert len(a) == 1
r = a[0]
else:
try:
r = self.MapRegionCls(a.path_or_fd(), 0, sys.maxsize, flags)
except Exception:
# apparently we are out of system resources or hit a limit
# As many more operations are likely to fail in that condition (
# like reading a file from disk, etc) we free up as much as possible
# As this invalidates our insert position, we have to recurse here
if is_recursive:
# we already tried this, and still have no success in obtaining
# a mapping. This is an exception, so we propagate it
raise
# END handle existing recursion
self._collect_lru_region(0)
return self._obtain_region(a, offset, size, flags, True)
# END handle exceptions
self._handle_count += 1
self._memory_size += r.size()
a.append(r)
# END handle array
assert r.includes_ofs(offset)
return r
#}END internal methods
#{ Interface
def collect(self):
"""Collect all available free-to-collect mapped regions
:return: Amount of freed handles"""
return self._collect_lru_region(0)
def num_file_handles(self):
""":return: amount of file handles in use. Each mapped region uses one file handle"""
return self._handle_count
def num_open_files(self):
"""Amount of opened files in the system"""
return reduce(lambda x, y: x + y, (1 for rlist in self._fdict.values() if len(rlist) > 0), 0)
def window_size(self):
""":return: size of each window when allocating new regions"""
return self._window_size
def mapped_memory_size(self):
""":return: amount of bytes currently mapped in total"""
return self._memory_size
def max_file_handles(self):
""":return: maximium amount of handles we may have opened"""
return self._max_handle_count
def max_mapped_memory_size(self):
""":return: maximum amount of memory we may allocate"""
return self._max_memory_size
#} END interface
#{ Special Purpose Interface
def force_map_handle_removal_win(self, base_path):
"""ONLY AVAILABLE ON WINDOWS
On windows removing files is not allowed if anybody still has it opened.
If this process is ourselves, and if the whole process uses this memory
manager (as far as the parent framework is concerned) we can enforce
closing all memory maps whose path matches the given base path to
allow the respective operation after all.
The respective system must NOT access the closed memory regions anymore !
This really may only be used if you know that the items which keep
the cursors alive will not be using it anymore. They need to be recreated !
:return: Amount of closed handles
**Note:** does nothing on non-windows platforms"""
if sys.platform != 'win32':
return
# END early bailout
num_closed = 0
for path, rlist in self._fdict.items():
if path.startswith(base_path):
for region in rlist:
region.release()
num_closed += 1
# END path matches
# END for each path
return num_closed
|
gitpython-developers/smmap | smmap/mman.py | StaticWindowMapManager.num_open_files | python | def num_open_files(self):
return reduce(lambda x, y: x + y, (1 for rlist in self._fdict.values() if len(rlist) > 0), 0) | Amount of opened files in the system | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/mman.py#L419-L421 | null | class StaticWindowMapManager(object):
"""Provides a manager which will produce single size cursors that are allowed
to always map the whole file.
Clients must be written to specifically know that they are accessing their data
through a StaticWindowMapManager, as they otherwise have to deal with their window size.
These clients would have to use a SlidingWindowMapBuffer to hide this fact.
This type will always use a maximum window size, and optimize certain methods to
accommodate this fact"""
__slots__ = [
'_fdict', # mapping of path -> StorageHelper (of some kind
'_window_size', # maximum size of a window
'_max_memory_size', # maximum amount of memory we may allocate
'_max_handle_count', # maximum amount of handles to keep open
'_memory_size', # currently allocated memory size
'_handle_count', # amount of currently allocated file handles
]
#{ Configuration
MapRegionListCls = MapRegionList
MapWindowCls = MapWindow
MapRegionCls = MapRegion
WindowCursorCls = WindowCursor
#} END configuration
_MB_in_bytes = 1024 * 1024
def __init__(self, window_size=0, max_memory_size=0, max_open_handles=sys.maxsize):
"""initialize the manager with the given parameters.
:param window_size: if -1, a default window size will be chosen depending on
the operating system's architecture. It will internally be quantified to a multiple of the page size
If 0, the window may have any size, which basically results in mapping the whole file at one
:param max_memory_size: maximum amount of memory we may map at once before releasing mapped regions.
If 0, a viable default will be set depending on the system's architecture.
It is a soft limit that is tried to be kept, but nothing bad happens if we have to over-allocate
:param max_open_handles: if not maxint, limit the amount of open file handles to the given number.
Otherwise the amount is only limited by the system itself. If a system or soft limit is hit,
the manager will free as many handles as possible"""
self._fdict = dict()
self._window_size = window_size
self._max_memory_size = max_memory_size
self._max_handle_count = max_open_handles
self._memory_size = 0
self._handle_count = 0
if window_size < 0:
coeff = 64
if is_64_bit():
coeff = 1024
# END handle arch
self._window_size = coeff * self._MB_in_bytes
# END handle max window size
if max_memory_size == 0:
coeff = 1024
if is_64_bit():
coeff = 8192
# END handle arch
self._max_memory_size = coeff * self._MB_in_bytes
# END handle max memory size
#{ Internal Methods
def _collect_lru_region(self, size):
"""Unmap the region which was least-recently used and has no client
:param size: size of the region we want to map next (assuming its not already mapped partially or full
if 0, we try to free any available region
:return: Amount of freed regions
.. Note::
We don't raise exceptions anymore, in order to keep the system working, allowing temporary overallocation.
If the system runs out of memory, it will tell.
.. TODO::
implement a case where all unusued regions are discarded efficiently.
Currently its only brute force
"""
num_found = 0
while (size == 0) or (self._memory_size + size > self._max_memory_size):
lru_region = None
lru_list = None
for regions in self._fdict.values():
for region in regions:
# check client count - if it's 1, it's just us
if (region.client_count() == 1 and
(lru_region is None or region._uc < lru_region._uc)):
lru_region = region
lru_list = regions
# END update lru_region
# END for each region
# END for each regions list
if lru_region is None:
break
# END handle region not found
num_found += 1
del(lru_list[lru_list.index(lru_region)])
lru_region.increment_client_count(-1)
self._memory_size -= lru_region.size()
self._handle_count -= 1
# END while there is more memory to free
return num_found
def _obtain_region(self, a, offset, size, flags, is_recursive):
"""Utilty to create a new region - for more information on the parameters,
see MapCursor.use_region.
:param a: A regions (a)rray
:return: The newly created region"""
if self._memory_size + size > self._max_memory_size:
self._collect_lru_region(size)
# END handle collection
r = None
if a:
assert len(a) == 1
r = a[0]
else:
try:
r = self.MapRegionCls(a.path_or_fd(), 0, sys.maxsize, flags)
except Exception:
# apparently we are out of system resources or hit a limit
# As many more operations are likely to fail in that condition (
# like reading a file from disk, etc) we free up as much as possible
# As this invalidates our insert position, we have to recurse here
if is_recursive:
# we already tried this, and still have no success in obtaining
# a mapping. This is an exception, so we propagate it
raise
# END handle existing recursion
self._collect_lru_region(0)
return self._obtain_region(a, offset, size, flags, True)
# END handle exceptions
self._handle_count += 1
self._memory_size += r.size()
a.append(r)
# END handle array
assert r.includes_ofs(offset)
return r
#}END internal methods
#{ Interface
def make_cursor(self, path_or_fd):
"""
:return: a cursor pointing to the given path or file descriptor.
It can be used to map new regions of the file into memory
**Note:** if a file descriptor is given, it is assumed to be open and valid,
but may be closed afterwards. To refer to the same file, you may reuse
your existing file descriptor, but keep in mind that new windows can only
be mapped as long as it stays valid. This is why the using actual file paths
are preferred unless you plan to keep the file descriptor open.
**Note:** file descriptors are problematic as they are not necessarily unique, as two
different files opened and closed in succession might have the same file descriptor id.
**Note:** Using file descriptors directly is faster once new windows are mapped as it
prevents the file to be opened again just for the purpose of mapping it."""
regions = self._fdict.get(path_or_fd)
if regions is None:
regions = self.MapRegionListCls(path_or_fd)
self._fdict[path_or_fd] = regions
# END obtain region for path
return self.WindowCursorCls(self, regions)
def collect(self):
"""Collect all available free-to-collect mapped regions
:return: Amount of freed handles"""
return self._collect_lru_region(0)
def num_file_handles(self):
""":return: amount of file handles in use. Each mapped region uses one file handle"""
return self._handle_count
def window_size(self):
""":return: size of each window when allocating new regions"""
return self._window_size
def mapped_memory_size(self):
""":return: amount of bytes currently mapped in total"""
return self._memory_size
def max_file_handles(self):
""":return: maximium amount of handles we may have opened"""
return self._max_handle_count
def max_mapped_memory_size(self):
""":return: maximum amount of memory we may allocate"""
return self._max_memory_size
#} END interface
#{ Special Purpose Interface
def force_map_handle_removal_win(self, base_path):
"""ONLY AVAILABLE ON WINDOWS
On windows removing files is not allowed if anybody still has it opened.
If this process is ourselves, and if the whole process uses this memory
manager (as far as the parent framework is concerned) we can enforce
closing all memory maps whose path matches the given base path to
allow the respective operation after all.
The respective system must NOT access the closed memory regions anymore !
This really may only be used if you know that the items which keep
the cursors alive will not be using it anymore. They need to be recreated !
:return: Amount of closed handles
**Note:** does nothing on non-windows platforms"""
if sys.platform != 'win32':
return
# END early bailout
num_closed = 0
for path, rlist in self._fdict.items():
if path.startswith(base_path):
for region in rlist:
region.release()
num_closed += 1
# END path matches
# END for each path
return num_closed
|
gitpython-developers/smmap | smmap/mman.py | StaticWindowMapManager.force_map_handle_removal_win | python | def force_map_handle_removal_win(self, base_path):
if sys.platform != 'win32':
return
# END early bailout
num_closed = 0
for path, rlist in self._fdict.items():
if path.startswith(base_path):
for region in rlist:
region.release()
num_closed += 1
# END path matches
# END for each path
return num_closed | ONLY AVAILABLE ON WINDOWS
On windows removing files is not allowed if anybody still has it opened.
If this process is ourselves, and if the whole process uses this memory
manager (as far as the parent framework is concerned) we can enforce
closing all memory maps whose path matches the given base path to
allow the respective operation after all.
The respective system must NOT access the closed memory regions anymore !
This really may only be used if you know that the items which keep
the cursors alive will not be using it anymore. They need to be recreated !
:return: Amount of closed handles
**Note:** does nothing on non-windows platforms | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/mman.py#L443-L468 | null | class StaticWindowMapManager(object):
"""Provides a manager which will produce single size cursors that are allowed
to always map the whole file.
Clients must be written to specifically know that they are accessing their data
through a StaticWindowMapManager, as they otherwise have to deal with their window size.
These clients would have to use a SlidingWindowMapBuffer to hide this fact.
This type will always use a maximum window size, and optimize certain methods to
accommodate this fact"""
__slots__ = [
'_fdict', # mapping of path -> StorageHelper (of some kind
'_window_size', # maximum size of a window
'_max_memory_size', # maximum amount of memory we may allocate
'_max_handle_count', # maximum amount of handles to keep open
'_memory_size', # currently allocated memory size
'_handle_count', # amount of currently allocated file handles
]
#{ Configuration
MapRegionListCls = MapRegionList
MapWindowCls = MapWindow
MapRegionCls = MapRegion
WindowCursorCls = WindowCursor
#} END configuration
_MB_in_bytes = 1024 * 1024
def __init__(self, window_size=0, max_memory_size=0, max_open_handles=sys.maxsize):
    """initialize the manager with the given parameters.
    :param window_size: if -1, a default window size will be chosen depending on
        the operating system's architecture. It will internally be quantified to a multiple of the page size
        If 0, the window may have any size, which basically results in mapping the whole file at one
    :param max_memory_size: maximum amount of memory we may map at once before releasing mapped regions.
        If 0, a viable default will be set depending on the system's architecture.
        It is a soft limit that is tried to be kept, but nothing bad happens if we have to over-allocate
    :param max_open_handles: if not maxint, limit the amount of open file handles to the given number.
        Otherwise the amount is only limited by the system itself. If a system or soft limit is hit,
        the manager will free as many handles as possible"""
    self._fdict = dict()
    self._window_size = window_size
    self._max_memory_size = max_memory_size
    self._max_handle_count = max_open_handles
    self._memory_size = 0   # bytes currently mapped across all regions
    self._handle_count = 0  # file handles currently held by mapped regions
    # A negative window size selects an architecture-dependent default:
    # 64 MiB on 32-bit systems, 1 GiB on 64-bit ones.
    if window_size < 0:
        coeff = 64
        if is_64_bit():
            coeff = 1024
        # END handle arch
        self._window_size = coeff * self._MB_in_bytes
    # END handle max window size
    # Default memory ceiling: 1 GiB on 32-bit, 8 GiB on 64-bit systems.
    if max_memory_size == 0:
        coeff = 1024
        if is_64_bit():
            coeff = 8192
        # END handle arch
        self._max_memory_size = coeff * self._MB_in_bytes
    # END handle max memory size
#{ Internal Methods
def _collect_lru_region(self, size):
    """Unmap the region which was least-recently used and has no client
    :param size: size of the region we want to map next (assuming its not already mapped partially or full
        if 0, we try to free any available region
    :return: Amount of freed regions
    .. Note::
        We don't raise exceptions anymore, in order to keep the system working, allowing temporary overallocation.
        If the system runs out of memory, it will tell.
    .. TODO::
        implement a case where all unusued regions are discarded efficiently.
        Currently its only brute force
    """
    num_found = 0
    # size == 0 keeps evicting until nothing freeable is left (loop exits
    # via break); otherwise we evict until `size` more bytes fit under cap.
    while (size == 0) or (self._memory_size + size > self._max_memory_size):
        lru_region = None
        lru_list = None
        # Brute-force scan of every region of every file for an eviction
        # candidate that only the manager itself still references.
        for regions in self._fdict.values():
            for region in regions:
                # check client count - if it's 1, it's just us
                # NOTE(review): all candidates passing the filter have
                # _uc == 1, so the `<` comparison never reorders them and
                # effectively the first candidate found wins; _uc looks
                # like it was meant to be a use timestamp - verify upstream.
                if (region.client_count() == 1 and
                        (lru_region is None or region._uc < lru_region._uc)):
                    lru_region = region
                    lru_list = regions
                # END update lru_region
            # END for each region
        # END for each regions list
        if lru_region is None:
            break
        # END handle region not found
        num_found += 1
        del(lru_list[lru_list.index(lru_region)])
        # Dropping our own reference releases the mapping once count hits 0.
        lru_region.increment_client_count(-1)
        self._memory_size -= lru_region.size()
        self._handle_count -= 1
    # END while there is more memory to free
    return num_found
def _obtain_region(self, a, offset, size, flags, is_recursive):
    """Utilty to create a new region - for more information on the parameters,
    see MapCursor.use_region.
    :param a: A regions (a)rray
    :return: The newly created region"""
    # Try to stay under the memory cap before mapping anything new.
    if self._memory_size + size > self._max_memory_size:
        self._collect_lru_region(size)
    # END handle collection
    r = None
    if a:
        # the static manager maps each file exactly once and reuses it
        assert len(a) == 1
        r = a[0]
    else:
        try:
            # Map from offset 0 with sys.maxsize: MapRegion clamps this to
            # the actual file size, so the whole file gets mapped.
            r = self.MapRegionCls(a.path_or_fd(), 0, sys.maxsize, flags)
        except Exception:
            # apparently we are out of system resources or hit a limit
            # As many more operations are likely to fail in that condition (
            # like reading a file from disk, etc) we free up as much as possible
            # As this invalidates our insert position, we have to recurse here
            if is_recursive:
                # we already tried this, and still have no success in obtaining
                # a mapping. This is an exception, so we propagate it
                raise
            # END handle existing recursion
            self._collect_lru_region(0)
            return self._obtain_region(a, offset, size, flags, True)
        # END handle exceptions

        self._handle_count += 1
        self._memory_size += r.size()
        a.append(r)
    # END handle array

    assert r.includes_ofs(offset)
    return r
#}END internal methods
#{ Interface
def make_cursor(self, path_or_fd):
    """Return a cursor for mapping regions of the given file.

    :param path_or_fd: path to the file, or an already opened file
        descriptor. A descriptor is assumed to be open and valid, and may be
        closed afterwards - but new windows can only be mapped while it stays
        valid, and the OS may reuse descriptor ids for different files over
        time. Plain paths are therefore preferred unless you keep the
        descriptor open: mapping through a descriptor is faster, as the file
        needs no re-opening just for the purpose of mapping it.
    :return: a cursor pointing to the given path or file descriptor"""
    try:
        regions = self._fdict[path_or_fd]
    except KeyError:
        # first cursor for this file - register an empty region list for it
        regions = self.MapRegionListCls(path_or_fd)
        self._fdict[path_or_fd] = regions
    # END obtain region list for path
    return self.WindowCursorCls(self, regions)
def collect(self):
    """Collect all available free-to-collect mapped regions
    :return: Amount of freed handles"""
    # size=0 asks the LRU collector to free every region without clients
    return self._collect_lru_region(0)
def num_file_handles(self):
    """:return: amount of file handles in use. Each mapped region uses one file handle"""
    # counter maintained by _obtain_region / _collect_lru_region
    return self._handle_count
def num_open_files(self):
    """:return: amount of files that currently have at least one mapped
    region, i.e. the number of distinct open files at the OS level"""
    # sum() over a generator replaces the previous
    # reduce(lambda x, y: x + y, (1 for ...), 0) construct - same count of
    # non-empty region lists, without the functools.reduce dependency.
    return sum(1 for rlist in self._fdict.values() if len(rlist) > 0)
def window_size(self):
    """:return: size of each window when allocating new regions"""
    # 0 means "no fixed size": regions may map the whole file at once
    return self._window_size
def mapped_memory_size(self):
    """:return: amount of bytes currently mapped in total"""
    return self._memory_size
def max_file_handles(self):
    """:return: maximum amount of handles we may have opened"""
    return self._max_handle_count
def max_mapped_memory_size(self):
    """:return: maximum amount of memory we may allocate (a soft limit)"""
    return self._max_memory_size
#} END interface
#{ Special Purpose Interface
|
gitpython-developers/smmap | smmap/util.py | align_to_mmap | python | def align_to_mmap(num, round_up):
res = (num // ALLOCATIONGRANULARITY) * ALLOCATIONGRANULARITY
if round_up and (res != num):
res += ALLOCATIONGRANULARITY
# END handle size
return res | Align the given integer number to the closest page offset, which usually is 4096 bytes.
:param round_up: if True, the next higher multiple of page size is used, otherwise
the lower page_size will be used (i.e. if True, 1 becomes 4096, otherwise it becomes 0)
:return: num rounded to closest page | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/util.py#L32-L43 | null | """Module containing a memory memory manager which provides a sliding window on a number of memory mapped files"""
import os
import sys
from mmap import mmap, ACCESS_READ
from mmap import ALLOCATIONGRANULARITY
__all__ = ["align_to_mmap", "is_64_bit", "buffer",
"MapWindow", "MapRegion", "MapRegionList", "ALLOCATIONGRANULARITY"]
#{ Utilities
# Compatibility shim: expose a `buffer` callable on both Python 2 and 3.
try:
    # Python 2 - the builtin exists, just re-export it at module level
    buffer = buffer
except NameError:
    # Python 3 has no `buffer`; only `memoryview`
    def buffer(obj, offset, size):
        # Actually, for gitpython this is fastest ... .
        # NOTE(review): this returns a zero-copy memoryview slice; the
        # commented-out plain slice below would copy the bytes instead.
        return memoryview(obj)[offset:offset+size]
    # doing it directly is much faster !
    # return obj[offset:offset + size]
def string_types():
    """Return the base string type of the running interpreter:
    ``str`` on Python 3, ``basestring`` on Python 2."""
    return str if sys.version_info[0] >= 3 else basestring
def is_64_bit():
    """:return: True if the system is 64 bit. Otherwise it can be assumed to be 32 bit"""
    # a 64-bit interpreter has sys.maxsize of at least 2**32
    return sys.maxsize >= 2 ** 32
#}END utilities
#{ Utility Classes
class MapWindow(object):
    """A byte window (offset + size) inside a file. Windows can be snapped to
    the mmap allocation granularity and stretched toward neighbouring windows."""

    __slots__ = (
        'ofs',   # offset into the file in bytes
        'size'   # size of the window in bytes
    )

    def __init__(self, offset, size):
        self.ofs = offset
        self.size = size

    def __repr__(self):
        return "MapWindow(%i, %i)" % (self.ofs, self.size)

    @classmethod
    def from_region(cls, region):
        """:return: new window covering exactly the given region"""
        return cls(region._b, region.size())

    def ofs_end(self):
        """:return: offset of the first byte past this window"""
        return self.size + self.ofs

    def align(self):
        """Snap offset and size to the allocation granularity, growing the
        window so the previously covered area stays covered."""
        aligned_ofs = align_to_mmap(self.ofs, 0)
        # shifting the start left must not lose bytes at the end
        self.size += self.ofs - aligned_ofs
        self.ofs = aligned_ofs
        self.size = align_to_mmap(self.size, 1)

    def extend_left_to(self, window, max_size):
        """Grow leftwards until we touch `window`, gaining at most enough
        bytes to reach max_size; the old area remains contained."""
        gain = self.ofs - window.ofs_end()
        grown = gain + self.size
        gain -= grown - min(grown, max_size)
        self.ofs -= gain
        self.size += gain

    def extend_right_to(self, window, max_size):
        """Grow rightwards until we touch `window`, capped at max_size."""
        gap = window.ofs - self.ofs_end()
        self.size = min(self.size + gap, max_size)
class MapRegion(object):
    """Defines a mapped region of memory, aligned to pagesizes
    **Note:** deallocates used region automatically on destruction"""
    __slots__ = [
        '_b',    # beginning of mapping
        '_mf',   # mapped memory chunk (as returned by mmap)
        '_uc',   # total amount of usages (client reference count)
        '_size',  # cached size of our memory map
        '__weakref__'
    ]

    #{ Configuration
    #} END configuration

    def __init__(self, path_or_fd, ofs, size, flags=0):
        """Initialize a region, allocate the memory map
        :param path_or_fd: path to the file to map, or the opened file descriptor
        :param ofs: **aligned** offset into the file to be mapped
        :param size: if size is larger then the file on disk, the whole file will be
            allocated and the size automatically adjusted
        :param flags: additional flags to be given when opening the file.
        :raise Exception: if no memory can be allocated"""
        self._b = ofs
        self._size = 0
        self._uc = 0
        # An int argument is an already-open descriptor owned by the caller;
        # anything else is a path we open (and close again) ourselves.
        if isinstance(path_or_fd, int):
            fd = path_or_fd
        else:
            fd = os.open(path_or_fd, os.O_RDONLY | getattr(os, 'O_BINARY', 0) | flags)
        # END handle fd
        try:
            kwargs = dict(access=ACCESS_READ, offset=ofs)
            corrected_size = size
            sizeofs = ofs
            # have to correct size, otherwise (instead of the c version) it will
            # bark that the size is too large ... many extra file accesses because
            # of this ... argh !
            actual_size = min(os.fstat(fd).st_size - sizeofs, corrected_size)
            self._mf = mmap(fd, actual_size, **kwargs)
            # END handle memory mode
            self._size = len(self._mf)
        finally:
            # only close descriptors we opened ourselves (i.e. from a path);
            # callers keep ownership of descriptors they passed in
            if isinstance(path_or_fd, string_types()):
                os.close(fd)
            # END only close it if we opened it
        # END close file handle
        # We assume the first one to use us keeps us around
        self.increment_client_count()

    def __repr__(self):
        return "MapRegion<%i, %i>" % (self._b, self.size())

    #{ Interface

    def buffer(self):
        """:return: a buffer containing the memory"""
        return self._mf

    def map(self):
        """:return: a memory map containing the memory"""
        return self._mf

    def ofs_begin(self):
        """:return: absolute byte offset to the first byte of the mapping"""
        return self._b

    def size(self):
        """:return: total size of the mapped region in bytes"""
        return self._size

    def ofs_end(self):
        """:return: Absolute offset to one byte beyond the mapping into the file"""
        return self._b + self._size

    def includes_ofs(self, ofs):
        """:return: True if the given offset can be read in our mapped region"""
        return self._b <= ofs < self._b + self._size

    def client_count(self):
        """:return: number of clients currently using this region"""
        return self._uc

    def increment_client_count(self, ofs = 1):
        """Adjust the usage count by the given positive or negative offset.
        If usage count equals 0, we will auto-release our resources
        :return: True if we released resources, False otherwise. In the latter case, we can still be used"""
        self._uc += ofs
        assert self._uc > -1, "Increments must match decrements, usage counter negative: %i" % self._uc

        # Reaching exactly zero closes the underlying mmap - the region must
        # not be used again after the last reference was dropped.
        if self.client_count() == 0:
            self.release()
            return True
        else:
            return False
        # end handle release

    def release(self):
        """Release all resources this instance might hold. Must only be called if the usage_count() is zero"""
        self._mf.close()
class MapRegionList(list):
    """A list of MapRegion instances that all map (parts of) a single file."""

    __slots__ = (
        '_path_or_fd',  # path or file descriptor shared by every region
        '_file_size'    # lazily computed total size of the mapped file
    )

    def __new__(cls, path):
        # list.__new__ must not receive the path argument
        return super(MapRegionList, cls).__new__(cls)

    def __init__(self, path_or_fd):
        self._path_or_fd = path_or_fd
        self._file_size = None

    def path_or_fd(self):
        """:return: path or file descriptor we are attached to"""
        return self._path_or_fd

    def file_size(self):
        """:return: size in bytes of the file we manage; computed once,
        then cached"""
        if self._file_size is None:
            # paths need os.stat, open descriptors need os.fstat
            probe = os.stat if isinstance(self._path_or_fd, string_types()) else os.fstat
            self._file_size = probe(self._path_or_fd).st_size
        # END update file size
        return self._file_size
#} END utility classes
|
gitpython-developers/smmap | smmap/util.py | MapWindow.align | python | def align(self):
nofs = align_to_mmap(self.ofs, 0)
self.size += self.ofs - nofs # keep size constant
self.ofs = nofs
self.size = align_to_mmap(self.size, 1) | Assures the previous window area is contained in the new one | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/util.py#L78-L83 | [
"def align_to_mmap(num, round_up):\n \"\"\"\n Align the given integer number to the closest page offset, which usually is 4096 bytes.\n\n :param round_up: if True, the next higher multiple of page size is used, otherwise\n the lower page_size will be used (i.e. if True, 1 becomes 4096, otherwise it becomes 0)\n :return: num rounded to closest page\"\"\"\n res = (num // ALLOCATIONGRANULARITY) * ALLOCATIONGRANULARITY\n if round_up and (res != num):\n res += ALLOCATIONGRANULARITY\n # END handle size\n return res\n"
] | class MapWindow(object):
"""Utility type which is used to snap windows towards each other, and to adjust their size"""
__slots__ = (
'ofs', # offset into the file in bytes
'size' # size of the window in bytes
)
def __init__(self, offset, size):
self.ofs = offset
self.size = size
def __repr__(self):
return "MapWindow(%i, %i)" % (self.ofs, self.size)
@classmethod
def from_region(cls, region):
""":return: new window from a region"""
return cls(region._b, region.size())
def ofs_end(self):
return self.ofs + self.size
def extend_left_to(self, window, max_size):
"""Adjust the offset to start where the given window on our left ends if possible,
but don't make yourself larger than max_size.
The resize will assure that the new window still contains the old window area"""
rofs = self.ofs - window.ofs_end()
nsize = rofs + self.size
rofs -= nsize - min(nsize, max_size)
self.ofs = self.ofs - rofs
self.size += rofs
def extend_right_to(self, window, max_size):
"""Adjust the size to make our window end where the right window begins, but don't
get larger than max_size"""
self.size = min(self.size + (window.ofs - self.ofs_end()), max_size)
|
gitpython-developers/smmap | smmap/util.py | MapWindow.extend_left_to | python | def extend_left_to(self, window, max_size):
rofs = self.ofs - window.ofs_end()
nsize = rofs + self.size
rofs -= nsize - min(nsize, max_size)
self.ofs = self.ofs - rofs
self.size += rofs | Adjust the offset to start where the given window on our left ends if possible,
but don't make yourself larger than max_size.
The resize will assure that the new window still contains the old window area | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/util.py#L85-L93 | null | class MapWindow(object):
"""Utility type which is used to snap windows towards each other, and to adjust their size"""
__slots__ = (
'ofs', # offset into the file in bytes
'size' # size of the window in bytes
)
def __init__(self, offset, size):
self.ofs = offset
self.size = size
def __repr__(self):
return "MapWindow(%i, %i)" % (self.ofs, self.size)
@classmethod
def from_region(cls, region):
""":return: new window from a region"""
return cls(region._b, region.size())
def ofs_end(self):
return self.ofs + self.size
def align(self):
"""Assures the previous window area is contained in the new one"""
nofs = align_to_mmap(self.ofs, 0)
self.size += self.ofs - nofs # keep size constant
self.ofs = nofs
self.size = align_to_mmap(self.size, 1)
def extend_right_to(self, window, max_size):
"""Adjust the size to make our window end where the right window begins, but don't
get larger than max_size"""
self.size = min(self.size + (window.ofs - self.ofs_end()), max_size)
|
gitpython-developers/smmap | smmap/util.py | MapWindow.extend_right_to | python | def extend_right_to(self, window, max_size):
self.size = min(self.size + (window.ofs - self.ofs_end()), max_size) | Adjust the size to make our window end where the right window begins, but don't
get larger than max_size | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/util.py#L95-L98 | [
"def ofs_end(self):\n return self.ofs + self.size\n"
] | class MapWindow(object):
"""Utility type which is used to snap windows towards each other, and to adjust their size"""
__slots__ = (
'ofs', # offset into the file in bytes
'size' # size of the window in bytes
)
def __init__(self, offset, size):
self.ofs = offset
self.size = size
def __repr__(self):
return "MapWindow(%i, %i)" % (self.ofs, self.size)
@classmethod
def from_region(cls, region):
""":return: new window from a region"""
return cls(region._b, region.size())
def ofs_end(self):
return self.ofs + self.size
def align(self):
"""Assures the previous window area is contained in the new one"""
nofs = align_to_mmap(self.ofs, 0)
self.size += self.ofs - nofs # keep size constant
self.ofs = nofs
self.size = align_to_mmap(self.size, 1)
def extend_left_to(self, window, max_size):
"""Adjust the offset to start where the given window on our left ends if possible,
but don't make yourself larger than max_size.
The resize will assure that the new window still contains the old window area"""
rofs = self.ofs - window.ofs_end()
nsize = rofs + self.size
rofs -= nsize - min(nsize, max_size)
self.ofs = self.ofs - rofs
self.size += rofs
|
gitpython-developers/smmap | smmap/util.py | MapRegion.includes_ofs | python | def includes_ofs(self, ofs):
return self._b <= ofs < self._b + self._size | :return: True if the given offset can be read in our mapped region | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/util.py#L181-L183 | null | class MapRegion(object):
"""Defines a mapped region of memory, aligned to pagesizes
**Note:** deallocates used region automatically on destruction"""
__slots__ = [
'_b', # beginning of mapping
'_mf', # mapped memory chunk (as returned by mmap)
'_uc', # total amount of usages
'_size', # cached size of our memory map
'__weakref__'
]
#{ Configuration
#} END configuration
def __init__(self, path_or_fd, ofs, size, flags=0):
"""Initialize a region, allocate the memory map
:param path_or_fd: path to the file to map, or the opened file descriptor
:param ofs: **aligned** offset into the file to be mapped
:param size: if size is larger then the file on disk, the whole file will be
allocated the the size automatically adjusted
:param flags: additional flags to be given when opening the file.
:raise Exception: if no memory can be allocated"""
self._b = ofs
self._size = 0
self._uc = 0
if isinstance(path_or_fd, int):
fd = path_or_fd
else:
fd = os.open(path_or_fd, os.O_RDONLY | getattr(os, 'O_BINARY', 0) | flags)
# END handle fd
try:
kwargs = dict(access=ACCESS_READ, offset=ofs)
corrected_size = size
sizeofs = ofs
# have to correct size, otherwise (instead of the c version) it will
# bark that the size is too large ... many extra file accesses because
# if this ... argh !
actual_size = min(os.fstat(fd).st_size - sizeofs, corrected_size)
self._mf = mmap(fd, actual_size, **kwargs)
# END handle memory mode
self._size = len(self._mf)
finally:
if isinstance(path_or_fd, string_types()):
os.close(fd)
# END only close it if we opened it
# END close file handle
# We assume the first one to use us keeps us around
self.increment_client_count()
def __repr__(self):
return "MapRegion<%i, %i>" % (self._b, self.size())
#{ Interface
def buffer(self):
""":return: a buffer containing the memory"""
return self._mf
def map(self):
""":return: a memory map containing the memory"""
return self._mf
def ofs_begin(self):
""":return: absolute byte offset to the first byte of the mapping"""
return self._b
def size(self):
""":return: total size of the mapped region in bytes"""
return self._size
def ofs_end(self):
""":return: Absolute offset to one byte beyond the mapping into the file"""
return self._b + self._size
def client_count(self):
""":return: number of clients currently using this region"""
return self._uc
def increment_client_count(self, ofs = 1):
"""Adjust the usage count by the given positive or negative offset.
If usage count equals 0, we will auto-release our resources
:return: True if we released resources, False otherwise. In the latter case, we can still be used"""
self._uc += ofs
assert self._uc > -1, "Increments must match decrements, usage counter negative: %i" % self._uc
if self.client_count() == 0:
self.release()
return True
else:
return False
# end handle release
def release(self):
"""Release all resources this instance might hold. Must only be called if there usage_count() is zero"""
self._mf.close()
|
gitpython-developers/smmap | smmap/util.py | MapRegion.increment_client_count | python | def increment_client_count(self, ofs = 1):
self._uc += ofs
assert self._uc > -1, "Increments must match decrements, usage counter negative: %i" % self._uc
if self.client_count() == 0:
self.release()
return True
else:
return False | Adjust the usage count by the given positive or negative offset.
If usage count equals 0, we will auto-release our resources
:return: True if we released resources, False otherwise. In the latter case, we can still be used | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/util.py#L189-L200 | [
"def client_count(self):\n \"\"\":return: number of clients currently using this region\"\"\"\n return self._uc\n"
] | class MapRegion(object):
"""Defines a mapped region of memory, aligned to pagesizes
**Note:** deallocates used region automatically on destruction"""
__slots__ = [
'_b', # beginning of mapping
'_mf', # mapped memory chunk (as returned by mmap)
'_uc', # total amount of usages
'_size', # cached size of our memory map
'__weakref__'
]
#{ Configuration
#} END configuration
def __init__(self, path_or_fd, ofs, size, flags=0):
"""Initialize a region, allocate the memory map
:param path_or_fd: path to the file to map, or the opened file descriptor
:param ofs: **aligned** offset into the file to be mapped
:param size: if size is larger then the file on disk, the whole file will be
allocated the the size automatically adjusted
:param flags: additional flags to be given when opening the file.
:raise Exception: if no memory can be allocated"""
self._b = ofs
self._size = 0
self._uc = 0
if isinstance(path_or_fd, int):
fd = path_or_fd
else:
fd = os.open(path_or_fd, os.O_RDONLY | getattr(os, 'O_BINARY', 0) | flags)
# END handle fd
try:
kwargs = dict(access=ACCESS_READ, offset=ofs)
corrected_size = size
sizeofs = ofs
# have to correct size, otherwise (instead of the c version) it will
# bark that the size is too large ... many extra file accesses because
# if this ... argh !
actual_size = min(os.fstat(fd).st_size - sizeofs, corrected_size)
self._mf = mmap(fd, actual_size, **kwargs)
# END handle memory mode
self._size = len(self._mf)
finally:
if isinstance(path_or_fd, string_types()):
os.close(fd)
# END only close it if we opened it
# END close file handle
# We assume the first one to use us keeps us around
self.increment_client_count()
def __repr__(self):
return "MapRegion<%i, %i>" % (self._b, self.size())
#{ Interface
def buffer(self):
""":return: a buffer containing the memory"""
return self._mf
def map(self):
""":return: a memory map containing the memory"""
return self._mf
def ofs_begin(self):
""":return: absolute byte offset to the first byte of the mapping"""
return self._b
def size(self):
""":return: total size of the mapped region in bytes"""
return self._size
def ofs_end(self):
""":return: Absolute offset to one byte beyond the mapping into the file"""
return self._b + self._size
def includes_ofs(self, ofs):
""":return: True if the given offset can be read in our mapped region"""
return self._b <= ofs < self._b + self._size
def client_count(self):
""":return: number of clients currently using this region"""
return self._uc
# end handle release
def release(self):
"""Release all resources this instance might hold. Must only be called if there usage_count() is zero"""
self._mf.close()
|
gitpython-developers/smmap | smmap/util.py | MapRegionList.file_size | python | def file_size(self):
if self._file_size is None:
if isinstance(self._path_or_fd, string_types()):
self._file_size = os.stat(self._path_or_fd).st_size
else:
self._file_size = os.fstat(self._path_or_fd).st_size
# END handle path type
# END update file size
return self._file_size | :return: size of file we manager | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/util.py#L229-L238 | [
"def string_types():\n if sys.version_info[0] >= 3:\n return str\n else:\n return basestring\n"
] | class MapRegionList(list):
"""List of MapRegion instances associating a path with a list of regions."""
__slots__ = (
'_path_or_fd', # path or file descriptor which is mapped by all our regions
'_file_size' # total size of the file we map
)
def __new__(cls, path):
return super(MapRegionList, cls).__new__(cls)
def __init__(self, path_or_fd):
self._path_or_fd = path_or_fd
self._file_size = None
def path_or_fd(self):
""":return: path or file descriptor we are attached to"""
return self._path_or_fd
|
gitpython-developers/smmap | smmap/buf.py | SlidingWindowMapBuffer.begin_access | python | def begin_access(self, cursor=None, offset=0, size=sys.maxsize, flags=0):
if cursor:
self._c = cursor
# END update our cursor
# reuse existing cursors if possible
if self._c is not None and self._c.is_associated():
res = self._c.use_region(offset, size, flags).is_valid()
if res:
# if given size is too large or default, we compute a proper size
# If its smaller, we assume the combination between offset and size
# as chosen by the user is correct and use it !
# If not, the user is in trouble.
if size > self._c.file_size():
size = self._c.file_size() - offset
# END handle size
self._size = size
# END set size
return res
# END use our cursor
return False | Call this before the first use of this instance. The method was already
called by the constructor in case sufficient information was provided.
For more information no the parameters, see the __init__ method
:param path: if cursor is None the existing one will be used.
:return: True if the buffer can be used | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/buf.py#L108-L134 | null | class SlidingWindowMapBuffer(object):
"""A buffer like object which allows direct byte-wise object and slicing into
memory of a mapped file. The mapping is controlled by the provided cursor.
The buffer is relative, that is if you map an offset, index 0 will map to the
first byte at the offset you used during initialization or begin_access
**Note:** Although this type effectively hides the fact that there are mapped windows
underneath, it can unfortunately not be used in any non-pure python method which
needs a buffer or string"""
__slots__ = (
'_c', # our cursor
'_size', # our supposed size
)
def __init__(self, cursor=None, offset=0, size=sys.maxsize, flags=0):
"""Initalize the instance to operate on the given cursor.
:param cursor: if not None, the associated cursor to the file you want to access
If None, you have call begin_access before using the buffer and provide a cursor
:param offset: absolute offset in bytes
:param size: the total size of the mapping. Defaults to the maximum possible size
From that point on, the __len__ of the buffer will be the given size or the file size.
If the size is larger than the mappable area, you can only access the actually available
area, although the length of the buffer is reported to be your given size.
Hence it is in your own interest to provide a proper size !
:param flags: Additional flags to be passed to os.open
:raise ValueError: if the buffer could not achieve a valid state"""
self._c = cursor
if cursor and not self.begin_access(cursor, offset, size, flags):
raise ValueError("Failed to allocate the buffer - probably the given offset is out of bounds")
# END handle offset
def __del__(self):
self.end_access()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.end_access()
def __len__(self):
return self._size
def __getitem__(self, i):
if isinstance(i, slice):
return self.__getslice__(i.start or 0, i.stop or self._size)
c = self._c
assert c.is_valid()
if i < 0:
i = self._size + i
if not c.includes_ofs(i):
c.use_region(i, 1)
# END handle region usage
return c.buffer()[i - c.ofs_begin()]
def __getslice__(self, i, j):
c = self._c
# fast path, slice fully included - safes a concatenate operation and
# should be the default
assert c.is_valid()
if i < 0:
i = self._size + i
if j == sys.maxsize:
j = self._size
if j < 0:
j = self._size + j
if (c.ofs_begin() <= i) and (j < c.ofs_end()):
b = c.ofs_begin()
return c.buffer()[i - b:j - b]
else:
l = j - i # total length
ofs = i
# It's fastest to keep tokens and join later, especially in py3, which was 7 times slower
# in the previous iteration of this code
pyvers = sys.version_info[:2]
md = list()
while l:
c.use_region(ofs, l)
assert c.is_valid()
d = c.buffer()[:l]
ofs += len(d)
l -= len(d)
# Make sure we don't keep references, as c.use_region() might attempt to free resources, but
# can't unless we use pure bytes
if hasattr(d, 'tobytes'):
d = d.tobytes()
md.append(d)
# END while there are bytes to read
return bytes().join(md)
# END fast or slow path
#{ Interface
def end_access(self):
"""Call this method once you are done using the instance. It is automatically
called on destruction, and should be called just in time to allow system
resources to be freed.
Once you called end_access, you must call begin access before reusing this instance!"""
self._size = 0
if self._c is not None:
self._c.unuse_region()
# END unuse region
def cursor(self):
""":return: the currently set cursor which provides access to the data"""
return self._c
|
mozilla/django-tidings | tidings/models.py | multi_raw | python | def multi_raw(query, params, models, model_to_fields):
cursor = connections[router.db_for_read(models[0])].cursor()
cursor.execute(query, params)
rows = cursor.fetchall()
for row in rows:
row_iter = iter(row)
yield [model_class(**dict((a, next(row_iter))
for a in model_to_fields[model_class]))
for model_class in models] | Scoop multiple model instances out of the DB at once, given a query that
returns all fields of each.
Return an iterable of sequences of model instances parallel to the
``models`` sequence of classes. For example::
[(<User such-and-such>, <Watch such-and-such>), ...] | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/models.py#L16-L34 | null | from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.fields import (GenericForeignKey,
GenericRelation)
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.db import models, connections, router
from .compat import next, text_type
from .utils import import_from_setting, reverse
ModelBase = import_from_setting('TIDINGS_MODEL_BASE', models.Model)
class Watch(ModelBase):
"""The registration of a user's interest in a certain event
At minimum, specifies an event_type and thereby an
:class:`~tidings.events.Event` subclass. May also specify a content type
and/or object ID and, indirectly, any number of
:class:`WatchFilters <WatchFilter>`.
"""
#: Key used by an Event to find watches it manages:
event_type = models.CharField(max_length=30, db_index=True)
#: Optional reference to a content type:
content_type = models.ForeignKey(ContentType, null=True, blank=True,
on_delete=models.CASCADE)
object_id = models.PositiveIntegerField(db_index=True, null=True)
content_object = GenericForeignKey('content_type', 'object_id')
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True,
on_delete=models.CASCADE)
#: Email stored only in the case of anonymous users:
email = models.EmailField(db_index=True, null=True, blank=True)
#: Secret for activating anonymous watch email addresses.
secret = models.CharField(max_length=10, null=True, blank=True)
#: Active watches receive notifications, inactive watches don't.
is_active = models.BooleanField(default=False, db_index=True)
def __unicode__(self):
# TODO: Trace event_type back to find the Event subclass, and ask it
# how to describe me in English.
rest = self.content_object or self.content_type or self.object_id
return u'id=%s, type=%s, content_object=%s' % (self.pk,
self.event_type,
text_type(rest))
def activate(self):
"""Enable this watch so it actually fires.
Return ``self`` to support method chaining.
"""
self.is_active = True
return self
def unsubscribe_url(self):
"""Return the absolute URL to visit to delete me."""
server_relative = ('%s?s=%s' % (reverse('tidings.unsubscribe',
args=[self.pk]),
self.secret))
return 'https://%s%s' % (Site.objects.get_current().domain,
server_relative)
class WatchFilter(ModelBase):
"""Additional key/value pairs that pare down the scope of a watch"""
watch = models.ForeignKey(Watch, related_name='filters',
on_delete=models.CASCADE)
name = models.CharField(max_length=20)
#: Either an int or the hash of an item in a reasonably small set, which is
#: indicated by the name field. See comments by
#: :func:`~tidings.utils.hash_to_unsigned()` for more on what is reasonably
#: small.
value = models.PositiveIntegerField()
class Meta(object):
# There's no particular reason we couldn't allow multiple values for
# one name to be ORed together, but the API needs a little work
# (accepting lists passed to notify()) to support that.
#
# This ordering makes the index usable for lookups by name.
unique_together = ('name', 'watch')
def __unicode__(self):
return u'WatchFilter %s: %s=%s' % (self.pk, self.name, self.value)
class NotificationsMixin(models.Model):
"""Mixin for notifications models that adds watches as a generic relation.
So we get cascading deletes for free, yay!
"""
watches = GenericRelation(Watch)
class Meta(object):
abstract = True
class EmailUser(AnonymousUser):
"""An anonymous user identified only by email address.
This is based on Django's AnonymousUser, so you can use the
``is_authenticated`` property to tell that this is an anonymous user.
"""
def __init__(self, email=''):
self.email = email
def __unicode__(self):
return 'Anonymous user <%s>' % self.email
__repr__ = AnonymousUser.__str__
def __eq__(self, other):
return self.email == other.email
def __ne__(self, other):
return self.email != other.email
def __hash__(self):
return hash(self.email)
|
mozilla/django-tidings | tidings/models.py | Watch.unsubscribe_url | python | def unsubscribe_url(self):
server_relative = ('%s?s=%s' % (reverse('tidings.unsubscribe',
args=[self.pk]),
self.secret))
return 'https://%s%s' % (Site.objects.get_current().domain,
server_relative) | Return the absolute URL to visit to delete me. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/models.py#L83-L89 | null | class Watch(ModelBase):
"""The registration of a user's interest in a certain event
At minimum, specifies an event_type and thereby an
:class:`~tidings.events.Event` subclass. May also specify a content type
and/or object ID and, indirectly, any number of
:class:`WatchFilters <WatchFilter>`.
"""
#: Key used by an Event to find watches it manages:
event_type = models.CharField(max_length=30, db_index=True)
#: Optional reference to a content type:
content_type = models.ForeignKey(ContentType, null=True, blank=True,
on_delete=models.CASCADE)
object_id = models.PositiveIntegerField(db_index=True, null=True)
content_object = GenericForeignKey('content_type', 'object_id')
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True,
on_delete=models.CASCADE)
#: Email stored only in the case of anonymous users:
email = models.EmailField(db_index=True, null=True, blank=True)
#: Secret for activating anonymous watch email addresses.
secret = models.CharField(max_length=10, null=True, blank=True)
#: Active watches receive notifications, inactive watches don't.
is_active = models.BooleanField(default=False, db_index=True)
def __unicode__(self):
# TODO: Trace event_type back to find the Event subclass, and ask it
# how to describe me in English.
rest = self.content_object or self.content_type or self.object_id
return u'id=%s, type=%s, content_object=%s' % (self.pk,
self.event_type,
text_type(rest))
def activate(self):
"""Enable this watch so it actually fires.
Return ``self`` to support method chaining.
"""
self.is_active = True
return self
|
mozilla/django-tidings | tidings/tasks.py | claim_watches | python | def claim_watches(user):
Watch.objects.filter(email=user.email).update(email=None, user=user) | Attach any anonymous watches having a user's email to that user.
Call this from your user registration process if you like. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/tasks.py#L7-L13 | null | from celery.task import task
from tidings.models import Watch
@task()
|
mozilla/django-tidings | tidings/utils.py | collate | python | def collate(*iterables, **kwargs):
key = kwargs.pop('key', lambda a: a)
reverse = kwargs.pop('reverse', False)
min_or_max = max if reverse else min
rows = [iter(iterable) for iterable in iterables if iterable]
next_values = {}
by_key = []
def gather_next_value(row, index):
try:
next_value = next(row)
except StopIteration:
pass
else:
next_values[index] = next_value
by_key.append((key(next_value), index))
for index, row in enumerate(rows):
gather_next_value(row, index)
while by_key:
key_value, index = min_or_max(by_key)
by_key.remove((key_value, index))
next_value = next_values.pop(index)
yield next_value
gather_next_value(rows[index], index) | Return an iterable ordered collation of the already-sorted items
from each of ``iterables``, compared by kwarg ``key``.
If ``reverse=True`` is passed, iterables must return their results in
descending order rather than ascending. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/utils.py#L13-L46 | [
"def gather_next_value(row, index):\n try:\n next_value = next(row)\n except StopIteration:\n pass\n else:\n next_values[index] = next_value\n by_key.append((key(next_value), index))\n"
] | from zlib import crc32
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import EmailMessage
from django.template import Context, loader
from django.urls import reverse as django_reverse
from django.utils.module_loading import import_string
from .compat import next, string_types
def hash_to_unsigned(data):
"""If ``data`` is a string or unicode string, return an unsigned 4-byte int
hash of it. If ``data`` is already an int that fits those parameters,
return it verbatim.
If ``data`` is an int outside that range, behavior is undefined at the
moment. We rely on the ``PositiveIntegerField`` on
:class:`~tidings.models.WatchFilter` to scream if the int is too long for
the field.
We use CRC32 to do the hashing. Though CRC32 is not a good general-purpose
hash function, it has no collisions on a dictionary of 38,470 English
words, which should be fine for the small sets that :class:`WatchFilters
<tidings.models.WatchFilter>` are designed to enumerate. As a bonus, it is
fast and available as a built-in function in some DBs. If your set of
filter values is very large or has different CRC32 distribution properties
than English words, you might want to do your own hashing in your
:class:`~tidings.events.Event` subclass and pass ints when specifying
filter values.
"""
if isinstance(data, string_types):
# Return a CRC32 value identical across Python versions and platforms
# by stripping the sign bit as on
# http://docs.python.org/library/zlib.html.
return crc32(data.encode('utf-8')) & 0xffffffff
else:
return int(data)
def emails_with_users_and_watches(
subject, template_path, vars, users_and_watches,
from_email=settings.TIDINGS_FROM_ADDRESS, **extra_kwargs):
"""Return iterable of EmailMessages with user and watch values substituted.
A convenience function for generating emails by repeatedly rendering a
Django template with the given ``vars`` plus a ``user`` and ``watches`` key
for each pair in ``users_and_watches``
:arg template_path: path to template file
:arg vars: a map which becomes the Context passed in to the template
:arg extra_kwargs: additional kwargs to pass into EmailMessage constructor
"""
template = loader.get_template(template_path)
context = Context(vars)
for u, w in users_and_watches:
context['user'] = u
# Arbitrary single watch for compatibility with 0.1
# TODO: remove.
context['watch'] = w[0]
context['watches'] = w
yield EmailMessage(subject,
template.render(context),
from_email,
[u.email],
**extra_kwargs)
def import_from_setting(setting_name, fallback):
"""Return the resolution of an import path stored in a Django setting.
:arg setting_name: The name of the setting holding the import path
:arg fallback: An alternate object to use if the setting is empty or
doesn't exist
Raise ImproperlyConfigured if a path is given that can't be resolved.
"""
path = getattr(settings, setting_name, None)
if path:
try:
return import_string(path)
except ImportError:
raise ImproperlyConfigured('%s: No such path.' % path)
else:
return fallback
# Here to be imported by others:
reverse = import_from_setting('TIDINGS_REVERSE', django_reverse) # no QA
|
mozilla/django-tidings | tidings/utils.py | hash_to_unsigned | python | def hash_to_unsigned(data):
if isinstance(data, string_types):
# Return a CRC32 value identical across Python versions and platforms
# by stripping the sign bit as on
# http://docs.python.org/library/zlib.html.
return crc32(data.encode('utf-8')) & 0xffffffff
else:
return int(data) | If ``data`` is a string or unicode string, return an unsigned 4-byte int
hash of it. If ``data`` is already an int that fits those parameters,
return it verbatim.
If ``data`` is an int outside that range, behavior is undefined at the
moment. We rely on the ``PositiveIntegerField`` on
:class:`~tidings.models.WatchFilter` to scream if the int is too long for
the field.
We use CRC32 to do the hashing. Though CRC32 is not a good general-purpose
hash function, it has no collisions on a dictionary of 38,470 English
words, which should be fine for the small sets that :class:`WatchFilters
<tidings.models.WatchFilter>` are designed to enumerate. As a bonus, it is
fast and available as a built-in function in some DBs. If your set of
filter values is very large or has different CRC32 distribution properties
than English words, you might want to do your own hashing in your
:class:`~tidings.events.Event` subclass and pass ints when specifying
filter values. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/utils.py#L49-L76 | null | from zlib import crc32
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import EmailMessage
from django.template import Context, loader
from django.urls import reverse as django_reverse
from django.utils.module_loading import import_string
from .compat import next, string_types
def collate(*iterables, **kwargs):
"""Return an iterable ordered collation of the already-sorted items
from each of ``iterables``, compared by kwarg ``key``.
If ``reverse=True`` is passed, iterables must return their results in
descending order rather than ascending.
"""
key = kwargs.pop('key', lambda a: a)
reverse = kwargs.pop('reverse', False)
min_or_max = max if reverse else min
rows = [iter(iterable) for iterable in iterables if iterable]
next_values = {}
by_key = []
def gather_next_value(row, index):
try:
next_value = next(row)
except StopIteration:
pass
else:
next_values[index] = next_value
by_key.append((key(next_value), index))
for index, row in enumerate(rows):
gather_next_value(row, index)
while by_key:
key_value, index = min_or_max(by_key)
by_key.remove((key_value, index))
next_value = next_values.pop(index)
yield next_value
gather_next_value(rows[index], index)
def emails_with_users_and_watches(
subject, template_path, vars, users_and_watches,
from_email=settings.TIDINGS_FROM_ADDRESS, **extra_kwargs):
"""Return iterable of EmailMessages with user and watch values substituted.
A convenience function for generating emails by repeatedly rendering a
Django template with the given ``vars`` plus a ``user`` and ``watches`` key
for each pair in ``users_and_watches``
:arg template_path: path to template file
:arg vars: a map which becomes the Context passed in to the template
:arg extra_kwargs: additional kwargs to pass into EmailMessage constructor
"""
template = loader.get_template(template_path)
context = Context(vars)
for u, w in users_and_watches:
context['user'] = u
# Arbitrary single watch for compatibility with 0.1
# TODO: remove.
context['watch'] = w[0]
context['watches'] = w
yield EmailMessage(subject,
template.render(context),
from_email,
[u.email],
**extra_kwargs)
def import_from_setting(setting_name, fallback):
"""Return the resolution of an import path stored in a Django setting.
:arg setting_name: The name of the setting holding the import path
:arg fallback: An alternate object to use if the setting is empty or
doesn't exist
Raise ImproperlyConfigured if a path is given that can't be resolved.
"""
path = getattr(settings, setting_name, None)
if path:
try:
return import_string(path)
except ImportError:
raise ImproperlyConfigured('%s: No such path.' % path)
else:
return fallback
# Here to be imported by others:
reverse = import_from_setting('TIDINGS_REVERSE', django_reverse) # no QA
|
mozilla/django-tidings | tidings/utils.py | emails_with_users_and_watches | python | def emails_with_users_and_watches(
subject, template_path, vars, users_and_watches,
from_email=settings.TIDINGS_FROM_ADDRESS, **extra_kwargs):
template = loader.get_template(template_path)
context = Context(vars)
for u, w in users_and_watches:
context['user'] = u
# Arbitrary single watch for compatibility with 0.1
# TODO: remove.
context['watch'] = w[0]
context['watches'] = w
yield EmailMessage(subject,
template.render(context),
from_email,
[u.email],
**extra_kwargs) | Return iterable of EmailMessages with user and watch values substituted.
A convenience function for generating emails by repeatedly rendering a
Django template with the given ``vars`` plus a ``user`` and ``watches`` key
for each pair in ``users_and_watches``
:arg template_path: path to template file
:arg vars: a map which becomes the Context passed in to the template
:arg extra_kwargs: additional kwargs to pass into EmailMessage constructor | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/utils.py#L79-L107 | null | from zlib import crc32
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import EmailMessage
from django.template import Context, loader
from django.urls import reverse as django_reverse
from django.utils.module_loading import import_string
from .compat import next, string_types
def collate(*iterables, **kwargs):
"""Return an iterable ordered collation of the already-sorted items
from each of ``iterables``, compared by kwarg ``key``.
If ``reverse=True`` is passed, iterables must return their results in
descending order rather than ascending.
"""
key = kwargs.pop('key', lambda a: a)
reverse = kwargs.pop('reverse', False)
min_or_max = max if reverse else min
rows = [iter(iterable) for iterable in iterables if iterable]
next_values = {}
by_key = []
def gather_next_value(row, index):
try:
next_value = next(row)
except StopIteration:
pass
else:
next_values[index] = next_value
by_key.append((key(next_value), index))
for index, row in enumerate(rows):
gather_next_value(row, index)
while by_key:
key_value, index = min_or_max(by_key)
by_key.remove((key_value, index))
next_value = next_values.pop(index)
yield next_value
gather_next_value(rows[index], index)
def hash_to_unsigned(data):
"""If ``data`` is a string or unicode string, return an unsigned 4-byte int
hash of it. If ``data`` is already an int that fits those parameters,
return it verbatim.
If ``data`` is an int outside that range, behavior is undefined at the
moment. We rely on the ``PositiveIntegerField`` on
:class:`~tidings.models.WatchFilter` to scream if the int is too long for
the field.
We use CRC32 to do the hashing. Though CRC32 is not a good general-purpose
hash function, it has no collisions on a dictionary of 38,470 English
words, which should be fine for the small sets that :class:`WatchFilters
<tidings.models.WatchFilter>` are designed to enumerate. As a bonus, it is
fast and available as a built-in function in some DBs. If your set of
filter values is very large or has different CRC32 distribution properties
than English words, you might want to do your own hashing in your
:class:`~tidings.events.Event` subclass and pass ints when specifying
filter values.
"""
if isinstance(data, string_types):
# Return a CRC32 value identical across Python versions and platforms
# by stripping the sign bit as on
# http://docs.python.org/library/zlib.html.
return crc32(data.encode('utf-8')) & 0xffffffff
else:
return int(data)
def import_from_setting(setting_name, fallback):
"""Return the resolution of an import path stored in a Django setting.
:arg setting_name: The name of the setting holding the import path
:arg fallback: An alternate object to use if the setting is empty or
doesn't exist
Raise ImproperlyConfigured if a path is given that can't be resolved.
"""
path = getattr(settings, setting_name, None)
if path:
try:
return import_string(path)
except ImportError:
raise ImproperlyConfigured('%s: No such path.' % path)
else:
return fallback
# Here to be imported by others:
reverse = import_from_setting('TIDINGS_REVERSE', django_reverse) # no QA
|
mozilla/django-tidings | tidings/utils.py | import_from_setting | python | def import_from_setting(setting_name, fallback):
path = getattr(settings, setting_name, None)
if path:
try:
return import_string(path)
except ImportError:
raise ImproperlyConfigured('%s: No such path.' % path)
else:
return fallback | Return the resolution of an import path stored in a Django setting.
:arg setting_name: The name of the setting holding the import path
:arg fallback: An alternate object to use if the setting is empty or
doesn't exist
Raise ImproperlyConfigured if a path is given that can't be resolved. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/utils.py#L110-L127 | null | from zlib import crc32
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import EmailMessage
from django.template import Context, loader
from django.urls import reverse as django_reverse
from django.utils.module_loading import import_string
from .compat import next, string_types
def collate(*iterables, **kwargs):
"""Return an iterable ordered collation of the already-sorted items
from each of ``iterables``, compared by kwarg ``key``.
If ``reverse=True`` is passed, iterables must return their results in
descending order rather than ascending.
"""
key = kwargs.pop('key', lambda a: a)
reverse = kwargs.pop('reverse', False)
min_or_max = max if reverse else min
rows = [iter(iterable) for iterable in iterables if iterable]
next_values = {}
by_key = []
def gather_next_value(row, index):
try:
next_value = next(row)
except StopIteration:
pass
else:
next_values[index] = next_value
by_key.append((key(next_value), index))
for index, row in enumerate(rows):
gather_next_value(row, index)
while by_key:
key_value, index = min_or_max(by_key)
by_key.remove((key_value, index))
next_value = next_values.pop(index)
yield next_value
gather_next_value(rows[index], index)
def hash_to_unsigned(data):
"""If ``data`` is a string or unicode string, return an unsigned 4-byte int
hash of it. If ``data`` is already an int that fits those parameters,
return it verbatim.
If ``data`` is an int outside that range, behavior is undefined at the
moment. We rely on the ``PositiveIntegerField`` on
:class:`~tidings.models.WatchFilter` to scream if the int is too long for
the field.
We use CRC32 to do the hashing. Though CRC32 is not a good general-purpose
hash function, it has no collisions on a dictionary of 38,470 English
words, which should be fine for the small sets that :class:`WatchFilters
<tidings.models.WatchFilter>` are designed to enumerate. As a bonus, it is
fast and available as a built-in function in some DBs. If your set of
filter values is very large or has different CRC32 distribution properties
than English words, you might want to do your own hashing in your
:class:`~tidings.events.Event` subclass and pass ints when specifying
filter values.
"""
if isinstance(data, string_types):
# Return a CRC32 value identical across Python versions and platforms
# by stripping the sign bit as on
# http://docs.python.org/library/zlib.html.
return crc32(data.encode('utf-8')) & 0xffffffff
else:
return int(data)
def emails_with_users_and_watches(
subject, template_path, vars, users_and_watches,
from_email=settings.TIDINGS_FROM_ADDRESS, **extra_kwargs):
"""Return iterable of EmailMessages with user and watch values substituted.
A convenience function for generating emails by repeatedly rendering a
Django template with the given ``vars`` plus a ``user`` and ``watches`` key
for each pair in ``users_and_watches``
:arg template_path: path to template file
:arg vars: a map which becomes the Context passed in to the template
:arg extra_kwargs: additional kwargs to pass into EmailMessage constructor
"""
template = loader.get_template(template_path)
context = Context(vars)
for u, w in users_and_watches:
context['user'] = u
# Arbitrary single watch for compatibility with 0.1
# TODO: remove.
context['watch'] = w[0]
context['watches'] = w
yield EmailMessage(subject,
template.render(context),
from_email,
[u.email],
**extra_kwargs)
# Here to be imported by others:
reverse = import_from_setting('TIDINGS_REVERSE', django_reverse) # no QA
|
mozilla/django-tidings | tidings/views.py | unsubscribe | python | def unsubscribe(request, watch_id):
ext = getattr(settings, 'TIDINGS_TEMPLATE_EXTENSION', 'html')
# Grab the watch and secret; complain if either is wrong:
try:
watch = Watch.objects.get(pk=watch_id)
# 's' is for 'secret' but saves wrapping in mails
secret = request.GET.get('s')
if secret != watch.secret:
raise Watch.DoesNotExist
except Watch.DoesNotExist:
return render(request, 'tidings/unsubscribe_error.' + ext)
if request.method == 'POST':
watch.delete()
return render(request, 'tidings/unsubscribe_success.' + ext)
return render(request, 'tidings/unsubscribe.' + ext) | Unsubscribe from (i.e. delete) the watch of ID ``watch_id``.
Expects an ``s`` querystring parameter matching the watch's secret.
GET will result in a confirmation page (or a failure page if the secret is
wrong). POST will actually delete the watch (again, if the secret is
correct).
Uses these templates:
* tidings/unsubscribe.html - Asks user to confirm deleting a watch
* tidings/unsubscribe_error.html - Shown when a watch is not found
* tidings/unsubscribe_success.html - Shown when a watch is deleted
The shipped templates assume a ``head_title`` and a ``content`` block
in a ``base.html`` template.
The template extension can be changed from the default ``html`` using
the setting :data:`~django.conf.settings.TIDINGS_TEMPLATE_EXTENSION`. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/views.py#L7-L44 | null | from django.conf import settings
from django.shortcuts import render
from tidings.models import Watch
|
mozilla/django-tidings | tidings/events.py | _unique_by_email | python | def _unique_by_email(users_and_watches):
def ensure_user_has_email(user, cluster_email):
"""Make sure the user in the user-watch pair has an email address.
The caller guarantees us an email from either the user or the watch. If
the passed-in user has no email, we return an EmailUser instead having
the email address from the watch.
"""
# Some of these cases shouldn't happen, but we're tolerant.
if not getattr(user, 'email', ''):
user = EmailUser(cluster_email)
return user
# TODO: Do this instead with clever SQL that somehow returns just the
# best row for each email.
cluster_email = '' # email of current cluster
favorite_user = None # best user in cluster so far
watches = [] # all watches in cluster
for u, w in users_and_watches:
# w always has at least 1 Watch. All the emails are the same.
row_email = u.email or w[0].email
if cluster_email.lower() != row_email.lower():
# Starting a new cluster.
if cluster_email != '':
# Ship the favorites from the previous cluster:
yield (ensure_user_has_email(favorite_user, cluster_email),
watches)
favorite_user, watches = u, []
cluster_email = row_email
elif ((not favorite_user.email or not u.is_authenticated) and
u.email and u.is_authenticated):
favorite_user = u
watches.extend(w)
if favorite_user is not None:
yield ensure_user_has_email(favorite_user, cluster_email), watches | Given a sequence of (User/EmailUser, [Watch, ...]) pairs
clustered by email address (which is never ''), yield from each
cluster a single pair like this::
(User/EmailUser, [Watch, Watch, ...]).
The User/Email is that of...
(1) the first incoming pair where the User has an email and is not
anonymous, or, if there isn't such a user...
(2) the first pair.
The list of Watches consists of all those found in the cluster.
Compares email addresses case-insensitively. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/events.py#L24-L76 | [
"def ensure_user_has_email(user, cluster_email):\n \"\"\"Make sure the user in the user-watch pair has an email address.\n\n The caller guarantees us an email from either the user or the watch. If\n the passed-in user has no email, we return an EmailUser instead having\n the email address from the watch.\n\n \"\"\"\n # Some of these cases shouldn't happen, but we're tolerant.\n if not getattr(user, 'email', ''):\n user = EmailUser(cluster_email)\n return user\n"
] | from collections import Sequence
from smtplib import SMTPException
import random
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.core import mail
from django.db.models import Q
from celery.task import task
from .compat import iteritems, iterkeys, string_types, range
from .models import Watch, WatchFilter, EmailUser, multi_raw
from .utils import collate, hash_to_unsigned
class ActivationRequestFailed(Exception):
"""Raised when activation request fails, e.g. if email could not be sent"""
def __init__(self, msgs):
self.msgs = msgs
class Event(object):
"""Abstract base class for events
An :class:`Event` represents, simply, something that occurs. A
:class:`~tidings.models.Watch` is a record of someone's interest in a
certain type of :class:`Event`, distinguished by ``Event.event_type``.
Fire an Event (``SomeEvent.fire()``) from the code that causes the
interesting event to occur. Fire it any time the event *might* have
occurred. The Event will determine whether conditions are right to actually
send notifications; don't succumb to the temptation to do these tests
outside the Event, because you'll end up repeating yourself if the event is
ever fired from more than one place.
:class:`Event` subclasses can optionally represent a more limited scope of
interest by populating the ``Watch.content_type`` field and/or adding
related :class:`~tidings.models.WatchFilter` rows holding name/value pairs,
the meaning of which is up to each individual subclass. NULL values are
considered wildcards.
:class:`Event` subclass instances must be pickleable so they can be
shuttled off to celery tasks.
"""
# event_type = 'hamster modified' # key for the event_type column
content_type = None # or, for example, Hamster
#: Possible filter keys, for validation only. For example:
#: ``set(['color', 'flavor'])``
filters = set()
def fire(self, exclude=None, delay=True):
"""Notify everyone watching the event.
We are explicit about sending notifications; we don't just key off
creation signals, because the receiver of a ``post_save`` signal has no
idea what just changed, so it doesn't know which notifications to send.
Also, we could easily send mail accidentally: for instance, during
tests. If we want implicit event firing, we can always register a
signal handler that calls :meth:`fire()`.
:arg exclude: If a saved user is passed in, that user will not be
notified, though anonymous notifications having the same email
address may still be sent. A sequence of users may also be passed in.
:arg delay: If True (default), the event is handled asynchronously with
Celery. This requires the pickle task serializer, which is no longer
the default starting in Celery 4.0. If False, the event is processed
immediately.
"""
if delay:
# Tasks don't receive the `self` arg implicitly.
self._fire_task.apply_async(
args=(self,),
kwargs={'exclude': exclude},
serializer='pickle')
else:
self._fire_task(self, exclude=exclude)
@task
def _fire_task(self, exclude=None):
"""Build and send the emails as a celery task."""
connection = mail.get_connection(fail_silently=True)
# Warning: fail_silently swallows errors thrown by the generators, too.
connection.open()
for m in self._mails(self._users_watching(exclude=exclude)):
connection.send_messages([m])
@classmethod
def _validate_filters(cls, filters):
"""Raise a TypeError if ``filters`` contains any keys inappropriate to
this event class."""
for k in iterkeys(filters):
if k not in cls.filters:
# Mirror "unexpected keyword argument" message:
raise TypeError("%s got an unsupported filter type '%s'" %
(cls.__name__, k))
def _users_watching_by_filter(self, object_id=None, exclude=None,
**filters):
"""Return an iterable of (``User``/:class:`~tidings.models.EmailUser`,
[:class:`~tidings.models.Watch` objects]) tuples watching the event.
Of multiple Users/EmailUsers having the same email address, only one is
returned. Users are favored over EmailUsers so we are sure to be able
to, for example, include a link to a user profile in the mail.
The list of :class:`~tidings.models.Watch` objects includes both
those tied to the given User (if there is a registered user)
and to any anonymous Watch having the same email address. This
allows you to include all relevant unsubscribe URLs in a mail,
for example. It also lets you make decisions in the
:meth:`~tidings.events.EventUnion._mails()` method of
:class:`~tidings.events.EventUnion` based on the kinds of
watches found.
"Watching the event" means having a Watch whose ``event_type`` is
``self.event_type``, whose ``content_type`` is ``self.content_type`` or
``NULL``, whose ``object_id`` is ``object_id`` or ``NULL``, and whose
WatchFilter rows match as follows: each name/value pair given in
``filters`` must be matched by a related WatchFilter, or there must be
no related WatchFilter having that name. If you find yourself wanting
the lack of a particularly named WatchFilter to scuttle the match, use
a different event_type instead.
:arg exclude: If a saved user is passed in as this argument, that user
will never be returned, though anonymous watches having the same
email address may. A sequence of users may also be passed in.
"""
# I don't think we can use the ORM here, as there's no way to get a
# second condition (name=whatever) into a left join. However, if we
# were willing to have 2 subqueries run for every watch row--select
# {are there any filters with name=x?} and select {is there a filter
# with name=x and value=y?}--we could do it with extra(). Then we could
# have EventUnion simply | the QuerySets together, which would avoid
# having to merge in Python.
if exclude is None:
exclude = []
elif not isinstance(exclude, Sequence):
exclude = [exclude]
def filter_conditions():
"""Return joins, WHERE conditions, and params to bind to them in
order to check a notification against all the given filters."""
# Not a one-liner. You're welcome. :-)
self._validate_filters(filters)
joins, wheres, join_params, where_params = [], [], [], []
for n, (k, v) in enumerate(iteritems(filters)):
joins.append(
'LEFT JOIN tidings_watchfilter f{n} '
'ON f{n}.watch_id=w.id '
'AND f{n}.name=%s'.format(n=n))
join_params.append(k)
wheres.append('(f{n}.value=%s '
'OR f{n}.value IS NULL)'.format(n=n))
where_params.append(hash_to_unsigned(v))
return joins, wheres, join_params + where_params
# Apply watchfilter constraints:
joins, wheres, params = filter_conditions()
# Start off with event_type, which is always a constraint. These go in
# the `wheres` list to guarantee that the AND after the {wheres}
# substitution in the query is okay.
wheres.append('w.event_type=%s')
params.append(self.event_type)
# Constrain on other 1-to-1 attributes:
if self.content_type:
wheres.append('(w.content_type_id IS NULL '
'OR w.content_type_id=%s)')
params.append(ContentType.objects.get_for_model(
self.content_type).id)
if object_id:
wheres.append('(w.object_id IS NULL OR w.object_id=%s)')
params.append(object_id)
if exclude:
# Don't try excluding unsaved Users:1
if not all(e.id for e in exclude):
raise ValueError("Can't exclude an unsaved User.")
wheres.append('(u.id IS NULL OR u.id NOT IN (%s))' %
', '.join('%s' for e in exclude))
params.extend(e.id for e in exclude)
def get_fields(model):
if hasattr(model._meta, '_fields'):
# For django versions < 1.6
return model._meta._fields()
else:
# For django versions >= 1.6
return model._meta.fields
User = get_user_model()
model_to_fields = dict((m, [f.get_attname() for f in get_fields(m)])
for m in [User, Watch])
query_fields = [
'u.{0}'.format(field) for field in model_to_fields[User]]
query_fields.extend([
'w.{0}'.format(field) for field in model_to_fields[Watch]])
query = (
'SELECT {fields} '
'FROM tidings_watch w '
'LEFT JOIN {user_table} u ON u.id=w.user_id {joins} '
'WHERE {wheres} '
'AND (length(w.email)>0 OR length(u.email)>0) '
'AND w.is_active '
'ORDER BY u.email DESC, w.email DESC').format(
fields=', '.join(query_fields),
joins=' '.join(joins),
wheres=' AND '.join(wheres),
user_table=User._meta.db_table)
# IIRC, the DESC ordering was something to do with the placement of
# NULLs. Track this down and explain it.
# Put watch in a list just for consistency. Once the pairs go through
# _unique_by_email, watches will be in a list, and EventUnion uses the
# same function to union already-list-enclosed pairs from individual
# events.
return _unique_by_email((u, [w]) for u, w in
multi_raw(query, params, [User, Watch],
model_to_fields))
@classmethod
def _watches_belonging_to_user(cls, user_or_email, object_id=None,
                               **filters):
    """Return a QuerySet of watches having the given user or email, having
    (only) the given filters, and having the event_type and content_type
    attrs of the class.

    :arg user_or_email: a registered User instance, or (for anonymous
        watches) an email address string; an ``AnonymousUser`` yields an
        empty QuerySet
    :arg object_id: optional pk that further narrows the match
    :arg filters: name/value pairs which must be matched *exactly* by the
        watch's WatchFilter rows -- no more, no fewer

    Matched Watches may be either confirmed and unconfirmed. They may
    include duplicates if the get-then-create race condition in
    :meth:`notify()` allowed them to be created.

    If you pass an email, it will be matched against only the email
    addresses of anonymous watches. At the moment, the only integration
    point planned between anonymous and registered watches is the claiming
    of anonymous watches of the same email address on user registration
    confirmation.

    If you pass the AnonymousUser, this will return an empty QuerySet.
    """
    # If we have trouble distinguishing subsets and such, we could store a
    # number_of_filters on the Watch.
    cls._validate_filters(filters)
    if isinstance(user_or_email, string_types):
        # A bare string means an anonymous (email-only) watch.
        user_condition = Q(email=user_or_email)
    elif user_or_email.is_authenticated:
        user_condition = Q(user=user_or_email)
    else:
        # AnonymousUser: nothing can match.
        return Watch.objects.none()
    # Filter by stuff in the Watch row. The raw-SQL subselect pins the
    # watch to having *exactly* len(filters) WatchFilter rows, so a watch
    # carrying extra filters is not considered a match.
    watches = getattr(Watch, 'uncached', Watch.objects).filter(
        user_condition,
        Q(content_type=ContentType.objects.get_for_model(
            cls.content_type)) if cls.content_type else Q(),
        Q(object_id=object_id) if object_id else Q(),
        event_type=cls.event_type).extra(
            where=['(SELECT count(*) FROM tidings_watchfilter WHERE '
                   'tidings_watchfilter.watch_id='
                   'tidings_watch.id)=%s'],
            params=[len(filters)])
    # Optimization: If the subselect ends up being slow, store the number
    # of filters in each Watch row or try a GROUP BY.
    # Apply 1-to-many filters:
    for k, v in iteritems(filters):
        watches = watches.filter(filters__name=k,
                                 filters__value=hash_to_unsigned(v))
    return watches
@classmethod
# The trailing-underscore arg name keeps the nice names free for filters.
def is_notifying(cls, user_or_email_, object_id=None, **filters):
    """Return whether the user/email is watching this event (either
    active or inactive watches), conditional on meeting the criteria in
    ``filters``.

    Only watches whose filters match the given ones *exactly* count --
    watches matching merely a superset do not. That lets callers tell
    overlapping watches apart; equivalently, it answers whether
    :meth:`notify()` was called with these very arguments.

    Subclass implementations may take different arguments (e.g. to bake
    in certain filters) but should document them clearly.

    An ``AnonymousUser`` always yields ``False``, so passing
    ``request.user`` straight from a view is safe.
    """
    matching = cls._watches_belonging_to_user(user_or_email_,
                                              object_id=object_id,
                                              **filters)
    return matching.exists()
@classmethod
def notify(cls, user_or_email_, object_id=None, **filters):
    """Start notifying the given user or email address when this event
    occurs and meets the criteria given in ``filters``.

    Return the created (or the existing matching) Watch so you can call
    :meth:`~tidings.models.Watch.activate()` on it if you're so inclined.

    Implementations in subclasses may take different arguments; see the
    docstring of :meth:`is_notifying()`.

    Send an activation email if an anonymous watch is created and
    :data:`~django.conf.settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES` is
    ``True``. If the activation request fails, raise a
    ActivationRequestFailed exception.

    Calling :meth:`notify()` twice for an anonymous user will send the
    email each time.
    """
    # A test-for-existence-then-create race condition exists here, but it
    # doesn't matter: de-duplication on fire() and deletion of all matches
    # on stop_notifying() nullify its effects.
    try:
        # Pick 1 if >1 are returned:
        watch = cls._watches_belonging_to_user(
            user_or_email_,
            object_id=object_id,
            **filters)[0:1].get()
    except Watch.DoesNotExist:
        # No matching watch yet -- build one from scratch.
        create_kwargs = {}
        if cls.content_type:
            create_kwargs['content_type'] = \
                ContentType.objects.get_for_model(cls.content_type)
        # A string argument means an anonymous (email-only) watch.
        create_kwargs['email' if isinstance(user_or_email_, string_types)
                      else 'user'] = user_or_email_
        # Letters that can't be mistaken for other letters or numbers in
        # most fonts, in case people try to type these:
        distinguishable_letters = \
            'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRTUVWXYZ'
        secret = ''.join(random.choice(distinguishable_letters)
                         for x in range(10))
        # Registered users don't need to confirm, but anonymous users do.
        is_active = ('user' in create_kwargs or
                     not settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES)
        if object_id:
            create_kwargs['object_id'] = object_id
        watch = Watch.objects.create(
            secret=secret,
            is_active=is_active,
            event_type=cls.event_type,
            **create_kwargs)
        for k, v in iteritems(filters):
            WatchFilter.objects.create(watch=watch, name=k,
                                       value=hash_to_unsigned(v))
    # Send email for inactive watches.
    if not watch.is_active:
        email = watch.user.email if watch.user else watch.email
        message = cls._activation_email(watch, email)
        try:
            message.send()
        except SMTPException as e:
            # Roll back the watch so a failed confirmation isn't left
            # half-registered.
            watch.delete()
            raise ActivationRequestFailed(e.recipients)
    return watch
@classmethod
def stop_notifying(cls, user_or_email_, **filters):
    """Delete every watch matching the exact user/email and filters.

    Both active and inactive watches are removed, including any
    duplicates left behind by the get-then-create race condition.

    Implementations in subclasses may take different arguments; see the
    docstring of :meth:`is_notifying()`.
    """
    doomed = cls._watches_belonging_to_user(user_or_email_, **filters)
    doomed.delete()
# TODO: If GenericForeignKeys don't give us cascading deletes, make a
# stop_notifying_all(**filters) or something. It should delete any watch of
# the class's event_type and content_type and having filters matching each
# of **filters. Even if there are additional filters on a watch, that watch
# should still be deleted so we can delete, for example, any watch that
# references a certain Question instance. To do that, factor such that you
# can effectively call _watches_belonging_to_user() without it calling
# extra().
# Subclasses should implement the following:

def _mails(self, users_and_watches):
    """Return an iterable yielding an EmailMessage to send to each user.

    :arg users_and_watches: an iterable of (User or EmailUser, [Watches])
        pairs where the first element is the user to send to and the second
        is a list of watches (usually just one) that indicated the
        user's interest in this event

    :meth:`~tidings.utils.emails_with_users_and_watches()` can come in
    handy for generating mails from Django templates.
    """
    # Did this instead of mail() because a common case might be sending the
    # same mail to many users. mail() would make it difficult to avoid
    # redoing the templating every time.
    raise NotImplementedError
def _users_watching(self, **kwargs):
    """Yield (User/EmailUser, [Watch, ...]) pairs for everyone watching
    this event.

    The default implementation simply delegates to
    :meth:`_users_watching_by_filter`, i.e. it matches on this object's
    ``event_type`` and, when defined, ``content_type``.
    """
    return self._users_watching_by_filter(**kwargs)
@classmethod
def _activation_email(cls, watch, email):
    """Return an EmailMessage to send to anonymous watchers.

    They are expected to follow the activation URL sent in the email to
    activate their watch, so you should include at least that.

    :arg watch: the just-created, inactive Watch
    :arg email: the address the confirmation should go to
    """
    # TODO: basic implementation.
    # Placeholder subject/body; subclasses should override with real copy.
    return mail.EmailMessage('TODO', 'Activate!',
                             settings.TIDINGS_FROM_ADDRESS,
                             [email])
@classmethod
def _activation_url(cls, watch):
    """Return a URL pointing to a view which :meth:`activates
    <tidings.models.Watch.activate()>` a watch.

    TODO: provide generic implementation of this before liberating.
    Generic implementation could involve a setting to the default
    ``reverse()`` path, e.g. ``'tidings.activate_watch'``.
    """
    # Abstract: subclasses must supply the URL scheme.
    raise NotImplementedError
@classmethod
def description_of_watch(cls, watch):
    """Return a human-readable description of the Watch which can be
    used in emails.

    For example, "changes to English articles"
    """
    # Abstract: subclasses must describe their own watches.
    raise NotImplementedError
class EventUnion(Event):
    """Fireable conglomeration of multiple events

    Use this when you want to send a single mail to each person watching any of
    several events. For example, this sends only 1 mail to a given user, even
    if he was being notified of all 3 events::

        EventUnion(SomeEvent(), OtherEvent(), ThirdEvent()).fire()

    """
    # Calls some private methods on events, but this and Event are good
    # friends.

    def __init__(self, *events):
        """:arg events: the events of which to take the union"""
        super(EventUnion, self).__init__()
        self.events = events

    def _mails(self, users_and_watches):
        """Default implementation calls the
        :meth:`~tidings.events.Event._mails()` of my first event but may
        pass it any of my events as ``self``.

        Use this default implementation when the content of each event's mail
        template is essentially the same, e.g. "This new post was made.
        Enjoy.". When the receipt of a second mail from the second event would
        add no value, this is a fine choice. If the second event's email would
        add value, you should probably fire both events independently and let
        both mails be delivered. Or, if you would like to send a single mail
        with a custom template for a batch of events, just subclass
        :class:`EventUnion` and override this method.
        """
        return self.events[0]._mails(users_and_watches)

    def _users_watching(self, **kwargs):
        # Get a sorted iterable of user-watches pairs:
        def email_key(pair):
            user, watch = pair
            return user.email.lower()

        # Merge the per-event iterables into one stream ordered by email
        # (descending, to mirror the SQL ORDER BY in the single-event case).
        users_and_watches = collate(
            *[e._users_watching(**kwargs) for e in self.events],
            key=email_key,
            reverse=True)
        # Pick the best User out of each cluster of identical email addresses:
        return _unique_by_email(users_and_watches)
class InstanceEvent(Event):
    """Abstract superclass for watching one specific instance of a Model.

    Subclasses must specify an ``event_type`` and should specify a
    ``content_type``.
    """

    def __init__(self, instance, *args, **kwargs):
        """Remember which *instance* must be watched for this event to
        reach a subscriber; everything else is handled by ``Event``."""
        super(InstanceEvent, self).__init__(*args, **kwargs)
        self.instance = instance

    @classmethod
    def notify(cls, user_or_email, instance):
        """Create, save, and return a watch that fires when something
        happens to ``instance``."""
        return super(InstanceEvent, cls).notify(
            user_or_email, object_id=instance.pk)

    @classmethod
    def stop_notifying(cls, user_or_email, instance):
        """Delete the watch created by :meth:`notify`."""
        super(InstanceEvent, cls).stop_notifying(
            user_or_email, object_id=instance.pk)

    @classmethod
    def is_notifying(cls, user_or_email, instance):
        """Return whether the watch created by :meth:`notify` exists."""
        return super(InstanceEvent, cls).is_notifying(
            user_or_email, object_id=instance.pk)
|
mozilla/django-tidings | tidings/events.py | Event.fire | python | def fire(self, exclude=None, delay=True):
if delay:
# Tasks don't receive the `self` arg implicitly.
self._fire_task.apply_async(
args=(self,),
kwargs={'exclude': exclude},
serializer='pickle')
else:
self._fire_task(self, exclude=exclude) | Notify everyone watching the event.
We are explicit about sending notifications; we don't just key off
creation signals, because the receiver of a ``post_save`` signal has no
idea what just changed, so it doesn't know which notifications to send.
Also, we could easily send mail accidentally: for instance, during
tests. If we want implicit event firing, we can always register a
signal handler that calls :meth:`fire()`.
:arg exclude: If a saved user is passed in, that user will not be
notified, though anonymous notifications having the same email
address may still be sent. A sequence of users may also be passed in.
:arg delay: If True (default), the event is handled asynchronously with
Celery. This requires the pickle task serializer, which is no longer
the default starting in Celery 4.0. If False, the event is processed
immediately. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/events.py#L110-L136 | null | class Event(object):
"""Abstract base class for events
An :class:`Event` represents, simply, something that occurs. A
:class:`~tidings.models.Watch` is a record of someone's interest in a
certain type of :class:`Event`, distinguished by ``Event.event_type``.
Fire an Event (``SomeEvent.fire()``) from the code that causes the
interesting event to occur. Fire it any time the event *might* have
occurred. The Event will determine whether conditions are right to actually
send notifications; don't succumb to the temptation to do these tests
outside the Event, because you'll end up repeating yourself if the event is
ever fired from more than one place.
:class:`Event` subclasses can optionally represent a more limited scope of
interest by populating the ``Watch.content_type`` field and/or adding
related :class:`~tidings.models.WatchFilter` rows holding name/value pairs,
the meaning of which is up to each individual subclass. NULL values are
considered wildcards.
:class:`Event` subclass instances must be pickleable so they can be
shuttled off to celery tasks.
"""
# Subclasses identify themselves via event_type and may narrow their
# scope with content_type, e.g.:
# event_type = 'hamster modified' # key for the event_type column
content_type = None  # or, for example, Hamster

#: Possible filter keys, for validation only. For example:
#: ``set(['color', 'flavor'])``
filters = set()
@task
def _fire_task(self, exclude=None):
    """Build and send the emails as a celery task.

    :arg exclude: passed through to :meth:`_users_watching` so the given
        user(s) receive no mail
    """
    connection = mail.get_connection(fail_silently=True)
    # Warning: fail_silently swallows errors thrown by the generators, too.
    connection.open()
    # Send one message at a time so a lazy _mails() generator never has to
    # materialize the whole batch.
    for m in self._mails(self._users_watching(exclude=exclude)):
        connection.send_messages([m])
@classmethod
def _validate_filters(cls, filters):
    """Raise a TypeError if ``filters`` holds any key this event class
    does not declare in ``cls.filters``."""
    for key in iterkeys(filters):
        if key in cls.filters:
            continue
        # Mirror Python's "unexpected keyword argument" wording:
        raise TypeError("%s got an unsupported filter type '%s'" %
                        (cls.__name__, key))
def _users_watching_by_filter(self, object_id=None, exclude=None,
                              **filters):
    """Return an iterable of (``User``/:class:`~tidings.models.EmailUser`,
    [:class:`~tidings.models.Watch` objects]) tuples watching the event.

    Of multiple Users/EmailUsers having the same email address, only one is
    returned. Users are favored over EmailUsers so we are sure to be able
    to, for example, include a link to a user profile in the mail.

    The list of :class:`~tidings.models.Watch` objects includes both
    those tied to the given User (if there is a registered user)
    and to any anonymous Watch having the same email address. This
    allows you to include all relevant unsubscribe URLs in a mail,
    for example. It also lets you make decisions in the
    :meth:`~tidings.events.EventUnion._mails()` method of
    :class:`~tidings.events.EventUnion` based on the kinds of
    watches found.

    "Watching the event" means having a Watch whose ``event_type`` is
    ``self.event_type``, whose ``content_type`` is ``self.content_type`` or
    ``NULL``, whose ``object_id`` is ``object_id`` or ``NULL``, and whose
    WatchFilter rows match as follows: each name/value pair given in
    ``filters`` must be matched by a related WatchFilter, or there must be
    no related WatchFilter having that name. If you find yourself wanting
    the lack of a particularly named WatchFilter to scuttle the match, use
    a different event_type instead.

    :arg exclude: If a saved user is passed in as this argument, that user
        will never be returned, though anonymous watches having the same
        email address may. A sequence of users may also be passed in.
    """
    # I don't think we can use the ORM here, as there's no way to get a
    # second condition (name=whatever) into a left join. However, if we
    # were willing to have 2 subqueries run for every watch row--select
    # {are there any filters with name=x?} and select {is there a filter
    # with name=x and value=y?}--we could do it with extra(). Then we could
    # have EventUnion simply | the QuerySets together, which would avoid
    # having to merge in Python.
    if exclude is None:
        exclude = []
    elif not isinstance(exclude, Sequence):
        # Accept a single user as a convenience.
        exclude = [exclude]

    def filter_conditions():
        """Return joins, WHERE conditions, and params to bind to them in
        order to check a notification against all the given filters."""
        # Not a one-liner. You're welcome. :-)
        self._validate_filters(filters)
        joins, wheres, join_params, where_params = [], [], [], []
        for n, (k, v) in enumerate(iteritems(filters)):
            # One numbered LEFT JOIN per filter; a NULL f{n}.value means
            # "no WatchFilter of this name", which also matches.
            joins.append(
                'LEFT JOIN tidings_watchfilter f{n} '
                'ON f{n}.watch_id=w.id '
                'AND f{n}.name=%s'.format(n=n))
            join_params.append(k)
            wheres.append('(f{n}.value=%s '
                          'OR f{n}.value IS NULL)'.format(n=n))
            where_params.append(hash_to_unsigned(v))
        return joins, wheres, join_params + where_params

    # Apply watchfilter constraints:
    joins, wheres, params = filter_conditions()

    # Start off with event_type, which is always a constraint. These go in
    # the `wheres` list to guarantee that the AND after the {wheres}
    # substitution in the query is okay.
    wheres.append('w.event_type=%s')
    params.append(self.event_type)

    # Constrain on other 1-to-1 attributes:
    if self.content_type:
        wheres.append('(w.content_type_id IS NULL '
                      'OR w.content_type_id=%s)')
        params.append(ContentType.objects.get_for_model(
            self.content_type).id)
    if object_id:
        wheres.append('(w.object_id IS NULL OR w.object_id=%s)')
        params.append(object_id)
    if exclude:
        # Don't try excluding unsaved Users:
        if not all(e.id for e in exclude):
            raise ValueError("Can't exclude an unsaved User.")
        wheres.append('(u.id IS NULL OR u.id NOT IN (%s))' %
                      ', '.join('%s' for e in exclude))
        params.extend(e.id for e in exclude)

    def get_fields(model):
        if hasattr(model._meta, '_fields'):
            # For django versions < 1.6
            return model._meta._fields()
        else:
            # For django versions >= 1.6
            return model._meta.fields

    User = get_user_model()
    model_to_fields = dict((m, [f.get_attname() for f in get_fields(m)])
                           for m in [User, Watch])
    query_fields = [
        'u.{0}'.format(field) for field in model_to_fields[User]]
    query_fields.extend([
        'w.{0}'.format(field) for field in model_to_fields[Watch]])
    query = (
        'SELECT {fields} '
        'FROM tidings_watch w '
        'LEFT JOIN {user_table} u ON u.id=w.user_id {joins} '
        'WHERE {wheres} '
        'AND (length(w.email)>0 OR length(u.email)>0) '
        'AND w.is_active '
        'ORDER BY u.email DESC, w.email DESC').format(
            fields=', '.join(query_fields),
            joins=' '.join(joins),
            wheres=' AND '.join(wheres),
            user_table=User._meta.db_table)
    # IIRC, the DESC ordering was something to do with the placement of
    # NULLs. Track this down and explain it.

    # Put watch in a list just for consistency. Once the pairs go through
    # _unique_by_email, watches will be in a list, and EventUnion uses the
    # same function to union already-list-enclosed pairs from individual
    # events.
    return _unique_by_email((u, [w]) for u, w in
                            multi_raw(query, params, [User, Watch],
                                      model_to_fields))
@classmethod
def _watches_belonging_to_user(cls, user_or_email, object_id=None,
                               **filters):
    """Return a QuerySet of watches having the given user or email, having
    (only) the given filters, and having the event_type and content_type
    attrs of the class.

    :arg user_or_email: a registered User instance, or (for anonymous
        watches) an email address string; an ``AnonymousUser`` yields an
        empty QuerySet
    :arg object_id: optional pk that further narrows the match
    :arg filters: name/value pairs which must be matched *exactly* by the
        watch's WatchFilter rows -- no more, no fewer

    Matched Watches may be either confirmed and unconfirmed. They may
    include duplicates if the get-then-create race condition in
    :meth:`notify()` allowed them to be created.

    If you pass an email, it will be matched against only the email
    addresses of anonymous watches. At the moment, the only integration
    point planned between anonymous and registered watches is the claiming
    of anonymous watches of the same email address on user registration
    confirmation.

    If you pass the AnonymousUser, this will return an empty QuerySet.
    """
    # If we have trouble distinguishing subsets and such, we could store a
    # number_of_filters on the Watch.
    cls._validate_filters(filters)
    if isinstance(user_or_email, string_types):
        # A bare string means an anonymous (email-only) watch.
        user_condition = Q(email=user_or_email)
    elif user_or_email.is_authenticated:
        user_condition = Q(user=user_or_email)
    else:
        # AnonymousUser: nothing can match.
        return Watch.objects.none()
    # Filter by stuff in the Watch row. The raw-SQL subselect pins the
    # watch to having *exactly* len(filters) WatchFilter rows, so a watch
    # carrying extra filters is not considered a match.
    watches = getattr(Watch, 'uncached', Watch.objects).filter(
        user_condition,
        Q(content_type=ContentType.objects.get_for_model(
            cls.content_type)) if cls.content_type else Q(),
        Q(object_id=object_id) if object_id else Q(),
        event_type=cls.event_type).extra(
            where=['(SELECT count(*) FROM tidings_watchfilter WHERE '
                   'tidings_watchfilter.watch_id='
                   'tidings_watch.id)=%s'],
            params=[len(filters)])
    # Optimization: If the subselect ends up being slow, store the number
    # of filters in each Watch row or try a GROUP BY.
    # Apply 1-to-many filters:
    for k, v in iteritems(filters):
        watches = watches.filter(filters__name=k,
                                 filters__value=hash_to_unsigned(v))
    return watches
@classmethod
# The trailing-underscore arg name keeps the nice names free for filters.
def is_notifying(cls, user_or_email_, object_id=None, **filters):
    """Return whether the user/email is watching this event (either
    active or inactive watches), conditional on meeting the criteria in
    ``filters``.

    Only watches whose filters match the given ones *exactly* count --
    watches matching merely a superset do not. That lets callers tell
    overlapping watches apart; equivalently, it answers whether
    :meth:`notify()` was called with these very arguments.

    Subclass implementations may take different arguments (e.g. to bake
    in certain filters) but should document them clearly.

    An ``AnonymousUser`` always yields ``False``, so passing
    ``request.user`` straight from a view is safe.
    """
    matching = cls._watches_belonging_to_user(user_or_email_,
                                              object_id=object_id,
                                              **filters)
    return matching.exists()
@classmethod
def notify(cls, user_or_email_, object_id=None, **filters):
    """Start notifying the given user or email address when this event
    occurs and meets the criteria given in ``filters``.

    Return the created (or the existing matching) Watch so you can call
    :meth:`~tidings.models.Watch.activate()` on it if you're so inclined.

    Implementations in subclasses may take different arguments; see the
    docstring of :meth:`is_notifying()`.

    Send an activation email if an anonymous watch is created and
    :data:`~django.conf.settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES` is
    ``True``. If the activation request fails, raise a
    ActivationRequestFailed exception.

    Calling :meth:`notify()` twice for an anonymous user will send the
    email each time.
    """
    # A test-for-existence-then-create race condition exists here, but it
    # doesn't matter: de-duplication on fire() and deletion of all matches
    # on stop_notifying() nullify its effects.
    try:
        # Pick 1 if >1 are returned:
        watch = cls._watches_belonging_to_user(
            user_or_email_,
            object_id=object_id,
            **filters)[0:1].get()
    except Watch.DoesNotExist:
        # No matching watch yet -- build one from scratch.
        create_kwargs = {}
        if cls.content_type:
            create_kwargs['content_type'] = \
                ContentType.objects.get_for_model(cls.content_type)
        # A string argument means an anonymous (email-only) watch.
        create_kwargs['email' if isinstance(user_or_email_, string_types)
                      else 'user'] = user_or_email_
        # Letters that can't be mistaken for other letters or numbers in
        # most fonts, in case people try to type these:
        distinguishable_letters = \
            'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRTUVWXYZ'
        secret = ''.join(random.choice(distinguishable_letters)
                         for x in range(10))
        # Registered users don't need to confirm, but anonymous users do.
        is_active = ('user' in create_kwargs or
                     not settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES)
        if object_id:
            create_kwargs['object_id'] = object_id
        watch = Watch.objects.create(
            secret=secret,
            is_active=is_active,
            event_type=cls.event_type,
            **create_kwargs)
        for k, v in iteritems(filters):
            WatchFilter.objects.create(watch=watch, name=k,
                                       value=hash_to_unsigned(v))
    # Send email for inactive watches.
    if not watch.is_active:
        email = watch.user.email if watch.user else watch.email
        message = cls._activation_email(watch, email)
        try:
            message.send()
        except SMTPException as e:
            # Roll back the watch so a failed confirmation isn't left
            # half-registered.
            watch.delete()
            raise ActivationRequestFailed(e.recipients)
    return watch
@classmethod
def stop_notifying(cls, user_or_email_, **filters):
    """Delete every watch matching the exact user/email and filters.

    Both active and inactive watches are removed, including any
    duplicates left behind by the get-then-create race condition.

    Implementations in subclasses may take different arguments; see the
    docstring of :meth:`is_notifying()`.
    """
    doomed = cls._watches_belonging_to_user(user_or_email_, **filters)
    doomed.delete()
# TODO: If GenericForeignKeys don't give us cascading deletes, make a
# stop_notifying_all(**filters) or something. It should delete any watch of
# the class's event_type and content_type and having filters matching each
# of **filters. Even if there are additional filters on a watch, that watch
# should still be deleted so we can delete, for example, any watch that
# references a certain Question instance. To do that, factor such that you
# can effectively call _watches_belonging_to_user() without it calling
# extra().
# Subclasses should implement the following:

def _mails(self, users_and_watches):
    """Return an iterable yielding an EmailMessage to send to each user.

    :arg users_and_watches: an iterable of (User or EmailUser, [Watches])
        pairs where the first element is the user to send to and the second
        is a list of watches (usually just one) that indicated the
        user's interest in this event

    :meth:`~tidings.utils.emails_with_users_and_watches()` can come in
    handy for generating mails from Django templates.
    """
    # Did this instead of mail() because a common case might be sending the
    # same mail to many users. mail() would make it difficult to avoid
    # redoing the templating every time.
    raise NotImplementedError
def _users_watching(self, **kwargs):
    """Yield (User/EmailUser, [Watch, ...]) pairs for everyone watching
    this event.

    The default implementation simply delegates to
    :meth:`_users_watching_by_filter`, i.e. it matches on this object's
    ``event_type`` and, when defined, ``content_type``.
    """
    return self._users_watching_by_filter(**kwargs)
@classmethod
def _activation_email(cls, watch, email):
    """Return an EmailMessage to send to anonymous watchers.

    They are expected to follow the activation URL sent in the email to
    activate their watch, so you should include at least that.

    :arg watch: the just-created, inactive Watch
    :arg email: the address the confirmation should go to
    """
    # TODO: basic implementation.
    # Placeholder subject/body; subclasses should override with real copy.
    return mail.EmailMessage('TODO', 'Activate!',
                             settings.TIDINGS_FROM_ADDRESS,
                             [email])
@classmethod
def _activation_url(cls, watch):
    """Return a URL pointing to a view which :meth:`activates
    <tidings.models.Watch.activate()>` a watch.

    TODO: provide generic implementation of this before liberating.
    Generic implementation could involve a setting to the default
    ``reverse()`` path, e.g. ``'tidings.activate_watch'``.
    """
    # Abstract: subclasses must supply the URL scheme.
    raise NotImplementedError
@classmethod
def description_of_watch(cls, watch):
    """Return a human-readable description of the Watch which can be
    used in emails.

    For example, "changes to English articles"
    """
    # Abstract: subclasses must describe their own watches.
    raise NotImplementedError
|
mozilla/django-tidings | tidings/events.py | Event._fire_task | python | def _fire_task(self, exclude=None):
connection = mail.get_connection(fail_silently=True)
# Warning: fail_silently swallows errors thrown by the generators, too.
connection.open()
for m in self._mails(self._users_watching(exclude=exclude)):
connection.send_messages([m]) | Build and send the emails as a celery task. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/events.py#L139-L145 | null | class Event(object):
"""Abstract base class for events
An :class:`Event` represents, simply, something that occurs. A
:class:`~tidings.models.Watch` is a record of someone's interest in a
certain type of :class:`Event`, distinguished by ``Event.event_type``.
Fire an Event (``SomeEvent.fire()``) from the code that causes the
interesting event to occur. Fire it any time the event *might* have
occurred. The Event will determine whether conditions are right to actually
send notifications; don't succumb to the temptation to do these tests
outside the Event, because you'll end up repeating yourself if the event is
ever fired from more than one place.
:class:`Event` subclasses can optionally represent a more limited scope of
interest by populating the ``Watch.content_type`` field and/or adding
related :class:`~tidings.models.WatchFilter` rows holding name/value pairs,
the meaning of which is up to each individual subclass. NULL values are
considered wildcards.
:class:`Event` subclass instances must be pickleable so they can be
shuttled off to celery tasks.
"""
# Subclasses identify themselves via event_type and may narrow their
# scope with content_type, e.g.:
# event_type = 'hamster modified' # key for the event_type column
content_type = None  # or, for example, Hamster

#: Possible filter keys, for validation only. For example:
#: ``set(['color', 'flavor'])``
filters = set()
def fire(self, exclude=None, delay=True):
"""Notify everyone watching the event.
We are explicit about sending notifications; we don't just key off
creation signals, because the receiver of a ``post_save`` signal has no
idea what just changed, so it doesn't know which notifications to send.
Also, we could easily send mail accidentally: for instance, during
tests. If we want implicit event firing, we can always register a
signal handler that calls :meth:`fire()`.
:arg exclude: If a saved user is passed in, that user will not be
notified, though anonymous notifications having the same email
address may still be sent. A sequence of users may also be passed in.
:arg delay: If True (default), the event is handled asynchronously with
Celery. This requires the pickle task serializer, which is no longer
the default starting in Celery 4.0. If False, the event is processed
immediately.
"""
if delay:
# Tasks don't receive the `self` arg implicitly.
self._fire_task.apply_async(
args=(self,),
kwargs={'exclude': exclude},
serializer='pickle')
else:
self._fire_task(self, exclude=exclude)
@task
@classmethod
def _validate_filters(cls, filters):
"""Raise a TypeError if ``filters`` contains any keys inappropriate to
this event class."""
for k in iterkeys(filters):
if k not in cls.filters:
# Mirror "unexpected keyword argument" message:
raise TypeError("%s got an unsupported filter type '%s'" %
(cls.__name__, k))
def _users_watching_by_filter(self, object_id=None, exclude=None,
**filters):
"""Return an iterable of (``User``/:class:`~tidings.models.EmailUser`,
[:class:`~tidings.models.Watch` objects]) tuples watching the event.
Of multiple Users/EmailUsers having the same email address, only one is
returned. Users are favored over EmailUsers so we are sure to be able
to, for example, include a link to a user profile in the mail.
The list of :class:`~tidings.models.Watch` objects includes both
those tied to the given User (if there is a registered user)
and to any anonymous Watch having the same email address. This
allows you to include all relevant unsubscribe URLs in a mail,
for example. It also lets you make decisions in the
:meth:`~tidings.events.EventUnion._mails()` method of
:class:`~tidings.events.EventUnion` based on the kinds of
watches found.
"Watching the event" means having a Watch whose ``event_type`` is
``self.event_type``, whose ``content_type`` is ``self.content_type`` or
``NULL``, whose ``object_id`` is ``object_id`` or ``NULL``, and whose
WatchFilter rows match as follows: each name/value pair given in
``filters`` must be matched by a related WatchFilter, or there must be
no related WatchFilter having that name. If you find yourself wanting
the lack of a particularly named WatchFilter to scuttle the match, use
a different event_type instead.
:arg exclude: If a saved user is passed in as this argument, that user
will never be returned, though anonymous watches having the same
email address may. A sequence of users may also be passed in.
"""
# I don't think we can use the ORM here, as there's no way to get a
# second condition (name=whatever) into a left join. However, if we
# were willing to have 2 subqueries run for every watch row--select
# {are there any filters with name=x?} and select {is there a filter
# with name=x and value=y?}--we could do it with extra(). Then we could
# have EventUnion simply | the QuerySets together, which would avoid
# having to merge in Python.
if exclude is None:
exclude = []
elif not isinstance(exclude, Sequence):
exclude = [exclude]
def filter_conditions():
"""Return joins, WHERE conditions, and params to bind to them in
order to check a notification against all the given filters."""
# Not a one-liner. You're welcome. :-)
self._validate_filters(filters)
joins, wheres, join_params, where_params = [], [], [], []
for n, (k, v) in enumerate(iteritems(filters)):
joins.append(
'LEFT JOIN tidings_watchfilter f{n} '
'ON f{n}.watch_id=w.id '
'AND f{n}.name=%s'.format(n=n))
join_params.append(k)
wheres.append('(f{n}.value=%s '
'OR f{n}.value IS NULL)'.format(n=n))
where_params.append(hash_to_unsigned(v))
return joins, wheres, join_params + where_params
# Apply watchfilter constraints:
joins, wheres, params = filter_conditions()
# Start off with event_type, which is always a constraint. These go in
# the `wheres` list to guarantee that the AND after the {wheres}
# substitution in the query is okay.
wheres.append('w.event_type=%s')
params.append(self.event_type)
# Constrain on other 1-to-1 attributes:
if self.content_type:
wheres.append('(w.content_type_id IS NULL '
'OR w.content_type_id=%s)')
params.append(ContentType.objects.get_for_model(
self.content_type).id)
if object_id:
wheres.append('(w.object_id IS NULL OR w.object_id=%s)')
params.append(object_id)
if exclude:
# Don't try excluding unsaved Users:1
if not all(e.id for e in exclude):
raise ValueError("Can't exclude an unsaved User.")
wheres.append('(u.id IS NULL OR u.id NOT IN (%s))' %
', '.join('%s' for e in exclude))
params.extend(e.id for e in exclude)
def get_fields(model):
if hasattr(model._meta, '_fields'):
# For django versions < 1.6
return model._meta._fields()
else:
# For django versions >= 1.6
return model._meta.fields
User = get_user_model()
model_to_fields = dict((m, [f.get_attname() for f in get_fields(m)])
for m in [User, Watch])
query_fields = [
'u.{0}'.format(field) for field in model_to_fields[User]]
query_fields.extend([
'w.{0}'.format(field) for field in model_to_fields[Watch]])
query = (
'SELECT {fields} '
'FROM tidings_watch w '
'LEFT JOIN {user_table} u ON u.id=w.user_id {joins} '
'WHERE {wheres} '
'AND (length(w.email)>0 OR length(u.email)>0) '
'AND w.is_active '
'ORDER BY u.email DESC, w.email DESC').format(
fields=', '.join(query_fields),
joins=' '.join(joins),
wheres=' AND '.join(wheres),
user_table=User._meta.db_table)
# IIRC, the DESC ordering was something to do with the placement of
# NULLs. Track this down and explain it.
# Put watch in a list just for consistency. Once the pairs go through
# _unique_by_email, watches will be in a list, and EventUnion uses the
# same function to union already-list-enclosed pairs from individual
# events.
return _unique_by_email((u, [w]) for u, w in
multi_raw(query, params, [User, Watch],
model_to_fields))
@classmethod
def _watches_belonging_to_user(cls, user_or_email, object_id=None,
**filters):
"""Return a QuerySet of watches having the given user or email, having
(only) the given filters, and having the event_type and content_type
attrs of the class.
Matched Watches may be either confirmed and unconfirmed. They may
include duplicates if the get-then-create race condition in
:meth:`notify()` allowed them to be created.
If you pass an email, it will be matched against only the email
addresses of anonymous watches. At the moment, the only integration
point planned between anonymous and registered watches is the claiming
of anonymous watches of the same email address on user registration
confirmation.
If you pass the AnonymousUser, this will return an empty QuerySet.
"""
# If we have trouble distinguishing subsets and such, we could store a
# number_of_filters on the Watch.
cls._validate_filters(filters)
if isinstance(user_or_email, string_types):
user_condition = Q(email=user_or_email)
elif user_or_email.is_authenticated:
user_condition = Q(user=user_or_email)
else:
return Watch.objects.none()
# Filter by stuff in the Watch row:
watches = getattr(Watch, 'uncached', Watch.objects).filter(
user_condition,
Q(content_type=ContentType.objects.get_for_model(
cls.content_type)) if cls.content_type else Q(),
Q(object_id=object_id) if object_id else Q(),
event_type=cls.event_type).extra(
where=['(SELECT count(*) FROM tidings_watchfilter WHERE '
'tidings_watchfilter.watch_id='
'tidings_watch.id)=%s'],
params=[len(filters)])
# Optimization: If the subselect ends up being slow, store the number
# of filters in each Watch row or try a GROUP BY.
# Apply 1-to-many filters:
for k, v in iteritems(filters):
watches = watches.filter(filters__name=k,
filters__value=hash_to_unsigned(v))
return watches
@classmethod
# Funny arg name to reserve use of nice ones for filters
def is_notifying(cls, user_or_email_, object_id=None, **filters):
"""Return whether the user/email is watching this event (either
active or inactive watches), conditional on meeting the criteria in
``filters``.
Count only watches that match the given filters exactly--not ones which
match merely a superset of them. This lets callers distinguish between
watches which overlap in scope. Equivalently, this lets callers check
whether :meth:`notify()` has been called with these arguments.
Implementations in subclasses may take different arguments--for
example, to assume certain filters--though most will probably just use
this. However, subclasses should clearly document what filters they
supports and the meaning of each.
Passing this an ``AnonymousUser`` always returns ``False``. This means
you can always pass it ``request.user`` in a view and get a sensible
response.
"""
return cls._watches_belonging_to_user(user_or_email_,
object_id=object_id,
**filters).exists()
@classmethod
def notify(cls, user_or_email_, object_id=None, **filters):
"""Start notifying the given user or email address when this event
occurs and meets the criteria given in ``filters``.
Return the created (or the existing matching) Watch so you can call
:meth:`~tidings.models.Watch.activate()` on it if you're so inclined.
Implementations in subclasses may take different arguments; see the
docstring of :meth:`is_notifying()`.
Send an activation email if an anonymous watch is created and
:data:`~django.conf.settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES` is
``True``. If the activation request fails, raise a
ActivationRequestFailed exception.
Calling :meth:`notify()` twice for an anonymous user will send the
email each time.
"""
# A test-for-existence-then-create race condition exists here, but it
# doesn't matter: de-duplication on fire() and deletion of all matches
# on stop_notifying() nullify its effects.
try:
# Pick 1 if >1 are returned:
watch = cls._watches_belonging_to_user(
user_or_email_,
object_id=object_id,
**filters)[0:1].get()
except Watch.DoesNotExist:
create_kwargs = {}
if cls.content_type:
create_kwargs['content_type'] = \
ContentType.objects.get_for_model(cls.content_type)
create_kwargs['email' if isinstance(user_or_email_, string_types)
else 'user'] = user_or_email_
# Letters that can't be mistaken for other letters or numbers in
# most fonts, in case people try to type these:
distinguishable_letters = \
'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRTUVWXYZ'
secret = ''.join(random.choice(distinguishable_letters)
for x in range(10))
# Registered users don't need to confirm, but anonymous users do.
is_active = ('user' in create_kwargs or
not settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES)
if object_id:
create_kwargs['object_id'] = object_id
watch = Watch.objects.create(
secret=secret,
is_active=is_active,
event_type=cls.event_type,
**create_kwargs)
for k, v in iteritems(filters):
WatchFilter.objects.create(watch=watch, name=k,
value=hash_to_unsigned(v))
# Send email for inactive watches.
if not watch.is_active:
email = watch.user.email if watch.user else watch.email
message = cls._activation_email(watch, email)
try:
message.send()
except SMTPException as e:
watch.delete()
raise ActivationRequestFailed(e.recipients)
return watch
@classmethod
def stop_notifying(cls, user_or_email_, **filters):
"""Delete all watches matching the exact user/email and filters.
Delete both active and inactive watches. If duplicate watches
exist due to the get-then-create race condition, delete them all.
Implementations in subclasses may take different arguments; see the
docstring of :meth:`is_notifying()`.
"""
cls._watches_belonging_to_user(user_or_email_, **filters).delete()
# TODO: If GenericForeignKeys don't give us cascading deletes, make a
# stop_notifying_all(**filters) or something. It should delete any watch of
# the class's event_type and content_type and having filters matching each
# of **filters. Even if there are additional filters on a watch, that watch
# should still be deleted so we can delete, for example, any watch that
# references a certain Question instance. To do that, factor such that you
# can effectively call _watches_belonging_to_user() without it calling
# extra().
# Subclasses should implement the following:
def _mails(self, users_and_watches):
"""Return an iterable yielding an EmailMessage to send to each user.
:arg users_and_watches: an iterable of (User or EmailUser, [Watches])
pairs where the first element is the user to send to and the second
is a list of watches (usually just one) that indicated the
user's interest in this event
:meth:`~tidings.utils.emails_with_users_and_watches()` can come in
handy for generating mails from Django templates.
"""
# Did this instead of mail() because a common case might be sending the
# same mail to many users. mail() would make it difficult to avoid
# redoing the templating every time.
raise NotImplementedError
def _users_watching(self, **kwargs):
"""Return an iterable of Users and EmailUsers watching this event
and the Watches that map them to it.
Each yielded item is a tuple: (User or EmailUser, [list of Watches]).
Default implementation returns users watching this object's event_type
and, if defined, content_type.
"""
return self._users_watching_by_filter(**kwargs)
@classmethod
def _activation_email(cls, watch, email):
"""Return an EmailMessage to send to anonymous watchers.
They are expected to follow the activation URL sent in the email to
activate their watch, so you should include at least that.
"""
# TODO: basic implementation.
return mail.EmailMessage('TODO', 'Activate!',
settings.TIDINGS_FROM_ADDRESS,
[email])
@classmethod
def _activation_url(cls, watch):
"""Return a URL pointing to a view which :meth:`activates
<tidings.models.Watch.activate()>` a watch.
TODO: provide generic implementation of this before liberating.
Generic implementation could involve a setting to the default
``reverse()`` path, e.g. ``'tidings.activate_watch'``.
"""
raise NotImplementedError
@classmethod
def description_of_watch(cls, watch):
"""Return a description of the Watch which can be used in emails.
For example, "changes to English articles"
"""
raise NotImplementedError
|
mozilla/django-tidings | tidings/events.py | Event._validate_filters | python | def _validate_filters(cls, filters):
for k in iterkeys(filters):
if k not in cls.filters:
# Mirror "unexpected keyword argument" message:
raise TypeError("%s got an unsupported filter type '%s'" %
(cls.__name__, k)) | Raise a TypeError if ``filters`` contains any keys inappropriate to
this event class. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/events.py#L148-L155 | null | class Event(object):
"""Abstract base class for events
An :class:`Event` represents, simply, something that occurs. A
:class:`~tidings.models.Watch` is a record of someone's interest in a
certain type of :class:`Event`, distinguished by ``Event.event_type``.
Fire an Event (``SomeEvent.fire()``) from the code that causes the
interesting event to occur. Fire it any time the event *might* have
occurred. The Event will determine whether conditions are right to actually
send notifications; don't succumb to the temptation to do these tests
outside the Event, because you'll end up repeating yourself if the event is
ever fired from more than one place.
:class:`Event` subclasses can optionally represent a more limited scope of
interest by populating the ``Watch.content_type`` field and/or adding
related :class:`~tidings.models.WatchFilter` rows holding name/value pairs,
the meaning of which is up to each individual subclass. NULL values are
considered wildcards.
:class:`Event` subclass instances must be pickleable so they can be
shuttled off to celery tasks.
"""
# event_type = 'hamster modified' # key for the event_type column
content_type = None # or, for example, Hamster
#: Possible filter keys, for validation only. For example:
#: ``set(['color', 'flavor'])``
filters = set()
def fire(self, exclude=None, delay=True):
"""Notify everyone watching the event.
We are explicit about sending notifications; we don't just key off
creation signals, because the receiver of a ``post_save`` signal has no
idea what just changed, so it doesn't know which notifications to send.
Also, we could easily send mail accidentally: for instance, during
tests. If we want implicit event firing, we can always register a
signal handler that calls :meth:`fire()`.
:arg exclude: If a saved user is passed in, that user will not be
notified, though anonymous notifications having the same email
address may still be sent. A sequence of users may also be passed in.
:arg delay: If True (default), the event is handled asynchronously with
Celery. This requires the pickle task serializer, which is no longer
the default starting in Celery 4.0. If False, the event is processed
immediately.
"""
if delay:
# Tasks don't receive the `self` arg implicitly.
self._fire_task.apply_async(
args=(self,),
kwargs={'exclude': exclude},
serializer='pickle')
else:
self._fire_task(self, exclude=exclude)
@task
def _fire_task(self, exclude=None):
"""Build and send the emails as a celery task."""
connection = mail.get_connection(fail_silently=True)
# Warning: fail_silently swallows errors thrown by the generators, too.
connection.open()
for m in self._mails(self._users_watching(exclude=exclude)):
connection.send_messages([m])
@classmethod
def _users_watching_by_filter(self, object_id=None, exclude=None,
**filters):
"""Return an iterable of (``User``/:class:`~tidings.models.EmailUser`,
[:class:`~tidings.models.Watch` objects]) tuples watching the event.
Of multiple Users/EmailUsers having the same email address, only one is
returned. Users are favored over EmailUsers so we are sure to be able
to, for example, include a link to a user profile in the mail.
The list of :class:`~tidings.models.Watch` objects includes both
those tied to the given User (if there is a registered user)
and to any anonymous Watch having the same email address. This
allows you to include all relevant unsubscribe URLs in a mail,
for example. It also lets you make decisions in the
:meth:`~tidings.events.EventUnion._mails()` method of
:class:`~tidings.events.EventUnion` based on the kinds of
watches found.
"Watching the event" means having a Watch whose ``event_type`` is
``self.event_type``, whose ``content_type`` is ``self.content_type`` or
``NULL``, whose ``object_id`` is ``object_id`` or ``NULL``, and whose
WatchFilter rows match as follows: each name/value pair given in
``filters`` must be matched by a related WatchFilter, or there must be
no related WatchFilter having that name. If you find yourself wanting
the lack of a particularly named WatchFilter to scuttle the match, use
a different event_type instead.
:arg exclude: If a saved user is passed in as this argument, that user
will never be returned, though anonymous watches having the same
email address may. A sequence of users may also be passed in.
"""
# I don't think we can use the ORM here, as there's no way to get a
# second condition (name=whatever) into a left join. However, if we
# were willing to have 2 subqueries run for every watch row--select
# {are there any filters with name=x?} and select {is there a filter
# with name=x and value=y?}--we could do it with extra(). Then we could
# have EventUnion simply | the QuerySets together, which would avoid
# having to merge in Python.
if exclude is None:
exclude = []
elif not isinstance(exclude, Sequence):
exclude = [exclude]
def filter_conditions():
"""Return joins, WHERE conditions, and params to bind to them in
order to check a notification against all the given filters."""
# Not a one-liner. You're welcome. :-)
self._validate_filters(filters)
joins, wheres, join_params, where_params = [], [], [], []
for n, (k, v) in enumerate(iteritems(filters)):
joins.append(
'LEFT JOIN tidings_watchfilter f{n} '
'ON f{n}.watch_id=w.id '
'AND f{n}.name=%s'.format(n=n))
join_params.append(k)
wheres.append('(f{n}.value=%s '
'OR f{n}.value IS NULL)'.format(n=n))
where_params.append(hash_to_unsigned(v))
return joins, wheres, join_params + where_params
# Apply watchfilter constraints:
joins, wheres, params = filter_conditions()
# Start off with event_type, which is always a constraint. These go in
# the `wheres` list to guarantee that the AND after the {wheres}
# substitution in the query is okay.
wheres.append('w.event_type=%s')
params.append(self.event_type)
# Constrain on other 1-to-1 attributes:
if self.content_type:
wheres.append('(w.content_type_id IS NULL '
'OR w.content_type_id=%s)')
params.append(ContentType.objects.get_for_model(
self.content_type).id)
if object_id:
wheres.append('(w.object_id IS NULL OR w.object_id=%s)')
params.append(object_id)
if exclude:
# Don't try excluding unsaved Users:1
if not all(e.id for e in exclude):
raise ValueError("Can't exclude an unsaved User.")
wheres.append('(u.id IS NULL OR u.id NOT IN (%s))' %
', '.join('%s' for e in exclude))
params.extend(e.id for e in exclude)
def get_fields(model):
if hasattr(model._meta, '_fields'):
# For django versions < 1.6
return model._meta._fields()
else:
# For django versions >= 1.6
return model._meta.fields
User = get_user_model()
model_to_fields = dict((m, [f.get_attname() for f in get_fields(m)])
for m in [User, Watch])
query_fields = [
'u.{0}'.format(field) for field in model_to_fields[User]]
query_fields.extend([
'w.{0}'.format(field) for field in model_to_fields[Watch]])
query = (
'SELECT {fields} '
'FROM tidings_watch w '
'LEFT JOIN {user_table} u ON u.id=w.user_id {joins} '
'WHERE {wheres} '
'AND (length(w.email)>0 OR length(u.email)>0) '
'AND w.is_active '
'ORDER BY u.email DESC, w.email DESC').format(
fields=', '.join(query_fields),
joins=' '.join(joins),
wheres=' AND '.join(wheres),
user_table=User._meta.db_table)
# IIRC, the DESC ordering was something to do with the placement of
# NULLs. Track this down and explain it.
# Put watch in a list just for consistency. Once the pairs go through
# _unique_by_email, watches will be in a list, and EventUnion uses the
# same function to union already-list-enclosed pairs from individual
# events.
return _unique_by_email((u, [w]) for u, w in
multi_raw(query, params, [User, Watch],
model_to_fields))
@classmethod
def _watches_belonging_to_user(cls, user_or_email, object_id=None,
**filters):
"""Return a QuerySet of watches having the given user or email, having
(only) the given filters, and having the event_type and content_type
attrs of the class.
Matched Watches may be either confirmed and unconfirmed. They may
include duplicates if the get-then-create race condition in
:meth:`notify()` allowed them to be created.
If you pass an email, it will be matched against only the email
addresses of anonymous watches. At the moment, the only integration
point planned between anonymous and registered watches is the claiming
of anonymous watches of the same email address on user registration
confirmation.
If you pass the AnonymousUser, this will return an empty QuerySet.
"""
# If we have trouble distinguishing subsets and such, we could store a
# number_of_filters on the Watch.
cls._validate_filters(filters)
if isinstance(user_or_email, string_types):
user_condition = Q(email=user_or_email)
elif user_or_email.is_authenticated:
user_condition = Q(user=user_or_email)
else:
return Watch.objects.none()
# Filter by stuff in the Watch row:
watches = getattr(Watch, 'uncached', Watch.objects).filter(
user_condition,
Q(content_type=ContentType.objects.get_for_model(
cls.content_type)) if cls.content_type else Q(),
Q(object_id=object_id) if object_id else Q(),
event_type=cls.event_type).extra(
where=['(SELECT count(*) FROM tidings_watchfilter WHERE '
'tidings_watchfilter.watch_id='
'tidings_watch.id)=%s'],
params=[len(filters)])
# Optimization: If the subselect ends up being slow, store the number
# of filters in each Watch row or try a GROUP BY.
# Apply 1-to-many filters:
for k, v in iteritems(filters):
watches = watches.filter(filters__name=k,
filters__value=hash_to_unsigned(v))
return watches
@classmethod
# Funny arg name to reserve use of nice ones for filters
def is_notifying(cls, user_or_email_, object_id=None, **filters):
"""Return whether the user/email is watching this event (either
active or inactive watches), conditional on meeting the criteria in
``filters``.
Count only watches that match the given filters exactly--not ones which
match merely a superset of them. This lets callers distinguish between
watches which overlap in scope. Equivalently, this lets callers check
whether :meth:`notify()` has been called with these arguments.
Implementations in subclasses may take different arguments--for
example, to assume certain filters--though most will probably just use
this. However, subclasses should clearly document what filters they
supports and the meaning of each.
Passing this an ``AnonymousUser`` always returns ``False``. This means
you can always pass it ``request.user`` in a view and get a sensible
response.
"""
return cls._watches_belonging_to_user(user_or_email_,
object_id=object_id,
**filters).exists()
@classmethod
def notify(cls, user_or_email_, object_id=None, **filters):
"""Start notifying the given user or email address when this event
occurs and meets the criteria given in ``filters``.
Return the created (or the existing matching) Watch so you can call
:meth:`~tidings.models.Watch.activate()` on it if you're so inclined.
Implementations in subclasses may take different arguments; see the
docstring of :meth:`is_notifying()`.
Send an activation email if an anonymous watch is created and
:data:`~django.conf.settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES` is
``True``. If the activation request fails, raise a
ActivationRequestFailed exception.
Calling :meth:`notify()` twice for an anonymous user will send the
email each time.
"""
# A test-for-existence-then-create race condition exists here, but it
# doesn't matter: de-duplication on fire() and deletion of all matches
# on stop_notifying() nullify its effects.
try:
# Pick 1 if >1 are returned:
watch = cls._watches_belonging_to_user(
user_or_email_,
object_id=object_id,
**filters)[0:1].get()
except Watch.DoesNotExist:
create_kwargs = {}
if cls.content_type:
create_kwargs['content_type'] = \
ContentType.objects.get_for_model(cls.content_type)
create_kwargs['email' if isinstance(user_or_email_, string_types)
else 'user'] = user_or_email_
# Letters that can't be mistaken for other letters or numbers in
# most fonts, in case people try to type these:
distinguishable_letters = \
'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRTUVWXYZ'
secret = ''.join(random.choice(distinguishable_letters)
for x in range(10))
# Registered users don't need to confirm, but anonymous users do.
is_active = ('user' in create_kwargs or
not settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES)
if object_id:
create_kwargs['object_id'] = object_id
watch = Watch.objects.create(
secret=secret,
is_active=is_active,
event_type=cls.event_type,
**create_kwargs)
for k, v in iteritems(filters):
WatchFilter.objects.create(watch=watch, name=k,
value=hash_to_unsigned(v))
# Send email for inactive watches.
if not watch.is_active:
email = watch.user.email if watch.user else watch.email
message = cls._activation_email(watch, email)
try:
message.send()
except SMTPException as e:
watch.delete()
raise ActivationRequestFailed(e.recipients)
return watch
@classmethod
def stop_notifying(cls, user_or_email_, **filters):
"""Delete all watches matching the exact user/email and filters.
Delete both active and inactive watches. If duplicate watches
exist due to the get-then-create race condition, delete them all.
Implementations in subclasses may take different arguments; see the
docstring of :meth:`is_notifying()`.
"""
cls._watches_belonging_to_user(user_or_email_, **filters).delete()
# TODO: If GenericForeignKeys don't give us cascading deletes, make a
# stop_notifying_all(**filters) or something. It should delete any watch of
# the class's event_type and content_type and having filters matching each
# of **filters. Even if there are additional filters on a watch, that watch
# should still be deleted so we can delete, for example, any watch that
# references a certain Question instance. To do that, factor such that you
# can effectively call _watches_belonging_to_user() without it calling
# extra().
# Subclasses should implement the following:
def _mails(self, users_and_watches):
"""Return an iterable yielding an EmailMessage to send to each user.
:arg users_and_watches: an iterable of (User or EmailUser, [Watches])
pairs where the first element is the user to send to and the second
is a list of watches (usually just one) that indicated the
user's interest in this event
:meth:`~tidings.utils.emails_with_users_and_watches()` can come in
handy for generating mails from Django templates.
"""
# Did this instead of mail() because a common case might be sending the
# same mail to many users. mail() would make it difficult to avoid
# redoing the templating every time.
raise NotImplementedError
def _users_watching(self, **kwargs):
"""Return an iterable of Users and EmailUsers watching this event
and the Watches that map them to it.
Each yielded item is a tuple: (User or EmailUser, [list of Watches]).
Default implementation returns users watching this object's event_type
and, if defined, content_type.
"""
return self._users_watching_by_filter(**kwargs)
@classmethod
def _activation_email(cls, watch, email):
"""Return an EmailMessage to send to anonymous watchers.
They are expected to follow the activation URL sent in the email to
activate their watch, so you should include at least that.
"""
# TODO: basic implementation.
return mail.EmailMessage('TODO', 'Activate!',
settings.TIDINGS_FROM_ADDRESS,
[email])
@classmethod
def _activation_url(cls, watch):
"""Return a URL pointing to a view which :meth:`activates
<tidings.models.Watch.activate()>` a watch.
TODO: provide generic implementation of this before liberating.
Generic implementation could involve a setting to the default
``reverse()`` path, e.g. ``'tidings.activate_watch'``.
"""
raise NotImplementedError
@classmethod
def description_of_watch(cls, watch):
"""Return a description of the Watch which can be used in emails.
For example, "changes to English articles"
"""
raise NotImplementedError
|
mozilla/django-tidings | tidings/events.py | Event._users_watching_by_filter | python | def _users_watching_by_filter(self, object_id=None, exclude=None,
**filters):
# I don't think we can use the ORM here, as there's no way to get a
# second condition (name=whatever) into a left join. However, if we
# were willing to have 2 subqueries run for every watch row--select
# {are there any filters with name=x?} and select {is there a filter
# with name=x and value=y?}--we could do it with extra(). Then we could
# have EventUnion simply | the QuerySets together, which would avoid
# having to merge in Python.
if exclude is None:
exclude = []
elif not isinstance(exclude, Sequence):
exclude = [exclude]
def filter_conditions():
"""Return joins, WHERE conditions, and params to bind to them in
order to check a notification against all the given filters."""
# Not a one-liner. You're welcome. :-)
self._validate_filters(filters)
joins, wheres, join_params, where_params = [], [], [], []
for n, (k, v) in enumerate(iteritems(filters)):
joins.append(
'LEFT JOIN tidings_watchfilter f{n} '
'ON f{n}.watch_id=w.id '
'AND f{n}.name=%s'.format(n=n))
join_params.append(k)
wheres.append('(f{n}.value=%s '
'OR f{n}.value IS NULL)'.format(n=n))
where_params.append(hash_to_unsigned(v))
return joins, wheres, join_params + where_params
# Apply watchfilter constraints:
joins, wheres, params = filter_conditions()
# Start off with event_type, which is always a constraint. These go in
# the `wheres` list to guarantee that the AND after the {wheres}
# substitution in the query is okay.
wheres.append('w.event_type=%s')
params.append(self.event_type)
# Constrain on other 1-to-1 attributes:
if self.content_type:
wheres.append('(w.content_type_id IS NULL '
'OR w.content_type_id=%s)')
params.append(ContentType.objects.get_for_model(
self.content_type).id)
if object_id:
wheres.append('(w.object_id IS NULL OR w.object_id=%s)')
params.append(object_id)
if exclude:
# Don't try excluding unsaved Users:1
if not all(e.id for e in exclude):
raise ValueError("Can't exclude an unsaved User.")
wheres.append('(u.id IS NULL OR u.id NOT IN (%s))' %
', '.join('%s' for e in exclude))
params.extend(e.id for e in exclude)
def get_fields(model):
if hasattr(model._meta, '_fields'):
# For django versions < 1.6
return model._meta._fields()
else:
# For django versions >= 1.6
return model._meta.fields
User = get_user_model()
model_to_fields = dict((m, [f.get_attname() for f in get_fields(m)])
for m in [User, Watch])
query_fields = [
'u.{0}'.format(field) for field in model_to_fields[User]]
query_fields.extend([
'w.{0}'.format(field) for field in model_to_fields[Watch]])
query = (
'SELECT {fields} '
'FROM tidings_watch w '
'LEFT JOIN {user_table} u ON u.id=w.user_id {joins} '
'WHERE {wheres} '
'AND (length(w.email)>0 OR length(u.email)>0) '
'AND w.is_active '
'ORDER BY u.email DESC, w.email DESC').format(
fields=', '.join(query_fields),
joins=' '.join(joins),
wheres=' AND '.join(wheres),
user_table=User._meta.db_table)
# IIRC, the DESC ordering was something to do with the placement of
# NULLs. Track this down and explain it.
# Put watch in a list just for consistency. Once the pairs go through
# _unique_by_email, watches will be in a list, and EventUnion uses the
# same function to union already-list-enclosed pairs from individual
# events.
return _unique_by_email((u, [w]) for u, w in
multi_raw(query, params, [User, Watch],
model_to_fields)) | Return an iterable of (``User``/:class:`~tidings.models.EmailUser`,
[:class:`~tidings.models.Watch` objects]) tuples watching the event.
Of multiple Users/EmailUsers having the same email address, only one is
returned. Users are favored over EmailUsers so we are sure to be able
to, for example, include a link to a user profile in the mail.
The list of :class:`~tidings.models.Watch` objects includes both
those tied to the given User (if there is a registered user)
and to any anonymous Watch having the same email address. This
allows you to include all relevant unsubscribe URLs in a mail,
for example. It also lets you make decisions in the
:meth:`~tidings.events.EventUnion._mails()` method of
:class:`~tidings.events.EventUnion` based on the kinds of
watches found.
"Watching the event" means having a Watch whose ``event_type`` is
``self.event_type``, whose ``content_type`` is ``self.content_type`` or
``NULL``, whose ``object_id`` is ``object_id`` or ``NULL``, and whose
WatchFilter rows match as follows: each name/value pair given in
``filters`` must be matched by a related WatchFilter, or there must be
no related WatchFilter having that name. If you find yourself wanting
the lack of a particularly named WatchFilter to scuttle the match, use
a different event_type instead.
:arg exclude: If a saved user is passed in as this argument, that user
will never be returned, though anonymous watches having the same
email address may. A sequence of users may also be passed in. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/events.py#L157-L283 | [
"def multi_raw(query, params, models, model_to_fields):\n \"\"\"Scoop multiple model instances out of the DB at once, given a query that\n returns all fields of each.\n\n Return an iterable of sequences of model instances parallel to the\n ``models`` sequence of classes. For example::\n\n [(<User such-and-such>, <Watch such-and-such>), ...]\n\n \"\"\"\n cursor = connections[router.db_for_read(models[0])].cursor()\n cursor.execute(query, params)\n rows = cursor.fetchall()\n\n for row in rows:\n row_iter = iter(row)\n yield [model_class(**dict((a, next(row_iter))\n for a in model_to_fields[model_class]))\n for model_class in models]\n",
"def _unique_by_email(users_and_watches):\n \"\"\"Given a sequence of (User/EmailUser, [Watch, ...]) pairs\n clustered by email address (which is never ''), yield from each\n cluster a single pair like this::\n\n (User/EmailUser, [Watch, Watch, ...]).\n\n The User/Email is that of...\n (1) the first incoming pair where the User has an email and is not\n anonymous, or, if there isn't such a user...\n (2) the first pair.\n\n The list of Watches consists of all those found in the cluster.\n\n Compares email addresses case-insensitively.\n\n \"\"\"\n def ensure_user_has_email(user, cluster_email):\n \"\"\"Make sure the user in the user-watch pair has an email address.\n\n The caller guarantees us an email from either the user or the watch. If\n the passed-in user has no email, we return an EmailUser instead having\n the email address from the watch.\n\n \"\"\"\n # Some of these cases shouldn't happen, but we're tolerant.\n if not getattr(user, 'email', ''):\n user = EmailUser(cluster_email)\n return user\n\n # TODO: Do this instead with clever SQL that somehow returns just the\n # best row for each email.\n\n cluster_email = '' # email of current cluster\n favorite_user = None # best user in cluster so far\n watches = [] # all watches in cluster\n for u, w in users_and_watches:\n # w always has at least 1 Watch. All the emails are the same.\n row_email = u.email or w[0].email\n if cluster_email.lower() != row_email.lower():\n # Starting a new cluster.\n if cluster_email != '':\n # Ship the favorites from the previous cluster:\n yield (ensure_user_has_email(favorite_user, cluster_email),\n watches)\n favorite_user, watches = u, []\n cluster_email = row_email\n elif ((not favorite_user.email or not u.is_authenticated) and\n u.email and u.is_authenticated):\n favorite_user = u\n watches.extend(w)\n if favorite_user is not None:\n yield ensure_user_has_email(favorite_user, cluster_email), watches\n",
"def filter_conditions():\n \"\"\"Return joins, WHERE conditions, and params to bind to them in\n order to check a notification against all the given filters.\"\"\"\n # Not a one-liner. You're welcome. :-)\n self._validate_filters(filters)\n joins, wheres, join_params, where_params = [], [], [], []\n for n, (k, v) in enumerate(iteritems(filters)):\n joins.append(\n 'LEFT JOIN tidings_watchfilter f{n} '\n 'ON f{n}.watch_id=w.id '\n 'AND f{n}.name=%s'.format(n=n))\n join_params.append(k)\n wheres.append('(f{n}.value=%s '\n 'OR f{n}.value IS NULL)'.format(n=n))\n where_params.append(hash_to_unsigned(v))\n return joins, wheres, join_params + where_params\n"
] | class Event(object):
"""Abstract base class for events
An :class:`Event` represents, simply, something that occurs. A
:class:`~tidings.models.Watch` is a record of someone's interest in a
certain type of :class:`Event`, distinguished by ``Event.event_type``.
Fire an Event (``SomeEvent.fire()``) from the code that causes the
interesting event to occur. Fire it any time the event *might* have
occurred. The Event will determine whether conditions are right to actually
send notifications; don't succumb to the temptation to do these tests
outside the Event, because you'll end up repeating yourself if the event is
ever fired from more than one place.
:class:`Event` subclasses can optionally represent a more limited scope of
interest by populating the ``Watch.content_type`` field and/or adding
related :class:`~tidings.models.WatchFilter` rows holding name/value pairs,
the meaning of which is up to each individual subclass. NULL values are
considered wildcards.
:class:`Event` subclass instances must be pickleable so they can be
shuttled off to celery tasks.
"""
# event_type = 'hamster modified' # key for the event_type column
content_type = None # or, for example, Hamster
#: Possible filter keys, for validation only. For example:
#: ``set(['color', 'flavor'])``
filters = set()
def fire(self, exclude=None, delay=True):
"""Notify everyone watching the event.
We are explicit about sending notifications; we don't just key off
creation signals, because the receiver of a ``post_save`` signal has no
idea what just changed, so it doesn't know which notifications to send.
Also, we could easily send mail accidentally: for instance, during
tests. If we want implicit event firing, we can always register a
signal handler that calls :meth:`fire()`.
:arg exclude: If a saved user is passed in, that user will not be
notified, though anonymous notifications having the same email
address may still be sent. A sequence of users may also be passed in.
:arg delay: If True (default), the event is handled asynchronously with
Celery. This requires the pickle task serializer, which is no longer
the default starting in Celery 4.0. If False, the event is processed
immediately.
"""
if delay:
# Tasks don't receive the `self` arg implicitly.
self._fire_task.apply_async(
args=(self,),
kwargs={'exclude': exclude},
serializer='pickle')
else:
self._fire_task(self, exclude=exclude)
@task
def _fire_task(self, exclude=None):
"""Build and send the emails as a celery task."""
connection = mail.get_connection(fail_silently=True)
# Warning: fail_silently swallows errors thrown by the generators, too.
connection.open()
for m in self._mails(self._users_watching(exclude=exclude)):
connection.send_messages([m])
@classmethod
def _validate_filters(cls, filters):
"""Raise a TypeError if ``filters`` contains any keys inappropriate to
this event class."""
for k in iterkeys(filters):
if k not in cls.filters:
# Mirror "unexpected keyword argument" message:
raise TypeError("%s got an unsupported filter type '%s'" %
(cls.__name__, k))
@classmethod
def _watches_belonging_to_user(cls, user_or_email, object_id=None,
**filters):
"""Return a QuerySet of watches having the given user or email, having
(only) the given filters, and having the event_type and content_type
attrs of the class.
Matched Watches may be either confirmed and unconfirmed. They may
include duplicates if the get-then-create race condition in
:meth:`notify()` allowed them to be created.
If you pass an email, it will be matched against only the email
addresses of anonymous watches. At the moment, the only integration
point planned between anonymous and registered watches is the claiming
of anonymous watches of the same email address on user registration
confirmation.
If you pass the AnonymousUser, this will return an empty QuerySet.
"""
# If we have trouble distinguishing subsets and such, we could store a
# number_of_filters on the Watch.
cls._validate_filters(filters)
if isinstance(user_or_email, string_types):
user_condition = Q(email=user_or_email)
elif user_or_email.is_authenticated:
user_condition = Q(user=user_or_email)
else:
return Watch.objects.none()
# Filter by stuff in the Watch row:
watches = getattr(Watch, 'uncached', Watch.objects).filter(
user_condition,
Q(content_type=ContentType.objects.get_for_model(
cls.content_type)) if cls.content_type else Q(),
Q(object_id=object_id) if object_id else Q(),
event_type=cls.event_type).extra(
where=['(SELECT count(*) FROM tidings_watchfilter WHERE '
'tidings_watchfilter.watch_id='
'tidings_watch.id)=%s'],
params=[len(filters)])
# Optimization: If the subselect ends up being slow, store the number
# of filters in each Watch row or try a GROUP BY.
# Apply 1-to-many filters:
for k, v in iteritems(filters):
watches = watches.filter(filters__name=k,
filters__value=hash_to_unsigned(v))
return watches
@classmethod
# Funny arg name to reserve use of nice ones for filters
def is_notifying(cls, user_or_email_, object_id=None, **filters):
"""Return whether the user/email is watching this event (either
active or inactive watches), conditional on meeting the criteria in
``filters``.
Count only watches that match the given filters exactly--not ones which
match merely a superset of them. This lets callers distinguish between
watches which overlap in scope. Equivalently, this lets callers check
whether :meth:`notify()` has been called with these arguments.
Implementations in subclasses may take different arguments--for
example, to assume certain filters--though most will probably just use
this. However, subclasses should clearly document what filters they
supports and the meaning of each.
Passing this an ``AnonymousUser`` always returns ``False``. This means
you can always pass it ``request.user`` in a view and get a sensible
response.
"""
return cls._watches_belonging_to_user(user_or_email_,
object_id=object_id,
**filters).exists()
@classmethod
def notify(cls, user_or_email_, object_id=None, **filters):
"""Start notifying the given user or email address when this event
occurs and meets the criteria given in ``filters``.
Return the created (or the existing matching) Watch so you can call
:meth:`~tidings.models.Watch.activate()` on it if you're so inclined.
Implementations in subclasses may take different arguments; see the
docstring of :meth:`is_notifying()`.
Send an activation email if an anonymous watch is created and
:data:`~django.conf.settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES` is
``True``. If the activation request fails, raise a
ActivationRequestFailed exception.
Calling :meth:`notify()` twice for an anonymous user will send the
email each time.
"""
# A test-for-existence-then-create race condition exists here, but it
# doesn't matter: de-duplication on fire() and deletion of all matches
# on stop_notifying() nullify its effects.
try:
# Pick 1 if >1 are returned:
watch = cls._watches_belonging_to_user(
user_or_email_,
object_id=object_id,
**filters)[0:1].get()
except Watch.DoesNotExist:
create_kwargs = {}
if cls.content_type:
create_kwargs['content_type'] = \
ContentType.objects.get_for_model(cls.content_type)
create_kwargs['email' if isinstance(user_or_email_, string_types)
else 'user'] = user_or_email_
# Letters that can't be mistaken for other letters or numbers in
# most fonts, in case people try to type these:
distinguishable_letters = \
'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRTUVWXYZ'
secret = ''.join(random.choice(distinguishable_letters)
for x in range(10))
# Registered users don't need to confirm, but anonymous users do.
is_active = ('user' in create_kwargs or
not settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES)
if object_id:
create_kwargs['object_id'] = object_id
watch = Watch.objects.create(
secret=secret,
is_active=is_active,
event_type=cls.event_type,
**create_kwargs)
for k, v in iteritems(filters):
WatchFilter.objects.create(watch=watch, name=k,
value=hash_to_unsigned(v))
# Send email for inactive watches.
if not watch.is_active:
email = watch.user.email if watch.user else watch.email
message = cls._activation_email(watch, email)
try:
message.send()
except SMTPException as e:
watch.delete()
raise ActivationRequestFailed(e.recipients)
return watch
@classmethod
def stop_notifying(cls, user_or_email_, **filters):
"""Delete all watches matching the exact user/email and filters.
Delete both active and inactive watches. If duplicate watches
exist due to the get-then-create race condition, delete them all.
Implementations in subclasses may take different arguments; see the
docstring of :meth:`is_notifying()`.
"""
cls._watches_belonging_to_user(user_or_email_, **filters).delete()
# TODO: If GenericForeignKeys don't give us cascading deletes, make a
# stop_notifying_all(**filters) or something. It should delete any watch of
# the class's event_type and content_type and having filters matching each
# of **filters. Even if there are additional filters on a watch, that watch
# should still be deleted so we can delete, for example, any watch that
# references a certain Question instance. To do that, factor such that you
# can effectively call _watches_belonging_to_user() without it calling
# extra().
# Subclasses should implement the following:
def _mails(self, users_and_watches):
"""Return an iterable yielding an EmailMessage to send to each user.
:arg users_and_watches: an iterable of (User or EmailUser, [Watches])
pairs where the first element is the user to send to and the second
is a list of watches (usually just one) that indicated the
user's interest in this event
:meth:`~tidings.utils.emails_with_users_and_watches()` can come in
handy for generating mails from Django templates.
"""
# Did this instead of mail() because a common case might be sending the
# same mail to many users. mail() would make it difficult to avoid
# redoing the templating every time.
raise NotImplementedError
def _users_watching(self, **kwargs):
"""Return an iterable of Users and EmailUsers watching this event
and the Watches that map them to it.
Each yielded item is a tuple: (User or EmailUser, [list of Watches]).
Default implementation returns users watching this object's event_type
and, if defined, content_type.
"""
return self._users_watching_by_filter(**kwargs)
@classmethod
def _activation_email(cls, watch, email):
"""Return an EmailMessage to send to anonymous watchers.
They are expected to follow the activation URL sent in the email to
activate their watch, so you should include at least that.
"""
# TODO: basic implementation.
return mail.EmailMessage('TODO', 'Activate!',
settings.TIDINGS_FROM_ADDRESS,
[email])
@classmethod
def _activation_url(cls, watch):
"""Return a URL pointing to a view which :meth:`activates
<tidings.models.Watch.activate()>` a watch.
TODO: provide generic implementation of this before liberating.
Generic implementation could involve a setting to the default
``reverse()`` path, e.g. ``'tidings.activate_watch'``.
"""
raise NotImplementedError
@classmethod
def description_of_watch(cls, watch):
"""Return a description of the Watch which can be used in emails.
For example, "changes to English articles"
"""
raise NotImplementedError
|
mozilla/django-tidings | tidings/events.py | Event._watches_belonging_to_user | python | def _watches_belonging_to_user(cls, user_or_email, object_id=None,
**filters):
# If we have trouble distinguishing subsets and such, we could store a
# number_of_filters on the Watch.
cls._validate_filters(filters)
if isinstance(user_or_email, string_types):
user_condition = Q(email=user_or_email)
elif user_or_email.is_authenticated:
user_condition = Q(user=user_or_email)
else:
return Watch.objects.none()
# Filter by stuff in the Watch row:
watches = getattr(Watch, 'uncached', Watch.objects).filter(
user_condition,
Q(content_type=ContentType.objects.get_for_model(
cls.content_type)) if cls.content_type else Q(),
Q(object_id=object_id) if object_id else Q(),
event_type=cls.event_type).extra(
where=['(SELECT count(*) FROM tidings_watchfilter WHERE '
'tidings_watchfilter.watch_id='
'tidings_watch.id)=%s'],
params=[len(filters)])
# Optimization: If the subselect ends up being slow, store the number
# of filters in each Watch row or try a GROUP BY.
# Apply 1-to-many filters:
for k, v in iteritems(filters):
watches = watches.filter(filters__name=k,
filters__value=hash_to_unsigned(v))
return watches | Return a QuerySet of watches having the given user or email, having
(only) the given filters, and having the event_type and content_type
attrs of the class.
Matched Watches may be either confirmed and unconfirmed. They may
include duplicates if the get-then-create race condition in
:meth:`notify()` allowed them to be created.
If you pass an email, it will be matched against only the email
addresses of anonymous watches. At the moment, the only integration
point planned between anonymous and registered watches is the claiming
of anonymous watches of the same email address on user registration
confirmation.
If you pass the AnonymousUser, this will return an empty QuerySet. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/events.py#L286-L335 | null | class Event(object):
"""Abstract base class for events
An :class:`Event` represents, simply, something that occurs. A
:class:`~tidings.models.Watch` is a record of someone's interest in a
certain type of :class:`Event`, distinguished by ``Event.event_type``.
Fire an Event (``SomeEvent.fire()``) from the code that causes the
interesting event to occur. Fire it any time the event *might* have
occurred. The Event will determine whether conditions are right to actually
send notifications; don't succumb to the temptation to do these tests
outside the Event, because you'll end up repeating yourself if the event is
ever fired from more than one place.
:class:`Event` subclasses can optionally represent a more limited scope of
interest by populating the ``Watch.content_type`` field and/or adding
related :class:`~tidings.models.WatchFilter` rows holding name/value pairs,
the meaning of which is up to each individual subclass. NULL values are
considered wildcards.
:class:`Event` subclass instances must be pickleable so they can be
shuttled off to celery tasks.
"""
# event_type = 'hamster modified' # key for the event_type column
content_type = None # or, for example, Hamster
#: Possible filter keys, for validation only. For example:
#: ``set(['color', 'flavor'])``
filters = set()
def fire(self, exclude=None, delay=True):
"""Notify everyone watching the event.
We are explicit about sending notifications; we don't just key off
creation signals, because the receiver of a ``post_save`` signal has no
idea what just changed, so it doesn't know which notifications to send.
Also, we could easily send mail accidentally: for instance, during
tests. If we want implicit event firing, we can always register a
signal handler that calls :meth:`fire()`.
:arg exclude: If a saved user is passed in, that user will not be
notified, though anonymous notifications having the same email
address may still be sent. A sequence of users may also be passed in.
:arg delay: If True (default), the event is handled asynchronously with
Celery. This requires the pickle task serializer, which is no longer
the default starting in Celery 4.0. If False, the event is processed
immediately.
"""
if delay:
# Tasks don't receive the `self` arg implicitly.
self._fire_task.apply_async(
args=(self,),
kwargs={'exclude': exclude},
serializer='pickle')
else:
self._fire_task(self, exclude=exclude)
@task
def _fire_task(self, exclude=None):
"""Build and send the emails as a celery task."""
connection = mail.get_connection(fail_silently=True)
# Warning: fail_silently swallows errors thrown by the generators, too.
connection.open()
for m in self._mails(self._users_watching(exclude=exclude)):
connection.send_messages([m])
@classmethod
def _validate_filters(cls, filters):
"""Raise a TypeError if ``filters`` contains any keys inappropriate to
this event class."""
for k in iterkeys(filters):
if k not in cls.filters:
# Mirror "unexpected keyword argument" message:
raise TypeError("%s got an unsupported filter type '%s'" %
(cls.__name__, k))
def _users_watching_by_filter(self, object_id=None, exclude=None,
**filters):
"""Return an iterable of (``User``/:class:`~tidings.models.EmailUser`,
[:class:`~tidings.models.Watch` objects]) tuples watching the event.
Of multiple Users/EmailUsers having the same email address, only one is
returned. Users are favored over EmailUsers so we are sure to be able
to, for example, include a link to a user profile in the mail.
The list of :class:`~tidings.models.Watch` objects includes both
those tied to the given User (if there is a registered user)
and to any anonymous Watch having the same email address. This
allows you to include all relevant unsubscribe URLs in a mail,
for example. It also lets you make decisions in the
:meth:`~tidings.events.EventUnion._mails()` method of
:class:`~tidings.events.EventUnion` based on the kinds of
watches found.
"Watching the event" means having a Watch whose ``event_type`` is
``self.event_type``, whose ``content_type`` is ``self.content_type`` or
``NULL``, whose ``object_id`` is ``object_id`` or ``NULL``, and whose
WatchFilter rows match as follows: each name/value pair given in
``filters`` must be matched by a related WatchFilter, or there must be
no related WatchFilter having that name. If you find yourself wanting
the lack of a particularly named WatchFilter to scuttle the match, use
a different event_type instead.
:arg exclude: If a saved user is passed in as this argument, that user
will never be returned, though anonymous watches having the same
email address may. A sequence of users may also be passed in.
"""
# I don't think we can use the ORM here, as there's no way to get a
# second condition (name=whatever) into a left join. However, if we
# were willing to have 2 subqueries run for every watch row--select
# {are there any filters with name=x?} and select {is there a filter
# with name=x and value=y?}--we could do it with extra(). Then we could
# have EventUnion simply | the QuerySets together, which would avoid
# having to merge in Python.
if exclude is None:
exclude = []
elif not isinstance(exclude, Sequence):
exclude = [exclude]
def filter_conditions():
"""Return joins, WHERE conditions, and params to bind to them in
order to check a notification against all the given filters."""
# Not a one-liner. You're welcome. :-)
self._validate_filters(filters)
joins, wheres, join_params, where_params = [], [], [], []
for n, (k, v) in enumerate(iteritems(filters)):
joins.append(
'LEFT JOIN tidings_watchfilter f{n} '
'ON f{n}.watch_id=w.id '
'AND f{n}.name=%s'.format(n=n))
join_params.append(k)
wheres.append('(f{n}.value=%s '
'OR f{n}.value IS NULL)'.format(n=n))
where_params.append(hash_to_unsigned(v))
return joins, wheres, join_params + where_params
# Apply watchfilter constraints:
joins, wheres, params = filter_conditions()
# Start off with event_type, which is always a constraint. These go in
# the `wheres` list to guarantee that the AND after the {wheres}
# substitution in the query is okay.
wheres.append('w.event_type=%s')
params.append(self.event_type)
# Constrain on other 1-to-1 attributes:
if self.content_type:
wheres.append('(w.content_type_id IS NULL '
'OR w.content_type_id=%s)')
params.append(ContentType.objects.get_for_model(
self.content_type).id)
if object_id:
wheres.append('(w.object_id IS NULL OR w.object_id=%s)')
params.append(object_id)
if exclude:
# Don't try excluding unsaved Users:1
if not all(e.id for e in exclude):
raise ValueError("Can't exclude an unsaved User.")
wheres.append('(u.id IS NULL OR u.id NOT IN (%s))' %
', '.join('%s' for e in exclude))
params.extend(e.id for e in exclude)
def get_fields(model):
if hasattr(model._meta, '_fields'):
# For django versions < 1.6
return model._meta._fields()
else:
# For django versions >= 1.6
return model._meta.fields
User = get_user_model()
model_to_fields = dict((m, [f.get_attname() for f in get_fields(m)])
for m in [User, Watch])
query_fields = [
'u.{0}'.format(field) for field in model_to_fields[User]]
query_fields.extend([
'w.{0}'.format(field) for field in model_to_fields[Watch]])
query = (
'SELECT {fields} '
'FROM tidings_watch w '
'LEFT JOIN {user_table} u ON u.id=w.user_id {joins} '
'WHERE {wheres} '
'AND (length(w.email)>0 OR length(u.email)>0) '
'AND w.is_active '
'ORDER BY u.email DESC, w.email DESC').format(
fields=', '.join(query_fields),
joins=' '.join(joins),
wheres=' AND '.join(wheres),
user_table=User._meta.db_table)
# IIRC, the DESC ordering was something to do with the placement of
# NULLs. Track this down and explain it.
# Put watch in a list just for consistency. Once the pairs go through
# _unique_by_email, watches will be in a list, and EventUnion uses the
# same function to union already-list-enclosed pairs from individual
# events.
return _unique_by_email((u, [w]) for u, w in
multi_raw(query, params, [User, Watch],
model_to_fields))
@classmethod
@classmethod
# Funny arg name to reserve use of nice ones for filters
def is_notifying(cls, user_or_email_, object_id=None, **filters):
"""Return whether the user/email is watching this event (either
active or inactive watches), conditional on meeting the criteria in
``filters``.
Count only watches that match the given filters exactly--not ones which
match merely a superset of them. This lets callers distinguish between
watches which overlap in scope. Equivalently, this lets callers check
whether :meth:`notify()` has been called with these arguments.
Implementations in subclasses may take different arguments--for
example, to assume certain filters--though most will probably just use
this. However, subclasses should clearly document what filters they
supports and the meaning of each.
Passing this an ``AnonymousUser`` always returns ``False``. This means
you can always pass it ``request.user`` in a view and get a sensible
response.
"""
return cls._watches_belonging_to_user(user_or_email_,
object_id=object_id,
**filters).exists()
@classmethod
def notify(cls, user_or_email_, object_id=None, **filters):
"""Start notifying the given user or email address when this event
occurs and meets the criteria given in ``filters``.
Return the created (or the existing matching) Watch so you can call
:meth:`~tidings.models.Watch.activate()` on it if you're so inclined.
Implementations in subclasses may take different arguments; see the
docstring of :meth:`is_notifying()`.
Send an activation email if an anonymous watch is created and
:data:`~django.conf.settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES` is
``True``. If the activation request fails, raise a
ActivationRequestFailed exception.
Calling :meth:`notify()` twice for an anonymous user will send the
email each time.
"""
# A test-for-existence-then-create race condition exists here, but it
# doesn't matter: de-duplication on fire() and deletion of all matches
# on stop_notifying() nullify its effects.
try:
# Pick 1 if >1 are returned:
watch = cls._watches_belonging_to_user(
user_or_email_,
object_id=object_id,
**filters)[0:1].get()
except Watch.DoesNotExist:
create_kwargs = {}
if cls.content_type:
create_kwargs['content_type'] = \
ContentType.objects.get_for_model(cls.content_type)
create_kwargs['email' if isinstance(user_or_email_, string_types)
else 'user'] = user_or_email_
# Letters that can't be mistaken for other letters or numbers in
# most fonts, in case people try to type these:
distinguishable_letters = \
'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRTUVWXYZ'
secret = ''.join(random.choice(distinguishable_letters)
for x in range(10))
# Registered users don't need to confirm, but anonymous users do.
is_active = ('user' in create_kwargs or
not settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES)
if object_id:
create_kwargs['object_id'] = object_id
watch = Watch.objects.create(
secret=secret,
is_active=is_active,
event_type=cls.event_type,
**create_kwargs)
for k, v in iteritems(filters):
WatchFilter.objects.create(watch=watch, name=k,
value=hash_to_unsigned(v))
# Send email for inactive watches.
if not watch.is_active:
email = watch.user.email if watch.user else watch.email
message = cls._activation_email(watch, email)
try:
message.send()
except SMTPException as e:
watch.delete()
raise ActivationRequestFailed(e.recipients)
return watch
@classmethod
def stop_notifying(cls, user_or_email_, **filters):
"""Delete all watches matching the exact user/email and filters.
Delete both active and inactive watches. If duplicate watches
exist due to the get-then-create race condition, delete them all.
Implementations in subclasses may take different arguments; see the
docstring of :meth:`is_notifying()`.
"""
cls._watches_belonging_to_user(user_or_email_, **filters).delete()
# TODO: If GenericForeignKeys don't give us cascading deletes, make a
# stop_notifying_all(**filters) or something. It should delete any watch of
# the class's event_type and content_type and having filters matching each
# of **filters. Even if there are additional filters on a watch, that watch
# should still be deleted so we can delete, for example, any watch that
# references a certain Question instance. To do that, factor such that you
# can effectively call _watches_belonging_to_user() without it calling
# extra().
# Subclasses should implement the following:
def _mails(self, users_and_watches):
"""Return an iterable yielding an EmailMessage to send to each user.
:arg users_and_watches: an iterable of (User or EmailUser, [Watches])
pairs where the first element is the user to send to and the second
is a list of watches (usually just one) that indicated the
user's interest in this event
:meth:`~tidings.utils.emails_with_users_and_watches()` can come in
handy for generating mails from Django templates.
"""
# Did this instead of mail() because a common case might be sending the
# same mail to many users. mail() would make it difficult to avoid
# redoing the templating every time.
raise NotImplementedError
def _users_watching(self, **kwargs):
"""Return an iterable of Users and EmailUsers watching this event
and the Watches that map them to it.
Each yielded item is a tuple: (User or EmailUser, [list of Watches]).
Default implementation returns users watching this object's event_type
and, if defined, content_type.
"""
return self._users_watching_by_filter(**kwargs)
@classmethod
def _activation_email(cls, watch, email):
"""Return an EmailMessage to send to anonymous watchers.
They are expected to follow the activation URL sent in the email to
activate their watch, so you should include at least that.
"""
# TODO: basic implementation.
return mail.EmailMessage('TODO', 'Activate!',
settings.TIDINGS_FROM_ADDRESS,
[email])
@classmethod
def _activation_url(cls, watch):
"""Return a URL pointing to a view which :meth:`activates
<tidings.models.Watch.activate()>` a watch.
TODO: provide generic implementation of this before liberating.
Generic implementation could involve a setting to the default
``reverse()`` path, e.g. ``'tidings.activate_watch'``.
"""
raise NotImplementedError
@classmethod
def description_of_watch(cls, watch):
"""Return a description of the Watch which can be used in emails.
For example, "changes to English articles"
"""
raise NotImplementedError
|
mozilla/django-tidings | tidings/events.py | Event.is_notifying | python | def is_notifying(cls, user_or_email_, object_id=None, **filters):
return cls._watches_belonging_to_user(user_or_email_,
object_id=object_id,
**filters).exists() | Return whether the user/email is watching this event (either
active or inactive watches), conditional on meeting the criteria in
``filters``.
Count only watches that match the given filters exactly--not ones which
match merely a superset of them. This lets callers distinguish between
watches which overlap in scope. Equivalently, this lets callers check
whether :meth:`notify()` has been called with these arguments.
Implementations in subclasses may take different arguments--for
example, to assume certain filters--though most will probably just use
this. However, subclasses should clearly document what filters they
supports and the meaning of each.
Passing this an ``AnonymousUser`` always returns ``False``. This means
you can always pass it ``request.user`` in a view and get a sensible
response. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/events.py#L339-L361 | [
"def _watches_belonging_to_user(cls, user_or_email, object_id=None,\n **filters):\n \"\"\"Return a QuerySet of watches having the given user or email, having\n (only) the given filters, and having the event_type and content_type\n attrs of the class.\n\n Matched Watches may be either confirmed and unconfirmed. They may\n include duplicates if the get-then-create race condition in\n :meth:`notify()` allowed them to be created.\n\n If you pass an email, it will be matched against only the email\n addresses of anonymous watches. At the moment, the only integration\n point planned between anonymous and registered watches is the claiming\n of anonymous watches of the same email address on user registration\n confirmation.\n\n If you pass the AnonymousUser, this will return an empty QuerySet.\n\n \"\"\"\n # If we have trouble distinguishing subsets and such, we could store a\n # number_of_filters on the Watch.\n cls._validate_filters(filters)\n\n if isinstance(user_or_email, string_types):\n user_condition = Q(email=user_or_email)\n elif user_or_email.is_authenticated:\n user_condition = Q(user=user_or_email)\n else:\n return Watch.objects.none()\n\n # Filter by stuff in the Watch row:\n watches = getattr(Watch, 'uncached', Watch.objects).filter(\n user_condition,\n Q(content_type=ContentType.objects.get_for_model(\n cls.content_type)) if cls.content_type else Q(),\n Q(object_id=object_id) if object_id else Q(),\n event_type=cls.event_type).extra(\n where=['(SELECT count(*) FROM tidings_watchfilter WHERE '\n 'tidings_watchfilter.watch_id='\n 'tidings_watch.id)=%s'],\n params=[len(filters)])\n # Optimization: If the subselect ends up being slow, store the number\n # of filters in each Watch row or try a GROUP BY.\n\n # Apply 1-to-many filters:\n for k, v in iteritems(filters):\n watches = watches.filter(filters__name=k,\n filters__value=hash_to_unsigned(v))\n\n return watches\n"
] | class Event(object):
"""Abstract base class for events
An :class:`Event` represents, simply, something that occurs. A
:class:`~tidings.models.Watch` is a record of someone's interest in a
certain type of :class:`Event`, distinguished by ``Event.event_type``.
Fire an Event (``SomeEvent.fire()``) from the code that causes the
interesting event to occur. Fire it any time the event *might* have
occurred. The Event will determine whether conditions are right to actually
send notifications; don't succumb to the temptation to do these tests
outside the Event, because you'll end up repeating yourself if the event is
ever fired from more than one place.
:class:`Event` subclasses can optionally represent a more limited scope of
interest by populating the ``Watch.content_type`` field and/or adding
related :class:`~tidings.models.WatchFilter` rows holding name/value pairs,
the meaning of which is up to each individual subclass. NULL values are
considered wildcards.
:class:`Event` subclass instances must be pickleable so they can be
shuttled off to celery tasks.
"""
# event_type = 'hamster modified' # key for the event_type column
content_type = None # or, for example, Hamster
#: Possible filter keys, for validation only. For example:
#: ``set(['color', 'flavor'])``
filters = set()
def fire(self, exclude=None, delay=True):
"""Notify everyone watching the event.
We are explicit about sending notifications; we don't just key off
creation signals, because the receiver of a ``post_save`` signal has no
idea what just changed, so it doesn't know which notifications to send.
Also, we could easily send mail accidentally: for instance, during
tests. If we want implicit event firing, we can always register a
signal handler that calls :meth:`fire()`.
:arg exclude: If a saved user is passed in, that user will not be
notified, though anonymous notifications having the same email
address may still be sent. A sequence of users may also be passed in.
:arg delay: If True (default), the event is handled asynchronously with
Celery. This requires the pickle task serializer, which is no longer
the default starting in Celery 4.0. If False, the event is processed
immediately.
"""
if delay:
# Tasks don't receive the `self` arg implicitly.
self._fire_task.apply_async(
args=(self,),
kwargs={'exclude': exclude},
serializer='pickle')
else:
self._fire_task(self, exclude=exclude)
@task
def _fire_task(self, exclude=None):
"""Build and send the emails as a celery task."""
connection = mail.get_connection(fail_silently=True)
# Warning: fail_silently swallows errors thrown by the generators, too.
connection.open()
for m in self._mails(self._users_watching(exclude=exclude)):
connection.send_messages([m])
@classmethod
def _validate_filters(cls, filters):
"""Raise a TypeError if ``filters`` contains any keys inappropriate to
this event class."""
for k in iterkeys(filters):
if k not in cls.filters:
# Mirror "unexpected keyword argument" message:
raise TypeError("%s got an unsupported filter type '%s'" %
(cls.__name__, k))
def _users_watching_by_filter(self, object_id=None, exclude=None,
**filters):
"""Return an iterable of (``User``/:class:`~tidings.models.EmailUser`,
[:class:`~tidings.models.Watch` objects]) tuples watching the event.
Of multiple Users/EmailUsers having the same email address, only one is
returned. Users are favored over EmailUsers so we are sure to be able
to, for example, include a link to a user profile in the mail.
The list of :class:`~tidings.models.Watch` objects includes both
those tied to the given User (if there is a registered user)
and to any anonymous Watch having the same email address. This
allows you to include all relevant unsubscribe URLs in a mail,
for example. It also lets you make decisions in the
:meth:`~tidings.events.EventUnion._mails()` method of
:class:`~tidings.events.EventUnion` based on the kinds of
watches found.
"Watching the event" means having a Watch whose ``event_type`` is
``self.event_type``, whose ``content_type`` is ``self.content_type`` or
``NULL``, whose ``object_id`` is ``object_id`` or ``NULL``, and whose
WatchFilter rows match as follows: each name/value pair given in
``filters`` must be matched by a related WatchFilter, or there must be
no related WatchFilter having that name. If you find yourself wanting
the lack of a particularly named WatchFilter to scuttle the match, use
a different event_type instead.
:arg exclude: If a saved user is passed in as this argument, that user
will never be returned, though anonymous watches having the same
email address may. A sequence of users may also be passed in.
"""
# I don't think we can use the ORM here, as there's no way to get a
# second condition (name=whatever) into a left join. However, if we
# were willing to have 2 subqueries run for every watch row--select
# {are there any filters with name=x?} and select {is there a filter
# with name=x and value=y?}--we could do it with extra(). Then we could
# have EventUnion simply | the QuerySets together, which would avoid
# having to merge in Python.
if exclude is None:
exclude = []
elif not isinstance(exclude, Sequence):
exclude = [exclude]
def filter_conditions():
"""Return joins, WHERE conditions, and params to bind to them in
order to check a notification against all the given filters."""
# Not a one-liner. You're welcome. :-)
self._validate_filters(filters)
joins, wheres, join_params, where_params = [], [], [], []
for n, (k, v) in enumerate(iteritems(filters)):
joins.append(
'LEFT JOIN tidings_watchfilter f{n} '
'ON f{n}.watch_id=w.id '
'AND f{n}.name=%s'.format(n=n))
join_params.append(k)
wheres.append('(f{n}.value=%s '
'OR f{n}.value IS NULL)'.format(n=n))
where_params.append(hash_to_unsigned(v))
return joins, wheres, join_params + where_params
# Apply watchfilter constraints:
joins, wheres, params = filter_conditions()
# Start off with event_type, which is always a constraint. These go in
# the `wheres` list to guarantee that the AND after the {wheres}
# substitution in the query is okay.
wheres.append('w.event_type=%s')
params.append(self.event_type)
# Constrain on other 1-to-1 attributes:
if self.content_type:
wheres.append('(w.content_type_id IS NULL '
'OR w.content_type_id=%s)')
params.append(ContentType.objects.get_for_model(
self.content_type).id)
if object_id:
wheres.append('(w.object_id IS NULL OR w.object_id=%s)')
params.append(object_id)
if exclude:
# Don't try excluding unsaved Users:1
if not all(e.id for e in exclude):
raise ValueError("Can't exclude an unsaved User.")
wheres.append('(u.id IS NULL OR u.id NOT IN (%s))' %
', '.join('%s' for e in exclude))
params.extend(e.id for e in exclude)
def get_fields(model):
if hasattr(model._meta, '_fields'):
# For django versions < 1.6
return model._meta._fields()
else:
# For django versions >= 1.6
return model._meta.fields
User = get_user_model()
model_to_fields = dict((m, [f.get_attname() for f in get_fields(m)])
for m in [User, Watch])
query_fields = [
'u.{0}'.format(field) for field in model_to_fields[User]]
query_fields.extend([
'w.{0}'.format(field) for field in model_to_fields[Watch]])
query = (
'SELECT {fields} '
'FROM tidings_watch w '
'LEFT JOIN {user_table} u ON u.id=w.user_id {joins} '
'WHERE {wheres} '
'AND (length(w.email)>0 OR length(u.email)>0) '
'AND w.is_active '
'ORDER BY u.email DESC, w.email DESC').format(
fields=', '.join(query_fields),
joins=' '.join(joins),
wheres=' AND '.join(wheres),
user_table=User._meta.db_table)
# IIRC, the DESC ordering was something to do with the placement of
# NULLs. Track this down and explain it.
# Put watch in a list just for consistency. Once the pairs go through
# _unique_by_email, watches will be in a list, and EventUnion uses the
# same function to union already-list-enclosed pairs from individual
# events.
return _unique_by_email((u, [w]) for u, w in
multi_raw(query, params, [User, Watch],
model_to_fields))
@classmethod
def _watches_belonging_to_user(cls, user_or_email, object_id=None,
**filters):
"""Return a QuerySet of watches having the given user or email, having
(only) the given filters, and having the event_type and content_type
attrs of the class.
Matched Watches may be either confirmed and unconfirmed. They may
include duplicates if the get-then-create race condition in
:meth:`notify()` allowed them to be created.
If you pass an email, it will be matched against only the email
addresses of anonymous watches. At the moment, the only integration
point planned between anonymous and registered watches is the claiming
of anonymous watches of the same email address on user registration
confirmation.
If you pass the AnonymousUser, this will return an empty QuerySet.
"""
# If we have trouble distinguishing subsets and such, we could store a
# number_of_filters on the Watch.
cls._validate_filters(filters)
if isinstance(user_or_email, string_types):
user_condition = Q(email=user_or_email)
elif user_or_email.is_authenticated:
user_condition = Q(user=user_or_email)
else:
return Watch.objects.none()
# Filter by stuff in the Watch row:
watches = getattr(Watch, 'uncached', Watch.objects).filter(
user_condition,
Q(content_type=ContentType.objects.get_for_model(
cls.content_type)) if cls.content_type else Q(),
Q(object_id=object_id) if object_id else Q(),
event_type=cls.event_type).extra(
where=['(SELECT count(*) FROM tidings_watchfilter WHERE '
'tidings_watchfilter.watch_id='
'tidings_watch.id)=%s'],
params=[len(filters)])
# Optimization: If the subselect ends up being slow, store the number
# of filters in each Watch row or try a GROUP BY.
# Apply 1-to-many filters:
for k, v in iteritems(filters):
watches = watches.filter(filters__name=k,
filters__value=hash_to_unsigned(v))
return watches
@classmethod
# Funny arg name to reserve use of nice ones for filters
@classmethod
def notify(cls, user_or_email_, object_id=None, **filters):
"""Start notifying the given user or email address when this event
occurs and meets the criteria given in ``filters``.
Return the created (or the existing matching) Watch so you can call
:meth:`~tidings.models.Watch.activate()` on it if you're so inclined.
Implementations in subclasses may take different arguments; see the
docstring of :meth:`is_notifying()`.
Send an activation email if an anonymous watch is created and
:data:`~django.conf.settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES` is
``True``. If the activation request fails, raise a
ActivationRequestFailed exception.
Calling :meth:`notify()` twice for an anonymous user will send the
email each time.
"""
# A test-for-existence-then-create race condition exists here, but it
# doesn't matter: de-duplication on fire() and deletion of all matches
# on stop_notifying() nullify its effects.
try:
# Pick 1 if >1 are returned:
watch = cls._watches_belonging_to_user(
user_or_email_,
object_id=object_id,
**filters)[0:1].get()
except Watch.DoesNotExist:
create_kwargs = {}
if cls.content_type:
create_kwargs['content_type'] = \
ContentType.objects.get_for_model(cls.content_type)
create_kwargs['email' if isinstance(user_or_email_, string_types)
else 'user'] = user_or_email_
# Letters that can't be mistaken for other letters or numbers in
# most fonts, in case people try to type these:
distinguishable_letters = \
'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRTUVWXYZ'
secret = ''.join(random.choice(distinguishable_letters)
for x in range(10))
# Registered users don't need to confirm, but anonymous users do.
is_active = ('user' in create_kwargs or
not settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES)
if object_id:
create_kwargs['object_id'] = object_id
watch = Watch.objects.create(
secret=secret,
is_active=is_active,
event_type=cls.event_type,
**create_kwargs)
for k, v in iteritems(filters):
WatchFilter.objects.create(watch=watch, name=k,
value=hash_to_unsigned(v))
# Send email for inactive watches.
if not watch.is_active:
email = watch.user.email if watch.user else watch.email
message = cls._activation_email(watch, email)
try:
message.send()
except SMTPException as e:
watch.delete()
raise ActivationRequestFailed(e.recipients)
return watch
@classmethod
def stop_notifying(cls, user_or_email_, **filters):
"""Delete all watches matching the exact user/email and filters.
Delete both active and inactive watches. If duplicate watches
exist due to the get-then-create race condition, delete them all.
Implementations in subclasses may take different arguments; see the
docstring of :meth:`is_notifying()`.
"""
cls._watches_belonging_to_user(user_or_email_, **filters).delete()
# TODO: If GenericForeignKeys don't give us cascading deletes, make a
# stop_notifying_all(**filters) or something. It should delete any watch of
# the class's event_type and content_type and having filters matching each
# of **filters. Even if there are additional filters on a watch, that watch
# should still be deleted so we can delete, for example, any watch that
# references a certain Question instance. To do that, factor such that you
# can effectively call _watches_belonging_to_user() without it calling
# extra().
# Subclasses should implement the following:
def _mails(self, users_and_watches):
"""Return an iterable yielding an EmailMessage to send to each user.
:arg users_and_watches: an iterable of (User or EmailUser, [Watches])
pairs where the first element is the user to send to and the second
is a list of watches (usually just one) that indicated the
user's interest in this event
:meth:`~tidings.utils.emails_with_users_and_watches()` can come in
handy for generating mails from Django templates.
"""
# Did this instead of mail() because a common case might be sending the
# same mail to many users. mail() would make it difficult to avoid
# redoing the templating every time.
raise NotImplementedError
def _users_watching(self, **kwargs):
"""Return an iterable of Users and EmailUsers watching this event
and the Watches that map them to it.
Each yielded item is a tuple: (User or EmailUser, [list of Watches]).
Default implementation returns users watching this object's event_type
and, if defined, content_type.
"""
return self._users_watching_by_filter(**kwargs)
@classmethod
def _activation_email(cls, watch, email):
"""Return an EmailMessage to send to anonymous watchers.
They are expected to follow the activation URL sent in the email to
activate their watch, so you should include at least that.
"""
# TODO: basic implementation.
return mail.EmailMessage('TODO', 'Activate!',
settings.TIDINGS_FROM_ADDRESS,
[email])
@classmethod
def _activation_url(cls, watch):
"""Return a URL pointing to a view which :meth:`activates
<tidings.models.Watch.activate()>` a watch.
TODO: provide generic implementation of this before liberating.
Generic implementation could involve a setting to the default
``reverse()`` path, e.g. ``'tidings.activate_watch'``.
"""
raise NotImplementedError
@classmethod
def description_of_watch(cls, watch):
"""Return a description of the Watch which can be used in emails.
For example, "changes to English articles"
"""
raise NotImplementedError
|
mozilla/django-tidings | tidings/events.py | Event.notify | python | def notify(cls, user_or_email_, object_id=None, **filters):
# A test-for-existence-then-create race condition exists here, but it
# doesn't matter: de-duplication on fire() and deletion of all matches
# on stop_notifying() nullify its effects.
try:
# Pick 1 if >1 are returned:
watch = cls._watches_belonging_to_user(
user_or_email_,
object_id=object_id,
**filters)[0:1].get()
except Watch.DoesNotExist:
create_kwargs = {}
if cls.content_type:
create_kwargs['content_type'] = \
ContentType.objects.get_for_model(cls.content_type)
create_kwargs['email' if isinstance(user_or_email_, string_types)
else 'user'] = user_or_email_
# Letters that can't be mistaken for other letters or numbers in
# most fonts, in case people try to type these:
distinguishable_letters = \
'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRTUVWXYZ'
secret = ''.join(random.choice(distinguishable_letters)
for x in range(10))
# Registered users don't need to confirm, but anonymous users do.
is_active = ('user' in create_kwargs or
not settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES)
if object_id:
create_kwargs['object_id'] = object_id
watch = Watch.objects.create(
secret=secret,
is_active=is_active,
event_type=cls.event_type,
**create_kwargs)
for k, v in iteritems(filters):
WatchFilter.objects.create(watch=watch, name=k,
value=hash_to_unsigned(v))
# Send email for inactive watches.
if not watch.is_active:
email = watch.user.email if watch.user else watch.email
message = cls._activation_email(watch, email)
try:
message.send()
except SMTPException as e:
watch.delete()
raise ActivationRequestFailed(e.recipients)
return watch | Start notifying the given user or email address when this event
occurs and meets the criteria given in ``filters``.
Return the created (or the existing matching) Watch so you can call
:meth:`~tidings.models.Watch.activate()` on it if you're so inclined.
Implementations in subclasses may take different arguments; see the
docstring of :meth:`is_notifying()`.
Send an activation email if an anonymous watch is created and
:data:`~django.conf.settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES` is
``True``. If the activation request fails, raise a
ActivationRequestFailed exception.
Calling :meth:`notify()` twice for an anonymous user will send the
email each time. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/events.py#L364-L427 | [
"def iteritems(d, **kw):\n return iter(d.items(**kw))\n",
"def hash_to_unsigned(data):\n \"\"\"If ``data`` is a string or unicode string, return an unsigned 4-byte int\n hash of it. If ``data`` is already an int that fits those parameters,\n return it verbatim.\n\n If ``data`` is an int outside that range, behavior is undefined at the\n moment. We rely on the ``PositiveIntegerField`` on\n :class:`~tidings.models.WatchFilter` to scream if the int is too long for\n the field.\n\n We use CRC32 to do the hashing. Though CRC32 is not a good general-purpose\n hash function, it has no collisions on a dictionary of 38,470 English\n words, which should be fine for the small sets that :class:`WatchFilters\n <tidings.models.WatchFilter>` are designed to enumerate. As a bonus, it is\n fast and available as a built-in function in some DBs. If your set of\n filter values is very large or has different CRC32 distribution properties\n than English words, you might want to do your own hashing in your\n :class:`~tidings.events.Event` subclass and pass ints when specifying\n filter values.\n\n \"\"\"\n if isinstance(data, string_types):\n # Return a CRC32 value identical across Python versions and platforms\n # by stripping the sign bit as on\n # http://docs.python.org/library/zlib.html.\n return crc32(data.encode('utf-8')) & 0xffffffff\n else:\n return int(data)\n",
"def _watches_belonging_to_user(cls, user_or_email, object_id=None,\n **filters):\n \"\"\"Return a QuerySet of watches having the given user or email, having\n (only) the given filters, and having the event_type and content_type\n attrs of the class.\n\n Matched Watches may be either confirmed and unconfirmed. They may\n include duplicates if the get-then-create race condition in\n :meth:`notify()` allowed them to be created.\n\n If you pass an email, it will be matched against only the email\n addresses of anonymous watches. At the moment, the only integration\n point planned between anonymous and registered watches is the claiming\n of anonymous watches of the same email address on user registration\n confirmation.\n\n If you pass the AnonymousUser, this will return an empty QuerySet.\n\n \"\"\"\n # If we have trouble distinguishing subsets and such, we could store a\n # number_of_filters on the Watch.\n cls._validate_filters(filters)\n\n if isinstance(user_or_email, string_types):\n user_condition = Q(email=user_or_email)\n elif user_or_email.is_authenticated:\n user_condition = Q(user=user_or_email)\n else:\n return Watch.objects.none()\n\n # Filter by stuff in the Watch row:\n watches = getattr(Watch, 'uncached', Watch.objects).filter(\n user_condition,\n Q(content_type=ContentType.objects.get_for_model(\n cls.content_type)) if cls.content_type else Q(),\n Q(object_id=object_id) if object_id else Q(),\n event_type=cls.event_type).extra(\n where=['(SELECT count(*) FROM tidings_watchfilter WHERE '\n 'tidings_watchfilter.watch_id='\n 'tidings_watch.id)=%s'],\n params=[len(filters)])\n # Optimization: If the subselect ends up being slow, store the number\n # of filters in each Watch row or try a GROUP BY.\n\n # Apply 1-to-many filters:\n for k, v in iteritems(filters):\n watches = watches.filter(filters__name=k,\n filters__value=hash_to_unsigned(v))\n\n return watches\n",
"def _activation_email(cls, watch, email):\n \"\"\"Return an EmailMessage to send to anonymous watchers.\n\n They are expected to follow the activation URL sent in the email to\n activate their watch, so you should include at least that.\n\n \"\"\"\n # TODO: basic implementation.\n return mail.EmailMessage('TODO', 'Activate!',\n settings.TIDINGS_FROM_ADDRESS,\n [email])\n"
] | class Event(object):
"""Abstract base class for events
An :class:`Event` represents, simply, something that occurs. A
:class:`~tidings.models.Watch` is a record of someone's interest in a
certain type of :class:`Event`, distinguished by ``Event.event_type``.
Fire an Event (``SomeEvent.fire()``) from the code that causes the
interesting event to occur. Fire it any time the event *might* have
occurred. The Event will determine whether conditions are right to actually
send notifications; don't succumb to the temptation to do these tests
outside the Event, because you'll end up repeating yourself if the event is
ever fired from more than one place.
:class:`Event` subclasses can optionally represent a more limited scope of
interest by populating the ``Watch.content_type`` field and/or adding
related :class:`~tidings.models.WatchFilter` rows holding name/value pairs,
the meaning of which is up to each individual subclass. NULL values are
considered wildcards.
:class:`Event` subclass instances must be pickleable so they can be
shuttled off to celery tasks.
"""
# event_type = 'hamster modified' # key for the event_type column
content_type = None # or, for example, Hamster
#: Possible filter keys, for validation only. For example:
#: ``set(['color', 'flavor'])``
filters = set()
def fire(self, exclude=None, delay=True):
"""Notify everyone watching the event.
We are explicit about sending notifications; we don't just key off
creation signals, because the receiver of a ``post_save`` signal has no
idea what just changed, so it doesn't know which notifications to send.
Also, we could easily send mail accidentally: for instance, during
tests. If we want implicit event firing, we can always register a
signal handler that calls :meth:`fire()`.
:arg exclude: If a saved user is passed in, that user will not be
notified, though anonymous notifications having the same email
address may still be sent. A sequence of users may also be passed in.
:arg delay: If True (default), the event is handled asynchronously with
Celery. This requires the pickle task serializer, which is no longer
the default starting in Celery 4.0. If False, the event is processed
immediately.
"""
if delay:
# Tasks don't receive the `self` arg implicitly.
self._fire_task.apply_async(
args=(self,),
kwargs={'exclude': exclude},
serializer='pickle')
else:
self._fire_task(self, exclude=exclude)
@task
def _fire_task(self, exclude=None):
"""Build and send the emails as a celery task."""
connection = mail.get_connection(fail_silently=True)
# Warning: fail_silently swallows errors thrown by the generators, too.
connection.open()
for m in self._mails(self._users_watching(exclude=exclude)):
connection.send_messages([m])
@classmethod
def _validate_filters(cls, filters):
"""Raise a TypeError if ``filters`` contains any keys inappropriate to
this event class."""
for k in iterkeys(filters):
if k not in cls.filters:
# Mirror "unexpected keyword argument" message:
raise TypeError("%s got an unsupported filter type '%s'" %
(cls.__name__, k))
def _users_watching_by_filter(self, object_id=None, exclude=None,
**filters):
"""Return an iterable of (``User``/:class:`~tidings.models.EmailUser`,
[:class:`~tidings.models.Watch` objects]) tuples watching the event.
Of multiple Users/EmailUsers having the same email address, only one is
returned. Users are favored over EmailUsers so we are sure to be able
to, for example, include a link to a user profile in the mail.
The list of :class:`~tidings.models.Watch` objects includes both
those tied to the given User (if there is a registered user)
and to any anonymous Watch having the same email address. This
allows you to include all relevant unsubscribe URLs in a mail,
for example. It also lets you make decisions in the
:meth:`~tidings.events.EventUnion._mails()` method of
:class:`~tidings.events.EventUnion` based on the kinds of
watches found.
"Watching the event" means having a Watch whose ``event_type`` is
``self.event_type``, whose ``content_type`` is ``self.content_type`` or
``NULL``, whose ``object_id`` is ``object_id`` or ``NULL``, and whose
WatchFilter rows match as follows: each name/value pair given in
``filters`` must be matched by a related WatchFilter, or there must be
no related WatchFilter having that name. If you find yourself wanting
the lack of a particularly named WatchFilter to scuttle the match, use
a different event_type instead.
:arg exclude: If a saved user is passed in as this argument, that user
will never be returned, though anonymous watches having the same
email address may. A sequence of users may also be passed in.
"""
# I don't think we can use the ORM here, as there's no way to get a
# second condition (name=whatever) into a left join. However, if we
# were willing to have 2 subqueries run for every watch row--select
# {are there any filters with name=x?} and select {is there a filter
# with name=x and value=y?}--we could do it with extra(). Then we could
# have EventUnion simply | the QuerySets together, which would avoid
# having to merge in Python.
if exclude is None:
exclude = []
elif not isinstance(exclude, Sequence):
exclude = [exclude]
def filter_conditions():
"""Return joins, WHERE conditions, and params to bind to them in
order to check a notification against all the given filters."""
# Not a one-liner. You're welcome. :-)
self._validate_filters(filters)
joins, wheres, join_params, where_params = [], [], [], []
for n, (k, v) in enumerate(iteritems(filters)):
joins.append(
'LEFT JOIN tidings_watchfilter f{n} '
'ON f{n}.watch_id=w.id '
'AND f{n}.name=%s'.format(n=n))
join_params.append(k)
wheres.append('(f{n}.value=%s '
'OR f{n}.value IS NULL)'.format(n=n))
where_params.append(hash_to_unsigned(v))
return joins, wheres, join_params + where_params
# Apply watchfilter constraints:
joins, wheres, params = filter_conditions()
# Start off with event_type, which is always a constraint. These go in
# the `wheres` list to guarantee that the AND after the {wheres}
# substitution in the query is okay.
wheres.append('w.event_type=%s')
params.append(self.event_type)
# Constrain on other 1-to-1 attributes:
if self.content_type:
wheres.append('(w.content_type_id IS NULL '
'OR w.content_type_id=%s)')
params.append(ContentType.objects.get_for_model(
self.content_type).id)
if object_id:
wheres.append('(w.object_id IS NULL OR w.object_id=%s)')
params.append(object_id)
if exclude:
# Don't try excluding unsaved Users:1
if not all(e.id for e in exclude):
raise ValueError("Can't exclude an unsaved User.")
wheres.append('(u.id IS NULL OR u.id NOT IN (%s))' %
', '.join('%s' for e in exclude))
params.extend(e.id for e in exclude)
def get_fields(model):
if hasattr(model._meta, '_fields'):
# For django versions < 1.6
return model._meta._fields()
else:
# For django versions >= 1.6
return model._meta.fields
User = get_user_model()
model_to_fields = dict((m, [f.get_attname() for f in get_fields(m)])
for m in [User, Watch])
query_fields = [
'u.{0}'.format(field) for field in model_to_fields[User]]
query_fields.extend([
'w.{0}'.format(field) for field in model_to_fields[Watch]])
query = (
'SELECT {fields} '
'FROM tidings_watch w '
'LEFT JOIN {user_table} u ON u.id=w.user_id {joins} '
'WHERE {wheres} '
'AND (length(w.email)>0 OR length(u.email)>0) '
'AND w.is_active '
'ORDER BY u.email DESC, w.email DESC').format(
fields=', '.join(query_fields),
joins=' '.join(joins),
wheres=' AND '.join(wheres),
user_table=User._meta.db_table)
# IIRC, the DESC ordering was something to do with the placement of
# NULLs. Track this down and explain it.
# Put watch in a list just for consistency. Once the pairs go through
# _unique_by_email, watches will be in a list, and EventUnion uses the
# same function to union already-list-enclosed pairs from individual
# events.
return _unique_by_email((u, [w]) for u, w in
multi_raw(query, params, [User, Watch],
model_to_fields))
@classmethod
def _watches_belonging_to_user(cls, user_or_email, object_id=None,
                               **filters):
    """Return a QuerySet of watches owned by the given user or email
    address, carrying exactly the given filters, and matching the
    event_type and content_type attributes of this class.

    Both confirmed and unconfirmed watches are returned, and duplicates
    created by the get-then-create race condition in :meth:`notify()`
    may appear.

    An email address is matched only against the email addresses of
    anonymous watches. Passing the AnonymousUser yields an empty
    QuerySet.
    """
    # If we have trouble distinguishing subsets and such, we could store
    # a number_of_filters on the Watch.
    cls._validate_filters(filters)

    # Work out which column identifies the watcher.
    if isinstance(user_or_email, string_types):
        owner_q = Q(email=user_or_email)
    elif user_or_email.is_authenticated:
        owner_q = Q(user=user_or_email)
    else:
        # AnonymousUser: nothing can ever match.
        return Watch.objects.none()

    content_type_q = (
        Q(content_type=ContentType.objects.get_for_model(cls.content_type))
        if cls.content_type else Q())
    object_q = Q(object_id=object_id) if object_id else Q()

    # Require the watch to carry exactly len(filters) filters; the loop
    # below then checks that every required filter is among them.
    manager = getattr(Watch, 'uncached', Watch.objects)
    watches = manager.filter(
        owner_q, content_type_q, object_q,
        event_type=cls.event_type).extra(
        where=['(SELECT count(*) FROM tidings_watchfilter WHERE '
               'tidings_watchfilter.watch_id='
               'tidings_watch.id)=%s'],
        params=[len(filters)])

    # Optimization: if the subselect ends up being slow, store the
    # number of filters in each Watch row or try a GROUP BY.
    for name, value in iteritems(filters):
        watches = watches.filter(filters__name=name,
                                 filters__value=hash_to_unsigned(value))
    return watches
@classmethod
# Funny arg name to reserve use of nice ones for filters
def is_notifying(cls, user_or_email_, object_id=None, **filters):
    """Return whether the user/email is watching this event (either
    active or inactive watches), conditional on meeting the criteria
    in ``filters``.

    Only watches whose filters match exactly are counted -- not
    supersets -- so callers can distinguish overlapping watches;
    equivalently, this answers whether :meth:`notify()` has been called
    with these arguments.

    Subclass implementations may take different arguments (e.g. to
    assume certain filters) and should document them.

    Passing an ``AnonymousUser`` always returns ``False``, so
    ``request.user`` is always safe to pass from a view.
    """
    matching = cls._watches_belonging_to_user(user_or_email_,
                                              object_id=object_id,
                                              **filters)
    return matching.exists()
@classmethod
# NOTE(review): the source carried a duplicated @classmethod decorator
# here (a copy/paste artifact); a doubly wrapped classmethod is not
# callable, so exactly one is kept.
def stop_notifying(cls, user_or_email_, **filters):
    """Delete all watches matching the exact user/email and filters.

    Delete both active and inactive watches. If duplicate watches
    exist due to the get-then-create race condition, delete them all.

    Implementations in subclasses may take different arguments; see the
    docstring of :meth:`is_notifying()`.
    """
    cls._watches_belonging_to_user(user_or_email_, **filters).delete()
# TODO: If GenericForeignKeys don't give us cascading deletes, make a
# stop_notifying_all(**filters) or something. It should delete any watch of
# the class's event_type and content_type and having filters matching each
# of **filters. Even if there are additional filters on a watch, that watch
# should still be deleted so we can delete, for example, any watch that
# references a certain Question instance. To do that, factor such that you
# can effectively call _watches_belonging_to_user() without it calling
# extra().
# Subclasses should implement the following:
def _mails(self, users_and_watches):
    """Return an iterable yielding one EmailMessage per recipient.

    :arg users_and_watches: an iterable of (User or EmailUser,
        [Watches]) pairs; the first element is the user to send to and
        the second is a list of watches (usually just one) that
        indicated the user's interest in this event

    :meth:`~tidings.utils.emails_with_users_and_watches()` can come in
    handy for generating mails from Django templates.
    """
    # An iterable (rather than a single mail() call) lets subclasses
    # template the message once and reuse it for many recipients.
    raise NotImplementedError
def _users_watching(self, **kwargs):
    """Return an iterable of (User or EmailUser, [list of Watches])
    pairs for everyone watching this event.

    The default implementation matches on this object's event_type and,
    when defined, its content_type.
    """
    return self._users_watching_by_filter(**kwargs)
@classmethod
def _activation_email(cls, watch, email):
    """Return an EmailMessage to send to anonymous watchers.

    Recipients are expected to follow the activation URL sent in the
    email to activate their watch, so at least that should be included.
    """
    # TODO: basic implementation.
    subject = 'TODO'
    body = 'Activate!'
    sender = settings.TIDINGS_FROM_ADDRESS
    return mail.EmailMessage(subject, body, sender, [email])
@classmethod
def _activation_url(cls, watch):
    """Return a URL pointing to a view which :meth:`activates
    <tidings.models.Watch.activate()>` a watch.

    TODO: provide a generic implementation of this before liberating.
    It could involve a setting holding a default ``reverse()`` path,
    e.g. ``'tidings.activate_watch'``.
    """
    raise NotImplementedError
@classmethod
def description_of_watch(cls, watch):
    """Return a human-readable description of ``watch`` suitable for
    use in emails, e.g. "changes to English articles".
    """
    raise NotImplementedError
|
mozilla/django-tidings | tidings/events.py | InstanceEvent.notify | python | def notify(cls, user_or_email, instance):
return super(InstanceEvent, cls).notify(user_or_email,
object_id=instance.pk) | Create, save, and return a watch which fires when something
happens to ``instance``. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/events.py#L583-L587 | null | class InstanceEvent(Event):
"""Abstract superclass for watching a specific instance of a Model.
Subclasses must specify an ``event_type`` and should specify a
``content_type``.
"""
def __init__(self, instance, *args, **kwargs):
"""Initialize an InstanceEvent
:arg instance: the instance someone would have to be watching in
order to be notified when this event is fired.
"""
super(InstanceEvent, self).__init__(*args, **kwargs)
self.instance = instance
@classmethod
@classmethod
def stop_notifying(cls, user_or_email, instance):
"""Delete the watch created by notify."""
super(InstanceEvent, cls).stop_notifying(user_or_email,
object_id=instance.pk)
@classmethod
def is_notifying(cls, user_or_email, instance):
"""Check if the watch created by notify exists."""
return super(InstanceEvent, cls).is_notifying(user_or_email,
object_id=instance.pk)
def _users_watching(self, **kwargs):
"""Return users watching this instance."""
return self._users_watching_by_filter(object_id=self.instance.pk,
**kwargs)
|
mozilla/django-tidings | tidings/events.py | InstanceEvent.stop_notifying | python | def stop_notifying(cls, user_or_email, instance):
super(InstanceEvent, cls).stop_notifying(user_or_email,
object_id=instance.pk) | Delete the watch created by notify. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/events.py#L590-L593 | null | class InstanceEvent(Event):
"""Abstract superclass for watching a specific instance of a Model.
Subclasses must specify an ``event_type`` and should specify a
``content_type``.
"""
def __init__(self, instance, *args, **kwargs):
"""Initialize an InstanceEvent
:arg instance: the instance someone would have to be watching in
order to be notified when this event is fired.
"""
super(InstanceEvent, self).__init__(*args, **kwargs)
self.instance = instance
@classmethod
def notify(cls, user_or_email, instance):
"""Create, save, and return a watch which fires when something
happens to ``instance``."""
return super(InstanceEvent, cls).notify(user_or_email,
object_id=instance.pk)
@classmethod
@classmethod
def is_notifying(cls, user_or_email, instance):
"""Check if the watch created by notify exists."""
return super(InstanceEvent, cls).is_notifying(user_or_email,
object_id=instance.pk)
def _users_watching(self, **kwargs):
"""Return users watching this instance."""
return self._users_watching_by_filter(object_id=self.instance.pk,
**kwargs)
|
mozilla/django-tidings | tidings/events.py | InstanceEvent.is_notifying | python | def is_notifying(cls, user_or_email, instance):
return super(InstanceEvent, cls).is_notifying(user_or_email,
object_id=instance.pk) | Check if the watch created by notify exists. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/events.py#L596-L599 | null | class InstanceEvent(Event):
"""Abstract superclass for watching a specific instance of a Model.
Subclasses must specify an ``event_type`` and should specify a
``content_type``.
"""
def __init__(self, instance, *args, **kwargs):
"""Initialize an InstanceEvent
:arg instance: the instance someone would have to be watching in
order to be notified when this event is fired.
"""
super(InstanceEvent, self).__init__(*args, **kwargs)
self.instance = instance
@classmethod
def notify(cls, user_or_email, instance):
"""Create, save, and return a watch which fires when something
happens to ``instance``."""
return super(InstanceEvent, cls).notify(user_or_email,
object_id=instance.pk)
@classmethod
def stop_notifying(cls, user_or_email, instance):
"""Delete the watch created by notify."""
super(InstanceEvent, cls).stop_notifying(user_or_email,
object_id=instance.pk)
@classmethod
def _users_watching(self, **kwargs):
"""Return users watching this instance."""
return self._users_watching_by_filter(object_id=self.instance.pk,
**kwargs)
|
mozilla/django-tidings | tidings/events.py | InstanceEvent._users_watching | python | def _users_watching(self, **kwargs):
return self._users_watching_by_filter(object_id=self.instance.pk,
**kwargs) | Return users watching this instance. | train | https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/events.py#L601-L604 | null | class InstanceEvent(Event):
"""Abstract superclass for watching a specific instance of a Model.
Subclasses must specify an ``event_type`` and should specify a
``content_type``.
"""
def __init__(self, instance, *args, **kwargs):
"""Initialize an InstanceEvent
:arg instance: the instance someone would have to be watching in
order to be notified when this event is fired.
"""
super(InstanceEvent, self).__init__(*args, **kwargs)
self.instance = instance
@classmethod
def notify(cls, user_or_email, instance):
"""Create, save, and return a watch which fires when something
happens to ``instance``."""
return super(InstanceEvent, cls).notify(user_or_email,
object_id=instance.pk)
@classmethod
def stop_notifying(cls, user_or_email, instance):
"""Delete the watch created by notify."""
super(InstanceEvent, cls).stop_notifying(user_or_email,
object_id=instance.pk)
@classmethod
def is_notifying(cls, user_or_email, instance):
"""Check if the watch created by notify exists."""
return super(InstanceEvent, cls).is_notifying(user_or_email,
object_id=instance.pk)
|
def find_font(font_name):
    """Return (font_name, exact): the usable font name and a Boolean
    indicating whether the match is exact.

    Tries, in order: the PDF standard 14 fonts, previously registered
    fonts, a same-named .ttf file on ReportLab's font search path, and
    finally a Fontconfig ``fc-match`` lookup (which may return a default
    font unrelated to the request -- hence the "exact" flag).
    """
    if font_name in STANDARD_FONT_NAMES:
        return font_name, True
    elif font_name in _registered_fonts:
        return font_name, _registered_fonts[font_name]

    NOT_FOUND = (None, False)
    try:
        # Try first to register the font if it exists as ttf,
        # based on ReportLab font search.
        registerFont(TTFont(font_name, '%s.ttf' % font_name))
        _registered_fonts[font_name] = True
        return font_name, True
    except TTFError:
        # Try searching with Fontconfig
        try:
            pipe = subprocess.Popen(
                ['fc-match', '-s', '--format=%{file}\\n', font_name],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            output = pipe.communicate()[0].decode(sys.getfilesystemencoding())
            font_path = output.split('\n')[0]
        except OSError:
            # fc-match binary not available.
            return NOT_FOUND
        try:
            registerFont(TTFont(font_name, font_path))
        except TTFError:
            return NOT_FOUND
        # Fontconfig may return a default font totally unrelated with
        # font_name, so only report an exact match when the file name
        # actually contains the requested family.
        exact = font_name.lower() in os.path.basename(font_path).lower()
        _registered_fonts[font_name] = exact
        return font_name, exact
# -*- coding: UTF-8 -*-
"""A library for reading and converting SVG.
This is a converter from SVG to RLG (ReportLab Graphics) drawings.
It converts mainly basic shapes, paths and simple text. The intended
usage is either as module within other projects:
from svglib.svglib import svg2rlg
drawing = svg2rlg("foo.svg")
or from the command-line where it is usable as an SVG to PDF converting
tool named sv2pdf (which should also handle SVG files compressed with
gzip and extension .svgz).
"""
import copy
import gzip
import itertools
import logging
import os
import re
import base64
import tempfile
import shlex
import shutil
import subprocess
import sys
from collections import defaultdict, namedtuple
from functools import partial
from reportlab.pdfbase.pdfmetrics import registerFont, stringWidth
from reportlab.pdfbase.ttfonts import TTFError, TTFont
from reportlab.pdfgen.canvas import FILL_EVEN_ODD, FILL_NON_ZERO
from reportlab.pdfgen.pdfimages import PDFImage
from reportlab.graphics.shapes import (
_CLOSEPATH, Circle, Drawing, Ellipse, Group, Image, Line, Path, PolyLine,
Polygon, Rect, String,
)
from reportlab.lib import colors
from reportlab.lib.units import pica, toLength
from reportlab.lib.utils import haveImages
from lxml import etree
import cssselect2
import tinycss2
from .utils import (
bezier_arc_from_end_points, convert_quadratic_to_cubic_path,
normalise_svg_path,
)
__version__ = '0.9.0'
__license__ = 'LGPL 3'
__author__ = 'Dinu Gherman'
__date__ = '2018-12-08'
XML_NS = 'http://www.w3.org/XML/1998/namespace'
# A sentinel to identify a situation where a node reference a fragment not yet defined.
DELAYED = object()
STANDARD_FONT_NAMES = (
'Times-Roman', 'Times-Italic', 'Times-Bold', 'Times-BoldItalic',
'Helvetica', 'Helvetica-Oblique', 'Helvetica-Bold', 'Helvetica-BoldOblique',
'Courier', 'Courier-Oblique', 'Courier-Bold', 'Courier-BoldOblique',
'Symbol', 'ZapfDingbats',
)
DEFAULT_FONT_NAME = "Helvetica"
_registered_fonts = {}
logger = logging.getLogger(__name__)
Box = namedtuple('Box', ['x', 'y', 'width', 'height'])
split_whitespace = re.compile(r'[^ \t\r\n\f]+').findall
class NoStrokePath(Path):
    """A Path that always reports a zero-width, colorless stroke,
    whatever stroke properties get assigned to it.
    """
    def __init__(self, *args, **kwargs):
        template = kwargs.pop('copy_from', None)
        Path.__init__(self, *args, **kwargs)  # we're old-style class on PY2
        if template:
            # Clone every attribute of the path this one is based on.
            self.__dict__.update(copy.deepcopy(template.__dict__))

    def getProperties(self, *args, **kwargs):
        # __getattribute__ wouldn't suit, as RL is directly accessing
        # self.__dict__
        props = Path.getProperties(self, *args, **kwargs)
        for key, neutral in (('strokeWidth', 0), ('strokeColor', None)):
            if key in props:
                props[key] = neutral
        return props
class ClippingPath(Path):
    """A Path used as a clipping region: marked as a clip path and never
    painting any fill of its own."""

    def __init__(self, *args, **kwargs):
        template = kwargs.pop('copy_from', None)
        Path.__init__(self, *args, **kwargs)
        if template:
            # Clone the attributes of the path being wrapped, then mark
            # the result as a clipping path.
            self.__dict__.update(copy.deepcopy(template.__dict__))
        self.isClipPath = 1

    def getProperties(self, *args, **kwargs):
        props = Path.getProperties(self, *args, **kwargs)
        # A clipping path must not paint anything itself.
        if 'fillColor' in props:
            props['fillColor'] = None
        return props
class CSSMatcher(cssselect2.Matcher):
    """A cssselect2 Matcher preloaded with the rules found in the content
    of a ``<style>`` element.

    Each selector is registered with a payload of
    ``(selector_string, {property: value, ...})`` so matching an element
    yields the declarations to apply to it.
    """
    def __init__(self, style_content):
        super(CSSMatcher, self).__init__()
        self.rules = tinycss2.parse_stylesheet(
            style_content, skip_comments=True, skip_whitespace=True
        )
        for rule in self.rules:
            if not rule.prelude:
                continue
            selectors = cssselect2.compile_selector_list(rule.prelude)
            selector_string = tinycss2.serialize(rule.prelude)
            content_dict = {}
            for declaration in tinycss2.serialize(rule.content).split(';'):
                if ':' not in declaration:
                    continue
                # Split on the first colon only, so values containing
                # colons themselves (e.g. url(data:...)) stay intact;
                # the previous split(':')[1] truncated them.
                name, value = declaration.split(':', 1)
                content_dict[name.strip()] = value.strip()
            payload = (selector_string, content_dict)
            for selector in selectors:
                self.add_selector(selector, payload)
# Attribute converters (from SVG to RLG)
class AttributeConverter(object):
    "An abstract class to locate and convert attributes in a DOM instance."

    def __init__(self):
        # Optional CSSMatcher holding rules collected from <style> nodes.
        self.css_rules = None

    def parseMultiAttributes(self, line):
        """Try parsing compound attribute string.

        Return a dictionary with single attributes in 'line'.
        """
        new_attrs = {}
        for attr in line.split(';'):
            attr = attr.strip()
            if not attr:
                continue
            if ':' not in attr:
                # Previously this crashed on junk without a colon;
                # silently skip malformed declarations instead.
                continue
            # Split on the first ':' only: property values may contain
            # colons themselves (e.g. "fill:url(data:...)"), which used
            # to break the two-value unpacking here.
            k, v = attr.split(':', 1)
            new_attrs[k.strip()] = v.strip()
        return new_attrs

    def findAttr(self, svgNode, name):
        """Search an attribute with some name in some node or above.

        First the node is searched, then its style attribute, then
        the search continues in the node's parent node. If no such
        attribute is found, '' is returned.
        """
        # This needs also to lookup values like "url(#SomeName)"...
        if self.css_rules is not None and not svgNode.attrib.get('__rules_applied', False):
            # Apply any matching CSS rules once before reading attributes.
            if isinstance(svgNode, NodeTracker):
                svgNode.apply_rules(self.css_rules)
            else:
                ElementWrapper(svgNode).apply_rules(self.css_rules)
        attr_value = svgNode.attrib.get(name, '').strip()
        if attr_value and attr_value != "inherit":
            return attr_value
        elif svgNode.attrib.get("style"):
            style_attrs = self.parseMultiAttributes(svgNode.attrib.get("style"))
            if name in style_attrs:
                return style_attrs[name]
        if svgNode.getparent() is not None:
            return self.findAttr(svgNode.getparent(), name)
        return ''

    def getAllAttributes(self, svgNode):
        "Return a dictionary of all attributes of svgNode or those inherited by it."
        attrs = {}
        if node_name(svgNode.getparent()) == 'g':
            attrs.update(self.getAllAttributes(svgNode.getparent()))
        style = svgNode.attrib.get("style")
        if style:
            attrs.update(self.parseMultiAttributes(style))
        for key, value in svgNode.attrib.items():
            if key != "style":
                attrs[key] = value
        return attrs

    def id(self, svgAttr):
        "Return attribute as is."
        return svgAttr

    def convertTransform(self, svgAttr):
        """Parse transform attribute string.

        E.g. "scale(2) translate(10,20)"
             -> [("scale", 2), ("translate", (10,20))]
        """
        line = svgAttr.strip()
        ops = line[:]
        brackets = []
        indices = []
        # Record the position of every parenthesis; they come in pairs.
        for i, lin in enumerate(line):
            if lin in "()":
                brackets.append(i)
        for i in range(0, len(brackets), 2):
            bi, bj = brackets[i], brackets[i + 1]
            subline = line[bi + 1:bj].strip().replace(',', ' ')
            subline = re.sub("[ ]+", ',', subline)
            try:
                if ',' in subline:
                    indices.append(tuple(float(num) for num in subline.split(',')))
                else:
                    indices.append(float(subline))
            except ValueError:
                continue
            # Blank out the processed "(...)" span, leaving just op names.
            ops = ops[:bi] + ' ' * (bj - bi + 1) + ops[bj + 1:]
        ops = ops.replace(',', ' ').split()
        if len(ops) != len(indices):
            logger.warning("Unable to parse transform expression '%s'" % svgAttr)
            return []
        result = []
        for i, op in enumerate(ops):
            result.append((op, indices[i]))
        return result
class Svg2RlgAttributeConverter(AttributeConverter):
    "A concrete SVG to RLG attribute converter."

    def __init__(self, color_converter=None):
        super(Svg2RlgAttributeConverter, self).__init__()
        # Hook letting callers post-process every converted color.
        self.color_converter = color_converter or self.identity_color_converter

    @staticmethod
    def identity_color_converter(c):
        # Default color hook: pass the color through untouched.
        return c

    @staticmethod
    def split_attr_list(attr):
        # Commas and whitespace both separate items in SVG value lists.
        return shlex.split(attr.strip().replace(',', ' '))

    def convertLength(self, svgAttr, percentOf=100, em_base=12):
        "Convert length to points."
        value = svgAttr
        if not value:
            return 0.0
        if ' ' in value.replace(',', ' ').strip():
            logger.debug("Only getting first value of %s" % value)
            value = value.replace(',', ' ').split()[0]
        if value.endswith('%'):
            logger.debug("Fiddling length unit: %")
            return float(value[:-1]) / 100 * percentOf
        elif value.endswith("pc"):
            return float(value[:-2]) * pica
        elif value.endswith("pt"):
            return float(value[:-2]) * 1.25
        elif value.endswith("em"):
            return float(value[:-2]) * em_base
        elif value.endswith("px"):
            return float(value[:-2])
        if "ex" in value:
            logger.warning("Ignoring unit ex")
            value = value.replace("ex", '')
        # toLength does the default measurements such as mm and cm.
        return toLength(value.strip())

    def convertLengthList(self, svgAttr):
        """Convert a list of lengths."""
        return [self.convertLength(part) for part in self.split_attr_list(svgAttr)]

    def convertOpacity(self, svgAttr):
        return float(svgAttr)

    def convertFillRule(self, svgAttr):
        return {
            'nonzero': FILL_NON_ZERO,
            'evenodd': FILL_EVEN_ODD,
        }.get(svgAttr, '')

    def convertColor(self, svgAttr):
        "Convert string to a RL color object."
        # fix it: most likely all "web colors" are allowed
        predefined = ("aqua black blue fuchsia gray green lime maroon navy "
                      "olive orange purple red silver teal white yellow "
                      "lawngreen indianred aquamarine lightgreen brown")
        # This needs also to lookup values like "url(#SomeName)"...
        text = svgAttr
        if not text or text == "none":
            return None
        if text in predefined.split():
            return self.color_converter(getattr(colors, text))
        elif text == "currentColor":
            return "currentColor"
        elif len(text) == 7 and text[0] == '#':
            return self.color_converter(colors.HexColor(text))
        elif len(text) == 4 and text[0] == '#':
            # Expand the short #rgb form to #rrggbb.
            expanded = '#' + 2 * text[1] + 2 * text[2] + 2 * text[3]
            return self.color_converter(colors.HexColor(expanded))
        elif text.startswith('rgb') and '%' not in text:
            t = text[3:].strip('()')
            tup = [h[2:] for h in [hex(int(num)) for num in t.split(',')]]
            tup = [(2 - len(h)) * '0' + h for h in tup]
            col = "#%s%s%s" % tuple(tup)
            return self.color_converter(colors.HexColor(col))
        elif text.startswith('rgb') and '%' in text:
            t = text[3:].replace('%', '').strip('()')
            tup = (float(val) / 100.0 for val in t.split(','))
            return self.color_converter(colors.Color(*tup))
        logger.warning("Can't handle color: %s" % text)
        return None

    def convertLineJoin(self, svgAttr):
        return {"miter": 0, "round": 1, "bevel": 2}[svgAttr]

    def convertLineCap(self, svgAttr):
        return {"butt": 0, "round": 1, "square": 2}[svgAttr]

    def convertDashArray(self, svgAttr):
        return self.convertLengthList(svgAttr)

    def convertDashOffset(self, svgAttr):
        return self.convertLength(svgAttr)

    def convertFontFamily(self, svgAttr):
        if not svgAttr:
            return ''
        # very hackish
        font_mapping = {
            "sans-serif": "Helvetica",
            "serif": "Times-Roman",
            "times": "Times-Roman",
            "monospace": "Courier",
        }
        font_names = [
            font_mapping.get(candidate.lower(), candidate)
            for candidate in self.split_attr_list(svgAttr)
        ]
        non_exact_matches = []
        for candidate in font_names:
            candidate, exact = find_font(candidate)
            if exact:
                return candidate
            elif candidate:
                non_exact_matches.append(candidate)
        if non_exact_matches:
            return non_exact_matches[0]
        logger.warning("Unable to find a suitable font for 'font-family:%s'" % svgAttr)
        return DEFAULT_FONT_NAME
class ElementWrapper(object):
    """
    lxml element wrapper to partially match the API from
    cssselect2.ElementWrapper so as element can be passed to
    rules.match().
    """
    in_html_document = False

    def __init__(self, obj):
        self.object = obj

    @property
    def id(self):
        return self.object.attrib.get('id')

    @property
    def etree_element(self):
        return self.object

    @property
    def parent(self):
        par = self.object.getparent()
        return ElementWrapper(par) if par is not None else None

    @property
    def classes(self):
        cl = self.object.attrib.get('class')
        return split_whitespace(cl) if cl is not None else []

    @property
    def local_name(self):
        return node_name(self.object)

    @property
    def namespace_url(self):
        """Return the namespace URI of the wrapped tag, or None."""
        # BUG FIX: the original computed the namespace but never
        # returned it, so this property always yielded None.
        if '}' in self.object.tag:
            return self.object.tag.split('}')[0][1:]

    def iter_ancestors(self):
        element = self
        while element.parent is not None:
            element = element.parent
            yield element

    def apply_rules(self, rules):
        """Copy matching CSS declarations onto the element's attributes
        (presentation attributes win over CSS, hence the membership
        test)."""
        matches = rules.match(self)
        for match in matches:
            attr_dict = match[3][1]
            for attr, val in attr_dict.items():
                if attr not in self.object.attrib:
                    try:
                        self.object.attrib[attr] = val
                    except ValueError:
                        pass
        # Set marker on the node to not apply rules more than once
        self.object.set('__rules_applied', '1')
class NodeTracker(ElementWrapper):
    """An object wrapper keeping track of arguments to certain method
    calls.

    Instances wrap an element and record every name ever passed to
    getAttribute(name), without duplicates, in ``usedAttrs``.
    """
    def __init__(self, obj):
        super(NodeTracker, self).__init__(obj)
        self.usedAttrs = []

    def getAttribute(self, name):
        # Remember the attribute name the first time it is asked for.
        if name not in self.usedAttrs:
            self.usedAttrs.append(name)
        # Delegate the actual lookup to the wrapped element.
        return self.object.attrib.get(name, '')

    def __getattr__(self, name):
        # Anything else behaves exactly like the wrapped object.
        return getattr(self.object, name)
class CircularRefError(Exception):
    """Raised when SVG files reference each other in a cycle."""
class ExternalSVG:
    """Lazy wrapper around an SVG file referenced from another SVG.

    The file is parsed immediately, but only rendered the first time one
    of its fragments is requested.
    """
    def __init__(self, path, renderer):
        self.root_node = load_svg_file(path)
        self.renderer = SvgRenderer(
            path,
            parent_svgs=renderer._parent_chain + [renderer.source_path])
        self.rendered = False

    def get_fragment(self, fragment):
        """Return the definition registered under ``fragment``,
        rendering the document on first access."""
        if not self.rendered:
            self.renderer.render(self.root_node)
            self.rendered = True
        return self.renderer.definitions.get(fragment)
### the main meat ###
class SvgRenderer:
"""Renderer that renders an SVG file on a ReportLab Drawing instance.
This is the base class for walking over an SVG DOM document and
transforming it into a ReportLab Drawing instance.
"""
def __init__(self, path, color_converter=None, parent_svgs=None):
self.source_path = path
self._parent_chain = parent_svgs or [] # To detect circular refs.
self.attrConverter = Svg2RlgAttributeConverter(color_converter=color_converter)
self.shape_converter = Svg2RlgShapeConverter(path, self.attrConverter)
self.handled_shapes = self.shape_converter.get_handled_shapes()
self.definitions = {}
self.waiting_use_nodes = defaultdict(list)
self._external_svgs = {}
def render(self, svg_node):
node = NodeTracker(svg_node)
main_group = self.renderSvg(node, outermost=True)
for xlink in self.waiting_use_nodes.keys():
logger.debug("Ignoring unavailable object width ID '%s'." % xlink)
view_box = self.get_box(node, default_box=True)
main_group.translate(0 - view_box.x, -view_box.height - view_box.y)
width, height = svg_node.attrib.get("width"), svg_node.attrib.get("height")
width, height = map(self.attrConverter.convertLength, (width, height))
drawing = Drawing(width, height)
drawing.add(main_group)
return drawing
def renderNode(self, node, parent=None):
n = NodeTracker(node)
nid = n.getAttribute("id")
ignored = False
item = None
name = node_name(node)
clipping = self.get_clippath(n)
if name == "svg":
item = self.renderSvg(n)
parent.add(item)
elif name == "defs":
item = self.renderG(n)
elif name == 'a':
item = self.renderA(n)
parent.add(item)
elif name == 'g':
display = n.getAttribute("display")
item = self.renderG(n, clipping=clipping)
if display != "none":
parent.add(item)
elif name == "style":
self.renderStyle(n)
elif name == "symbol":
item = self.renderSymbol(n)
parent.add(item)
elif name == "use":
item = self.renderUse(n, clipping=clipping)
parent.add(item)
elif name == "clipPath":
item = self.renderG(n)
elif name in self.handled_shapes:
if name == 'image':
# We resolve the image target at renderer level because it can point
# to another SVG file or node which has to be rendered too.
target = self.xlink_href_target(n)
if target is None:
return
elif isinstance(target, tuple):
# This is SVG content needed to be rendered
gr = Group()
renderer, node = target
renderer.renderNode(node, parent=gr)
self.apply_node_attr_to_group(n, gr)
parent.add(gr)
return
else:
# Attaching target to node, so we can get it back in convertImage
n._resolved_target = target
item = self.shape_converter.convertShape(name, n, clipping)
display = n.getAttribute("display")
if item and display != "none":
parent.add(item)
else:
ignored = True
logger.debug("Ignoring node: %s" % name)
if not ignored:
if nid and item:
self.definitions[nid] = node
if nid in self.waiting_use_nodes.keys():
to_render = self.waiting_use_nodes.pop(nid)
for use_node, group in to_render:
self.renderUse(use_node, group=group)
self.print_unused_attributes(node, n)
def get_clippath(self, node):
"""
Return the clipping Path object referenced by the node 'clip-path'
attribute, if any.
"""
def get_path_from_node(node):
for child in node.getchildren():
if node_name(child) == 'path':
group = self.shape_converter.convertShape('path', NodeTracker(child))
return group.contents[-1]
else:
return get_path_from_node(child)
clip_path = node.getAttribute('clip-path')
if clip_path:
m = re.match(r'url\(#([^\)]*)\)', clip_path)
if m:
ref = m.groups()[0]
if ref in self.definitions:
path = get_path_from_node(self.definitions[ref])
if path:
path = ClippingPath(copy_from=path)
return path
def print_unused_attributes(self, node, n):
if logger.level > logging.DEBUG:
return
all_attrs = self.attrConverter.getAllAttributes(node).keys()
unused_attrs = [attr for attr in all_attrs if attr not in n.usedAttrs]
if unused_attrs:
logger.debug("Unused attrs: %s %s" % (node_name(n), unused_attrs))
def apply_node_attr_to_group(self, node, group):
getAttr = node.getAttribute
transform, x, y = map(getAttr, ("transform", "x", "y"))
if x or y:
transform += " translate(%s, %s)" % (x or '0', y or '0')
if transform:
self.shape_converter.applyTransformOnGroup(transform, group)
def xlink_href_target(self, node, group=None):
    """
    Resolve the node's xlink:href attribute.

    Return either:
        - a tuple (renderer, node) when the xlink:href attribute targets
          a vector file or node
        - the path to an image file for any raster image targets
        - DELAYED when the fragment is defined later in the current file
        - None if any problem occurs
    """
    xlink_href = node.attrib.get('{http://www.w3.org/1999/xlink}href')
    if not xlink_href:
        return None

    # First handle any raster embedded image data
    match = re.match(r"^data:image/(jpeg|png);base64", xlink_href)
    if match:
        img_format = match.groups()[0]
        # FIX: base64.decodestring was deprecated and removed in
        # Python 3.9; decodebytes is the same function's supported name.
        image_data = base64.decodebytes(xlink_href[(match.span(0)[1] + 1):].encode('ascii'))
        file_indicator, path = tempfile.mkstemp(suffix='.%s' % img_format)
        with open(path, 'wb') as fh:
            fh.write(image_data)
        # Close temporary file (as opened by tempfile.mkstemp)
        os.close(file_indicator)
        # this needs to be removed later, not here...
        # if exists(path): os.remove(path)
        return path

    # From here, we can assume this is a path.
    if '#' in xlink_href:
        iri, fragment = xlink_href.split('#', 1)
    else:
        iri, fragment = xlink_href, None

    if iri:
        # Only local relative paths are supported yet
        if not isinstance(self.source_path, str):
            logger.error(
                "Unable to resolve image path '%s' as the SVG source is not a file system path." % iri
            )
            return None
        path = os.path.normpath(os.path.join(os.path.dirname(self.source_path), iri))
        if not os.access(path, os.R_OK):
            return None
        if path == self.source_path:
            # Self-referencing, ignore the IRI part
            iri = None

    if iri:
        if path.endswith('.svg'):
            if path in self._parent_chain:
                logger.error("Circular reference detected in file.")
                raise CircularRefError()
            if path not in self._external_svgs:
                self._external_svgs[path] = ExternalSVG(path, self)
            ext_svg = self._external_svgs[path]
            if ext_svg.root_node is not None:
                if fragment:
                    ext_frag = ext_svg.get_fragment(fragment)
                    if ext_frag is not None:
                        return ext_svg.renderer, ext_frag
                else:
                    return ext_svg.renderer, ext_svg.root_node
        else:
            # A raster image path
            try:
                # This will catch invalid images
                PDFImage(path, 0, 0)
            except IOError:
                logger.error("Unable to read the image %s. Skipping..." % path)
                return None
            return path

    elif fragment:
        # A pointer to an internal definition
        if fragment in self.definitions:
            return self, self.definitions[fragment]
        else:
            # The missing definition should appear later in the file
            self.waiting_use_nodes[fragment].append((node, group))
            return DELAYED
def renderTitle_(self, node):
    # Main SVG title attr. could be used in the PDF document info field.
    # Intentionally a no-op (node is consumed but produces no shape).
    pass
def renderDesc_(self, node):
    # Main SVG desc. attr. could be used in the PDF document info field.
    # Intentionally a no-op (node is consumed but produces no shape).
    pass
def get_box(self, svg_node, default_box=False):
    """Return the node's viewBox as a Box namedtuple.

    When there is no viewBox attribute and ``default_box`` is True, fall
    back to a box at (0, 0) with the node's width/height. Returns None
    when neither is available.
    """
    view_box = svg_node.getAttribute("viewBox")
    if view_box:
        view_box = self.attrConverter.convertLengthList(view_box)
        return Box(*view_box)
    if default_box:
        width, height = map(svg_node.getAttribute, ("width", "height"))
        width, height = map(self.attrConverter.convertLength, (width, height))
        return Box(0, 0, width, height)
def renderSvg(self, node, outermost=False):
    """Render an <svg> element into an RLG Group.

    Handles xml:space inheritance for the subtree, the x/y translation of
    nested <svg> elements, viewBox scaling, and — for the outermost SVG —
    the y-axis flip needed because PDF's origin is bottom-left.
    """
    getAttr = node.getAttribute
    _saved_preserve_space = self.shape_converter.preserve_space
    self.shape_converter.preserve_space = getAttr("{%s}space" % XML_NS) == 'preserve'
    group = Group()
    for child in node.getchildren():
        self.renderNode(child, group)
    # Restore the surrounding xml:space handling for sibling subtrees.
    self.shape_converter.preserve_space = _saved_preserve_space

    # Translating
    if not outermost:
        x, y = map(getAttr, ("x", "y"))
        x, y = map(self.attrConverter.convertLength, (x, y))
        if x or y:
            group.translate(x or 0, y or 0)

    # Scaling
    view_box = self.get_box(node)
    if not view_box and outermost:
        # Apply only the 'reverse' y-scaling (PDF 0,0 is bottom left)
        group.scale(1, -1)
    elif view_box:
        x_scale, y_scale = 1, 1
        width, height = map(getAttr, ("width", "height"))
        width, height = map(self.attrConverter.convertLength, (width, height))
        if view_box.height != height:
            y_scale = height / view_box.height
        if view_box.width != width:
            x_scale = width / view_box.width
        # The outermost group also carries the PDF y-flip in its scale.
        group.scale(x_scale, y_scale * (-1 if outermost else 1))

    return group
def renderG(self, node, clipping=None, display=1):
    """Render a <g> group node into an RLG Group.

    Children are rendered recursively; when ``display`` is falsy the
    children are converted but not added (used by <symbol>).
    """
    # FIX: the original also fetched the 'id' attribute into a local
    # named ``id`` that shadowed the builtin and was never used.
    transform = node.getAttribute("transform")
    gr = Group()
    if clipping:
        gr.add(clipping)
    for child in node.getchildren():
        item = self.renderNode(child, parent=gr)
        if item and display:
            gr.add(item)

    if transform:
        self.shape_converter.applyTransformOnGroup(transform, gr)

    return gr
def renderStyle(self, node):
    # Compile the <style> block's CSS rules; findAttr applies them lazily.
    self.attrConverter.css_rules = CSSMatcher(node.text)
def renderSymbol(self, node):
    # A <symbol> is converted like a <g> but not displayed; it is only
    # instantiated via <use>.
    return self.renderG(node, display=0)
def renderA(self, node):
    # currently nothing but a group...
    # there is no linking info stored in shapes, maybe a group should?
    return self.renderG(node)
def renderUse(self, node, group=None, clipping=None):
    """Render a <use> node by appending a deep copy of its referenced
    element as a child and rendering that copy.

    Returns the (possibly newly created) RLG Group, or None when the
    reference is missing or targets an unsupported bitmap image.
    """
    if group is None:
        group = Group()

    try:
        item = self.xlink_href_target(node, group=group)
    except CircularRefError:
        # Break the reference cycle by removing this <use> from its parent.
        node.parent.object.remove(node.object)
        return group
    if item is None:
        return
    elif isinstance(item, str):
        logger.error("<use> nodes cannot reference bitmap image files")
        return
    elif item is DELAYED:
        # Target is defined later in the file; rendering is retried when
        # the definition is registered (see waiting_use_nodes).
        return group
    else:
        item = item[1]  # [0] is the renderer, not used here.

    if clipping:
        group.add(clipping)
    if len(node.getchildren()) == 0:
        # Append a copy of the referenced node as the <use> child (if not already done)
        node.append(copy.deepcopy(item))
    self.renderNode(node.getchildren()[-1], parent=group)
    self.apply_node_attr_to_group(node, group)
    return group
class SvgShapeConverter:
    """Abstract base for SVG shape converters.

    Subclasses implement methods named ``convertX(node)``, where ``X`` is
    the capitalised name of an SVG shape element ('Rect', 'Circle',
    'Line', ...), each returning a shape object appropriate for the
    target format.
    """

    def __init__(self, path, attrConverter=None):
        # Fall back to the default RLG attribute converter when none given.
        self.attrConverter = attrConverter or Svg2RlgAttributeConverter()
        self.svg_source_file = path
        self.preserve_space = False

    @classmethod
    def get_handled_shapes(cls):
        """Return the lowercased shape names this class can convert,
        derived dynamically from the ``convert*`` methods it defines."""
        prefix = 'convert'
        return [
            attr[len(prefix):].lower()
            for attr in dir(cls)
            if attr.startswith(prefix)
        ]
class Svg2RlgShapeConverter(SvgShapeConverter):
"""Converter from SVG shapes to RLG (ReportLab Graphics) shapes."""
def convertShape(self, name, node, clipping=None):
    """Dispatch ``node`` to the matching convert<Name> method.

    Returns the bare shape when no transform/clipping applies, otherwise
    a Group carrying the transform and/or clipping path; None when the
    specific converter produced nothing.
    """
    method_name = "convert%s" % name.capitalize()
    shape = getattr(self, method_name)(node)
    if not shape:
        return
    if name not in ('path', 'polyline', 'text'):
        # Only apply style where the convert method did not apply it.
        self.applyStyleOnShape(shape, node)
    transform = node.getAttribute("transform")
    if not (transform or clipping):
        return shape
    else:
        group = Group()
        if transform:
            self.applyTransformOnGroup(transform, group)
        if clipping:
            group.add(clipping)
        group.add(shape)
        return group
def convertLine(self, node):
    """Convert an SVG <line> element into an RLG Line shape."""
    conv = self.attrConverter.convertLength
    coords = [conv(node.getAttribute(attr)) for attr in ("x1", "y1", "x2", "y2")]
    return Line(*coords)
def convertRect(self, node):
    """Convert an SVG <rect> (with optional rounded corners) to an RLG Rect."""
    conv = self.attrConverter.convertLength
    x, y, width, height, rx, ry = [
        conv(node.getAttribute(attr))
        for attr in ('x', 'y', "width", "height", "rx", "ry")
    ]
    return Rect(x, y, width, height, rx=rx, ry=ry)
def convertCircle(self, node):
    """Convert an SVG <circle> to an RLG Circle.

    Per the SVG spec nothing is rendered for r == 0 and r < 0 is an
    error; both cases are left to the renderer.
    """
    conv = self.attrConverter.convertLength
    cx = conv(node.getAttribute("cx"))
    cy = conv(node.getAttribute("cy"))
    r = conv(node.getAttribute('r'))
    return Circle(cx, cy, r)
def convertEllipse(self, node):
    """Convert an SVG <ellipse> to an RLG Ellipse."""
    conv = self.attrConverter.convertLength
    cx, cy, rx, ry = [conv(node.getAttribute(a)) for a in ("cx", "cy", "rx", "ry")]
    # RLG's Ellipse takes the half-axis lengths, matching SVG rx/ry.
    return Ellipse(cx, cy, rx, ry)
def convertPolyline(self, node):
    """Convert an SVG <polyline>.

    Returns a PolyLine, or — when a fill is requested — a Group holding an
    unstroked Polygon (providing the fill) underneath the PolyLine, since
    ReportLab does not fill open polylines. Returns None for an empty or
    odd-length points list.
    """
    getAttr = node.getAttribute
    points = getAttr("points")
    points = points.replace(',', ' ')
    points = points.split()
    points = list(map(self.attrConverter.convertLength, points))
    if len(points) % 2 != 0 or len(points) == 0:
        # Odd number of coordinates or no coordinates, invalid polyline
        return None

    polyline = PolyLine(points)
    self.applyStyleOnShape(polyline, node)
    has_fill = self.attrConverter.findAttr(node, 'fill') not in ('', 'none')

    if has_fill:
        # ReportLab doesn't fill polylines, so we are creating a polygon
        # copy of the polyline, but without stroke.
        group = Group()
        polygon = Polygon(points)
        self.applyStyleOnShape(polygon, node)
        polygon.strokeColor = None
        group.add(polygon)
        group.add(polyline)
        return group

    return polyline
def convertPolygon(self, node):
    """Convert an SVG <polygon> into an RLG Polygon, or None when the
    points attribute is empty or has an odd number of coordinates."""
    raw = node.getAttribute("points").replace(',', ' ').split()
    coords = [self.attrConverter.convertLength(value) for value in raw]
    if not coords or len(coords) % 2 != 0:
        # Invalid per the SVG spec: no points or an unpaired coordinate.
        return None
    return Polygon(coords)
def clean_text(self, text, preserve_space):
    """Normalise whitespace in SVG text content, as per
    https://www.w3.org/TR/SVG/text.html#WhiteSpace
    """
    if text is None:
        return
    if preserve_space:
        # xml:space="preserve": newlines and tabs become spaces;
        # everything else is kept verbatim.
        for old, new in (('\r\n', ' '), ('\n', ' '), ('\t', ' ')):
            text = text.replace(old, new)
    else:
        # Default handling: drop newlines, turn tabs into spaces, trim,
        # then collapse runs of spaces into a single space.
        for old, new in (('\r\n', ''), ('\n', ''), ('\t', ' ')):
            text = text.replace(old, new)
        text = text.strip()
        while '  ' in text:
            text = text.replace('  ', ' ')
    return text
def convertText(self, node):
    """Convert a <text> node (and its <tspan> children) into a Group of
    String shapes.

    The group is y-flipped because the whole drawing is rendered in a
    flipped coordinate system (PDF origin bottom-left). Horizontal
    advance of successive fragments is accumulated via stringWidth.
    """
    attrConv = self.attrConverter
    xml_space = node.getAttribute("{%s}space" % XML_NS)
    if xml_space:
        preserve_space = xml_space == 'preserve'
    else:
        preserve_space = self.preserve_space

    gr = Group()

    # Widths of fragments already laid out, used for the x-advance of
    # fragments without an explicit x.
    frag_lengths = []

    dx0, dy0 = 0, 0
    x1, y1 = 0, 0
    ff = attrConv.findAttr(node, "font-family") or DEFAULT_FONT_NAME
    ff = attrConv.convertFontFamily(ff)
    fs = attrConv.findAttr(node, "font-size") or "12"
    fs = attrConv.convertLength(fs)
    # em-relative lengths inside this element resolve against fs.
    convertLength = partial(attrConv.convertLength, em_base=fs)
    x, y = map(node.getAttribute, ('x', 'y'))
    x, y = map(convertLength, (x, y))
    for c in itertools.chain([node], node.getchildren()):
        has_x, has_y = False, False
        dx, dy = 0, 0
        baseLineShift = 0
        if node_name(c) == 'text':
            # The direct text of the <text> node itself.
            text = self.clean_text(c.text, preserve_space)
            if not text:
                continue
        elif node_name(c) == 'tspan':
            text = self.clean_text(c.text, preserve_space)
            if not text:
                continue
            x1, y1, dx, dy = [c.attrib.get(name, '') for name in ("x", "y", "dx", "dy")]
            has_x, has_y = (x1 != '', y1 != '')
            x1, y1, dx, dy = map(convertLength, (x1, y1, dx, dy))
            # dx/dy accumulate across successive tspans.
            dx0 = dx0 + dx
            dy0 = dy0 + dy
            baseLineShift = c.attrib.get("baseline-shift", '0')
            if baseLineShift in ("sub", "super", "baseline"):
                baseLineShift = {"sub":-fs/2, "super":fs/2, "baseline":0}[baseLineShift]
            else:
                baseLineShift = convertLength(baseLineShift, fs)
        else:
            # Other child elements (e.g. <title>) are ignored.
            continue

        frag_lengths.append(stringWidth(text, ff, fs))
        # Explicit x/y positions override the accumulated advance.
        new_x = (x1 + dx) if has_x else (x + dx0 + sum(frag_lengths[:-1]))
        new_y = (y1 + dy) if has_y else (y + dy0)
        shape = String(new_x, -(new_y - baseLineShift), text)
        self.applyStyleOnShape(shape, node)
        if node_name(c) == 'tspan':
            # tspan style overrides the parent <text> style.
            self.applyStyleOnShape(shape, c)

        gr.add(shape)

    gr.scale(1, -1)

    return gr
def convertPath(self, node):
    """Convert an SVG <path> 'd' attribute into an RLG Path.

    Quadratic beziers and elliptical arcs are approximated with cubic
    beziers. Because ReportLab only fills closed paths, a second
    stroke-less copy with all subpaths closed is emitted underneath when
    needed, and the returned value is always a Group.
    """
    d = node.getAttribute('d')
    if not d:
        return None
    normPath = normalise_svg_path(d)
    path = Path()
    points = path.points
    # Track subpaths needing to be closed later
    unclosed_subpath_pointers = []
    subpath_start = []
    lastop = ''

    # normalise_svg_path yields alternating (operator, numbers) pairs.
    for i in range(0, len(normPath), 2):
        op, nums = normPath[i:i+2]

        # A new moveto that follows an unclosed subpath marks the previous
        # subpath as needing closure for the fill copy below.
        if op in ('m', 'M') and i > 0 and path.operators[-1] != _CLOSEPATH:
            unclosed_subpath_pointers.append(len(path.operators))

        # moveto absolute
        if op == 'M':
            path.moveTo(*nums)
            subpath_start = points[-2:]
        # lineto absolute
        elif op == 'L':
            path.lineTo(*nums)

        # moveto relative
        elif op == 'm':
            if len(points) >= 2:
                # After a closepath, a relative moveto is relative to the
                # start of the just-closed subpath (SVG spec).
                if lastop in ('Z', 'z'):
                    starting_point = subpath_start
                else:
                    starting_point = points[-2:]
                xn, yn = starting_point[0] + nums[0], starting_point[1] + nums[1]
                path.moveTo(xn, yn)
            else:
                path.moveTo(*nums)
            subpath_start = points[-2:]
        # lineto relative
        elif op == 'l':
            xn, yn = points[-2] + nums[0], points[-1] + nums[1]
            path.lineTo(xn, yn)

        # horizontal/vertical line absolute
        elif op == 'H':
            path.lineTo(nums[0], points[-1])
        elif op == 'V':
            path.lineTo(points[-2], nums[0])

        # horizontal/vertical line relative
        elif op == 'h':
            path.lineTo(points[-2] + nums[0], points[-1])
        elif op == 'v':
            path.lineTo(points[-2], points[-1] + nums[0])

        # cubic bezier, absolute
        elif op == 'C':
            path.curveTo(*nums)
        elif op == 'S':
            # Smooth curveto: first control point reflects the previous
            # one when the preceding op was a cubic bezier.
            x2, y2, xn, yn = nums
            if len(points) < 4 or lastop not in {'c', 'C', 's', 'S'}:
                xp, yp, x0, y0 = points[-2:] * 2
            else:
                xp, yp, x0, y0 = points[-4:]
            xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
            path.curveTo(xi, yi, x2, y2, xn, yn)

        # cubic bezier, relative
        elif op == 'c':
            xp, yp = points[-2:]
            x1, y1, x2, y2, xn, yn = nums
            path.curveTo(xp + x1, yp + y1, xp + x2, yp + y2, xp + xn, yp + yn)
        elif op == 's':
            x2, y2, xn, yn = nums
            if len(points) < 4 or lastop not in {'c', 'C', 's', 'S'}:
                xp, yp, x0, y0 = points[-2:] * 2
            else:
                xp, yp, x0, y0 = points[-4:]
            xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
            path.curveTo(xi, yi, x0 + x2, y0 + y2, x0 + xn, y0 + yn)

        # quadratic bezier, absolute
        elif op == 'Q':
            x0, y0 = points[-2:]
            x1, y1, xn, yn = nums
            (x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
                convert_quadratic_to_cubic_path((x0, y0), (x1, y1), (xn, yn))
            path.curveTo(x1, y1, x2, y2, xn, yn)
        elif op == 'T':
            # Smooth quadratic: control point is the reflection of the
            # previous control point about the current point.
            if len(points) < 4:
                xp, yp, x0, y0 = points[-2:] * 2
            else:
                xp, yp, x0, y0 = points[-4:]
            xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
            xn, yn = nums
            (x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
                convert_quadratic_to_cubic_path((x0, y0), (xi, yi), (xn, yn))
            path.curveTo(x1, y1, x2, y2, xn, yn)

        # quadratic bezier, relative
        elif op == 'q':
            x0, y0 = points[-2:]
            x1, y1, xn, yn = nums
            x1, y1, xn, yn = x0 + x1, y0 + y1, x0 + xn, y0 + yn
            (x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
                convert_quadratic_to_cubic_path((x0, y0), (x1, y1), (xn, yn))
            path.curveTo(x1, y1, x2, y2, xn, yn)
        elif op == 't':
            if len(points) < 4:
                xp, yp, x0, y0 = points[-2:] * 2
            else:
                xp, yp, x0, y0 = points[-4:]
            x0, y0 = points[-2:]
            xn, yn = nums
            xn, yn = x0 + xn, y0 + yn
            xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
            (x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
                convert_quadratic_to_cubic_path((x0, y0), (xi, yi), (xn, yn))
            path.curveTo(x1, y1, x2, y2, xn, yn)

        # elliptical arc
        elif op in ('A', 'a'):
            rx, ry, phi, fA, fS, x2, y2 = nums
            x1, y1 = points[-2:]
            if op == 'a':
                x2 += x1
                y2 += y1
            if abs(rx) <= 1e-10 or abs(ry) <= 1e-10:
                # Degenerate arc (zero radius): SVG spec says draw a line.
                path.lineTo(x2, y2)
            else:
                bp = bezier_arc_from_end_points(x1, y1, rx, ry, phi, fA, fS, x2, y2)
                for _, _, x1, y1, x2, y2, xn, yn in bp:
                    path.curveTo(x1, y1, x2, y2, xn, yn)

        # close path
        elif op in ('Z', 'z'):
            path.closePath()

        else:
            logger.debug("Suspicious path operator: %s" % op)
        lastop = op

    gr = Group()
    self.applyStyleOnShape(path, node)

    if path.operators[-1] != _CLOSEPATH:
        unclosed_subpath_pointers.append(len(path.operators))

    if unclosed_subpath_pointers and path.fillColor is not None:
        # ReportLab doesn't fill unclosed paths, so we are creating a copy
        # of the path with all subpaths closed, but without stroke.
        # https://bitbucket.org/rptlab/reportlab/issues/99/
        closed_path = NoStrokePath(copy_from=path)
        for pointer in reversed(unclosed_subpath_pointers):
            closed_path.operators.insert(pointer, _CLOSEPATH)
        gr.add(closed_path)
        path.fillColor = None

    gr.add(path)
    return gr
def convertImage(self, node):
    """Convert an <image> node whose target was resolved earlier and
    stored on the node as ``_resolved_target`` by the renderer.

    Returns a Group with the translate/flip needed to undo the drawing's
    global y-axis inversion, or None when PIL is unavailable.
    """
    if not haveImages:
        logger.warning(
            "Unable to handle embedded images. Maybe the pillow library is missing?"
        )
        return None

    getAttr = node.getAttribute
    x, y, width, height = map(getAttr, ('x', 'y', "width", "height"))
    x, y, width, height = map(self.attrConverter.convertLength, (x, y, width, height))
    image = node._resolved_target
    image = Image(int(x), int(y + height), int(width), int(height), image)

    group = Group(image)
    # Flip the image back upright inside the globally y-flipped drawing.
    group.translate(0, (y + height) * 2)
    group.scale(1, -1)
    return group
def applyTransformOnGroup(self, transform, group):
    """Apply an SVG transformation to a RL Group shape.

    The transformation is the value of an SVG transform attribute
    like transform="scale(1, -1) translate(10, 30)".

    rotate(<angle> [<cx> <cy>]) is equivalent to:
      translate(<cx> <cy>) rotate(<angle>) translate(-<cx> -<cy>)
    """
    tr = self.attrConverter.convertTransform(transform)
    for op, values in tr:
        if op == "scale":
            if not isinstance(values, tuple):
                # A single scale value means uniform scaling.
                values = (values, values)
            group.scale(*values)
        elif op == "translate":
            if isinstance(values, (int, float)):
                # From the SVG spec: If <ty> is not provided, it is assumed to be zero.
                values = values, 0
            group.translate(*values)
        elif op == "rotate":
            if not isinstance(values, tuple) or len(values) == 1:
                group.rotate(values)
            elif len(values) == 3:
                # Rotation about (cx, cy): sandwich the rotation between
                # opposite translations.
                angle, cx, cy = values
                group.translate(cx, cy)
                group.rotate(angle)
                group.translate(-cx, -cy)
        elif op == "skewX":
            group.skew(values, 0)
        elif op == "skewY":
            group.skew(0, values)
        elif op == "matrix":
            group.transform = values
        else:
            logger.debug("Ignoring transform: %s %s" % (op, values))
def applyStyleOnShape(self, shape, node, only_explicit=False):
    """
    Apply style attributes of an SVG element to an RLG shape.

    If only_explicit is True, only attributes really present are applied.
    Groups are styled recursively; font attributes (mappingF) are only
    applied to String shapes.
    """
    # tuple format: (svgAttr, rlgAttr, converter, default)
    mappingN = (
        ("fill", "fillColor", "convertColor", "black"),
        ("fill-opacity", "fillOpacity", "convertOpacity", 1),
        ("fill-rule", "_fillRule", "convertFillRule", "nonzero"),
        ("stroke", "strokeColor", "convertColor", "none"),
        ("stroke-width", "strokeWidth", "convertLength", "1"),
        ("stroke-opacity", "strokeOpacity", "convertOpacity", 1),
        ("stroke-linejoin", "strokeLineJoin", "convertLineJoin", "0"),
        ("stroke-linecap", "strokeLineCap", "convertLineCap", "0"),
        ("stroke-dasharray", "strokeDashArray", "convertDashArray", "none"),
    )
    mappingF = (
        ("font-family", "fontName", "convertFontFamily", DEFAULT_FONT_NAME),
        ("font-size", "fontSize", "convertLength", "12"),
        ("text-anchor", "textAnchor", "id", "start"),
    )

    if shape.__class__ == Group:
        # Recursively apply style on Group subelements
        for subshape in shape.contents:
            self.applyStyleOnShape(subshape, node, only_explicit=only_explicit)
        return

    ac = self.attrConverter
    for mapping in (mappingN, mappingF):
        if shape.__class__ != String and mapping == mappingF:
            continue
        for (svgAttrName, rlgAttr, func, default) in mapping:
            svgAttrValue = ac.findAttr(node, svgAttrName)
            if svgAttrValue == '':
                if only_explicit:
                    continue
                else:
                    svgAttrValue = default
            if svgAttrValue == "currentColor":
                # Resolve against the inherited 'color' property.
                svgAttrValue = ac.findAttr(node.getparent(), "color") or default
            try:
                meth = getattr(ac, func)
                setattr(shape, rlgAttr, meth(svgAttrValue))
            except (AttributeError, KeyError, ValueError):
                # Unknown or invalid values are silently ignored so a bad
                # attribute doesn't abort the whole conversion.
                pass
    if getattr(shape, 'fillOpacity', None) is not None and shape.fillColor:
        shape.fillColor.alpha = shape.fillOpacity
def svg2rlg(path, **kwargs):
    """Convert an SVG file to an RLG Drawing object.

    ``path`` may also name a gzip-compressed file with extension .svgz,
    which is transparently unzipped next to it and removed afterwards.
    Returns None when the file cannot be parsed.
    """
    # unzip .svgz file into .svg
    unzipped = False
    if isinstance(path, str) and os.path.splitext(path)[1].lower() == ".svgz":
        with gzip.open(path, 'rb') as f_in, open(path[:-1], 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
        path = path[:-1]
        unzipped = True

    try:
        svg_root = load_svg_file(path)
        if svg_root is None:
            return

        # convert to a RLG drawing
        svgRenderer = SvgRenderer(path, **kwargs)
        drawing = svgRenderer.render(svg_root)
    finally:
        # FIX: remove the unzipped .svg even when parsing or rendering
        # fails, so a failed conversion no longer leaves temp files behind.
        if unzipped:
            os.remove(path)

    return drawing
def load_svg_file(path):
    """Parse an SVG file and return its root element, or None on failure."""
    # 'recover' makes lxml tolerate mildly broken markup; comments are
    # stripped so the renderer never sees them.
    parser = etree.XMLParser(remove_comments=True, recover=True)
    try:
        root = etree.parse(path, parser=parser).getroot()
    except Exception as exc:
        logger.error("Failed to load input file! (%s)" % exc)
        return None
    return root
def node_name(node):
    """Return the lxml node name without any namespace prefix, or None
    for objects without a string tag (e.g. comment nodes)."""
    try:
        tag = node.tag
        return tag.split('}')[-1]
    except AttributeError:
        return None
def monkeypatch_reportlab():
    """
    https://bitbucket.org/rptlab/reportlab/issues/95/

    ReportLab always use 'Even-Odd' filling mode for paths, this patch forces
    RL to honor the path fill rule mode (possibly 'Non-Zero Winding') instead.
    """
    from reportlab.pdfgen.canvas import Canvas
    from reportlab.graphics import shapes

    original_renderPath = shapes._renderPath

    def patchedRenderPath(path, drawFuncs, **kwargs):
        # Patched method to transfer fillRule from Path to PDFPathObject
        # Get back from bound method to instance
        try:
            drawFuncs[0].__self__.fillMode = path._fillRule
        except AttributeError:
            pass
        return original_renderPath(path, drawFuncs, **kwargs)

    shapes._renderPath = patchedRenderPath

    original_drawPath = Canvas.drawPath

    def patchedDrawPath(self, path, **kwargs):
        # Temporarily switch the canvas fill mode to the path's own rule,
        # restoring the previous mode afterwards.
        current = self._fillMode
        if hasattr(path, 'fillMode'):
            self._fillMode = path.fillMode
        else:
            self._fillMode = FILL_NON_ZERO
        original_drawPath(self, path, **kwargs)
        self._fillMode = current

    Canvas.drawPath = patchedDrawPath
monkeypatch_reportlab()
|
deeplook/svglib | svglib/svglib.py | svg2rlg | python | def svg2rlg(path, **kwargs):
"Convert an SVG file to an RLG Drawing object."
# unzip .svgz file into .svg
unzipped = False
if isinstance(path, str) and os.path.splitext(path)[1].lower() == ".svgz":
with gzip.open(path, 'rb') as f_in, open(path[:-1], 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
path = path[:-1]
unzipped = True
svg_root = load_svg_file(path)
if svg_root is None:
return
# convert to a RLG drawing
svgRenderer = SvgRenderer(path, **kwargs)
drawing = svgRenderer.render(svg_root)
# remove unzipped .svgz file (.svg)
if unzipped:
os.remove(path)
return drawing | Convert an SVG file to an RLG Drawing object. | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L1324-L1347 | [
"def load_svg_file(path):\n parser = etree.XMLParser(remove_comments=True, recover=True)\n try:\n doc = etree.parse(path, parser=parser)\n svg_root = doc.getroot()\n except Exception as exc:\n logger.error(\"Failed to load input file! (%s)\" % exc)\n else:\n return svg_root\n",
"def render(self, svg_node):\n node = NodeTracker(svg_node)\n main_group = self.renderSvg(node, outermost=True)\n for xlink in self.waiting_use_nodes.keys():\n logger.debug(\"Ignoring unavailable object width ID '%s'.\" % xlink)\n\n view_box = self.get_box(node, default_box=True)\n main_group.translate(0 - view_box.x, -view_box.height - view_box.y)\n\n width, height = svg_node.attrib.get(\"width\"), svg_node.attrib.get(\"height\")\n width, height = map(self.attrConverter.convertLength, (width, height))\n drawing = Drawing(width, height)\n drawing.add(main_group)\n return drawing\n"
] | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""A library for reading and converting SVG.
This is a converter from SVG to RLG (ReportLab Graphics) drawings.
It converts mainly basic shapes, paths and simple text. The intended
usage is either as module within other projects:
from svglib.svglib import svg2rlg
drawing = svg2rlg("foo.svg")
or from the command-line where it is usable as an SVG to PDF converting
tool named sv2pdf (which should also handle SVG files compressed with
gzip and extension .svgz).
"""
import copy
import gzip
import itertools
import logging
import os
import re
import base64
import tempfile
import shlex
import shutil
import subprocess
import sys
from collections import defaultdict, namedtuple
from functools import partial
from reportlab.pdfbase.pdfmetrics import registerFont, stringWidth
from reportlab.pdfbase.ttfonts import TTFError, TTFont
from reportlab.pdfgen.canvas import FILL_EVEN_ODD, FILL_NON_ZERO
from reportlab.pdfgen.pdfimages import PDFImage
from reportlab.graphics.shapes import (
_CLOSEPATH, Circle, Drawing, Ellipse, Group, Image, Line, Path, PolyLine,
Polygon, Rect, String,
)
from reportlab.lib import colors
from reportlab.lib.units import pica, toLength
from reportlab.lib.utils import haveImages
from lxml import etree
import cssselect2
import tinycss2
from .utils import (
bezier_arc_from_end_points, convert_quadratic_to_cubic_path,
normalise_svg_path,
)
__version__ = '0.9.0'
__license__ = 'LGPL 3'
__author__ = 'Dinu Gherman'
__date__ = '2018-12-08'

# Namespace URI used to look up the xml:space attribute.
XML_NS = 'http://www.w3.org/XML/1998/namespace'

# A sentinel to identify a situation where a node reference a fragment not yet defined.
DELAYED = object()

# The PDF base fonts that never need explicit registration.
STANDARD_FONT_NAMES = (
    'Times-Roman', 'Times-Italic', 'Times-Bold', 'Times-BoldItalic',
    'Helvetica', 'Helvetica-Oblique', 'Helvetica-Bold', 'Helvetica-BoldOblique',
    'Courier', 'Courier-Oblique', 'Courier-Bold', 'Courier-BoldOblique',
    'Symbol', 'ZapfDingbats',
)
DEFAULT_FONT_NAME = "Helvetica"
# Cache of fonts registered at runtime: name -> exact-match flag.
_registered_fonts = {}

logger = logging.getLogger(__name__)

# Lightweight rectangle record used for viewBox handling.
Box = namedtuple('Box', ['x', 'y', 'width', 'height'])

# Tokenizer splitting a string on XML whitespace characters.
split_whitespace = re.compile(r'[^ \t\r\n\f]+').findall
def find_font(font_name):
    """Return (font_name, exact): the usable font and whether the match
    is exact.

    Lookup order: PDF standard fonts, previously registered fonts, a
    '<name>.ttf' file via ReportLab's font search, and finally
    Fontconfig's ``fc-match``. Returns (None, False) when nothing can be
    registered.
    """
    if font_name in STANDARD_FONT_NAMES:
        return font_name, True
    elif font_name in _registered_fonts:
        return font_name, _registered_fonts[font_name]

    NOT_FOUND = (None, False)
    try:
        # Try first to register the font if it exists as ttf,
        # based on ReportLab font search.
        registerFont(TTFont(font_name, '%s.ttf' % font_name))
        _registered_fonts[font_name] = True
        return font_name, True
    except TTFError:
        # Try searching with Fontconfig
        try:
            pipe = subprocess.Popen(
                ['fc-match', '-s', '--format=%{file}\\n', font_name],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            output = pipe.communicate()[0].decode(sys.getfilesystemencoding())
            font_path = output.split('\n')[0]
        except OSError:
            # fc-match binary not available on this system.
            return NOT_FOUND
        try:
            registerFont(TTFont(font_name, font_path))
        except TTFError:
            return NOT_FOUND
        # Fontconfig may return a default font totally unrelated with font_name
        exact = font_name.lower() in os.path.basename(font_path).lower()
        _registered_fonts[font_name] = exact
        return font_name, exact
class NoStrokePath(Path):
    """
    This path object never gets a stroke width whatever the properties it's
    getting assigned.
    """
    def __init__(self, *args, **kwargs):
        # 'copy_from' clones an existing Path's full state into this one.
        copy_from = kwargs.pop('copy_from', None)
        Path.__init__(self, *args, **kwargs)  # we're old-style class on PY2
        if copy_from:
            self.__dict__.update(copy.deepcopy(copy_from.__dict__))

    def getProperties(self, *args, **kwargs):
        # __getattribute__ wouldn't suit, as RL is directly accessing self.__dict__
        props = Path.getProperties(self, *args, **kwargs)
        if 'strokeWidth' in props:
            props['strokeWidth'] = 0
        if 'strokeColor' in props:
            props['strokeColor'] = None
        return props
class ClippingPath(Path):
    """A Path variant marked as a clip path and reporting no fill color,
    so it clips without painting anything itself."""
    def __init__(self, *args, **kwargs):
        # 'copy_from' clones an existing Path's full state into this one.
        copy_from = kwargs.pop('copy_from', None)
        Path.__init__(self, *args, **kwargs)
        if copy_from:
            self.__dict__.update(copy.deepcopy(copy_from.__dict__))
        self.isClipPath = 1

    def getProperties(self, *args, **kwargs):
        props = Path.getProperties(self, *args, **kwargs)
        if 'fillColor' in props:
            props['fillColor'] = None
        return props
class CSSMatcher(cssselect2.Matcher):
    """A cssselect2 matcher pre-loaded with the rules of a <style> block.

    Each selector is registered with a payload of
    (selector_string, {property: value}) that findAttr() applies lazily
    to matching elements.
    """
    def __init__(self, style_content):
        super(CSSMatcher, self).__init__()
        self.rules = tinycss2.parse_stylesheet(
            style_content, skip_comments=True, skip_whitespace=True
        )
        for rule in self.rules:
            if not rule.prelude:
                # Rules without selectors (e.g. parse errors) are skipped.
                continue
            selectors = cssselect2.compile_selector_list(rule.prelude)
            selector_string = tinycss2.serialize(rule.prelude)
            # Turn the serialized declaration block into a property dict.
            content_dict = dict(
                (attr.split(':')[0].strip(), attr.split(':')[1].strip())
                for attr in tinycss2.serialize(rule.content).split(';')
                if ':' in attr
            )
            payload = (selector_string, content_dict)
            for selector in selectors:
                self.add_selector(selector, payload)
# Attribute converters (from SVG to RLG)
class AttributeConverter(object):
    "An abstract class to locate and convert attributes in a DOM instance."

    def __init__(self):
        # CSSMatcher built from <style> blocks; None until one is parsed.
        self.css_rules = None

    def parseMultiAttributes(self, line):
        """Try parsing compound attribute string.

        Return a dictionary with single attributes in 'line'
        (e.g. "fill:red; stroke:blue" -> {'fill': 'red', 'stroke': 'blue'}).
        """
        attrs = line.split(';')
        attrs = [a.strip() for a in attrs]
        attrs = filter(lambda a:len(a)>0, attrs)
        new_attrs = {}
        for a in attrs:
            k, v = a.split(':')
            k, v = [s.strip() for s in (k, v)]
            new_attrs[k] = v
        return new_attrs

    def findAttr(self, svgNode, name):
        """Search an attribute with some name in some node or above.

        First the node is searched, then its style attribute, then
        the search continues in the node's parent node. If no such
        attribute is found, '' is returned.
        """
        # This needs also to lookup values like "url(#SomeName)"...
        # Apply any matching CSS rules once per node before reading.
        if self.css_rules is not None and not svgNode.attrib.get('__rules_applied', False):
            if isinstance(svgNode, NodeTracker):
                svgNode.apply_rules(self.css_rules)
            else:
                ElementWrapper(svgNode).apply_rules(self.css_rules)
        attr_value = svgNode.attrib.get(name, '').strip()

        if attr_value and attr_value != "inherit":
            return attr_value
        elif svgNode.attrib.get("style"):
            dict = self.parseMultiAttributes(svgNode.attrib.get("style"))
            if name in dict:
                return dict[name]
        # Not found here: recurse up the tree (attribute inheritance).
        if svgNode.getparent() is not None:
            return self.findAttr(svgNode.getparent(), name)
        return ''

    def getAllAttributes(self, svgNode):
        "Return a dictionary of all attributes of svgNode or those inherited by it."
        dict = {}

        # Only attributes inherited through <g> parents are merged in.
        if node_name(svgNode.getparent()) == 'g':
            dict.update(self.getAllAttributes(svgNode.getparent()))

        style = svgNode.attrib.get("style")
        if style:
            d = self.parseMultiAttributes(style)
            dict.update(d)

        for key, value in svgNode.attrib.items():
            if key != "style":
                dict[key] = value

        return dict

    def id(self, svgAttr):
        "Return attribute as is."
        return svgAttr

    def convertTransform(self, svgAttr):
        """Parse transform attribute string.

        E.g. "scale(2) translate(10,20)"
             -> [("scale", 2), ("translate", (10,20))]
        """
        line = svgAttr.strip()

        ops = line[:]
        brackets = []
        indices = []
        # Record the positions of '(' and ')' so operator names and their
        # argument lists can be separated.
        for i, lin in enumerate(line):
            if lin in "()":
                brackets.append(i)
        for i in range(0, len(brackets), 2):
            bi, bj = brackets[i], brackets[i+1]
            subline = line[bi+1:bj]
            subline = subline.strip()
            subline = subline.replace(',', ' ')
            subline = re.sub("[ ]+", ',', subline)
            try:
                if ',' in subline:
                    indices.append(tuple(float(num) for num in subline.split(',')))
                else:
                    indices.append(float(subline))
            except ValueError:
                continue
            # Blank out the consumed argument list; only op names remain.
            ops = ops[:bi] + ' '*(bj-bi+1) + ops[bj+1:]
        ops = ops.replace(',', ' ').split()

        if len(ops) != len(indices):
            logger.warning("Unable to parse transform expression '%s'" % svgAttr)
            return []

        result = []
        for i, op in enumerate(ops):
            result.append((op, indices[i]))

        return result
class Svg2RlgAttributeConverter(AttributeConverter):
"A concrete SVG to RLG attribute converter."
def __init__(self, color_converter=None):
    """color_converter: optional callable post-processing every color
    produced by convertColor (defaults to identity)."""
    super(Svg2RlgAttributeConverter, self).__init__()
    self.color_converter = color_converter or self.identity_color_converter
@staticmethod
def identity_color_converter(c):
    # Default color converter: pass colors through unchanged.
    return c
@staticmethod
def split_attr_list(attr):
    # Split a comma- and/or whitespace-separated attribute value,
    # honouring shell-style quoting (e.g. quoted font family names).
    return shlex.split(attr.strip().replace(',', ' '))
def convertLength(self, svgAttr, percentOf=100, em_base=12):
    """Convert length to points.

    percentOf: reference value for '%' lengths.
    em_base: font size in points used for 'em' lengths.
    Returns 0.0 for an empty attribute.
    """
    text = svgAttr
    if not text:
        return 0.0
    if ' ' in text.replace(',', ' ').strip():
        # Multiple values given: only the first one is used.
        logger.debug("Only getting first value of %s" % text)
        text = text.replace(',', ' ').split()[0]

    if text.endswith('%'):
        logger.debug("Fiddling length unit: %")
        return float(text[:-1]) / 100 * percentOf
    elif text.endswith("pc"):
        return float(text[:-2]) * pica
    elif text.endswith("pt"):
        # NOTE(review): the 1.25 factor presumably maps CSS pt (96 dpi
        # user units) onto PDF points — confirm against the SVG spec.
        return float(text[:-2]) * 1.25
    elif text.endswith("em"):
        return float(text[:-2]) * em_base
    elif text.endswith("px"):
        return float(text[:-2])

    if "ex" in text:
        logger.warning("Ignoring unit ex")
        text = text.replace("ex", '')

    text = text.strip()
    length = toLength(text)  # this does the default measurements such as mm and cm

    return length
def convertLengthList(self, svgAttr):
    """Convert a list of lengths."""
    return [self.convertLength(a) for a in self.split_attr_list(svgAttr)]
def convertOpacity(self, svgAttr):
    # Opacity is a plain float in [0, 1]; invalid values raise ValueError
    # which applyStyleOnShape swallows.
    return float(svgAttr)
def convertFillRule(self, svgAttr):
    # Map the SVG fill-rule keyword to ReportLab's fill-mode constant;
    # unknown values yield '' (effectively ignored by the style applier).
    return {
        'nonzero': FILL_NON_ZERO,
        'evenodd': FILL_EVEN_ODD,
    }.get(svgAttr, '')
def convertColor(self, svgAttr):
"Convert string to a RL color object."
# fix it: most likely all "web colors" are allowed
predefined = "aqua black blue fuchsia gray green lime maroon navy "
predefined = predefined + "olive orange purple red silver teal white yellow "
predefined = predefined + "lawngreen indianred aquamarine lightgreen brown"
# This needs also to lookup values like "url(#SomeName)"...
text = svgAttr
if not text or text == "none":
return None
if text in predefined.split():
return self.color_converter(getattr(colors, text))
elif text == "currentColor":
return "currentColor"
elif len(text) == 7 and text[0] == '#':
return self.color_converter(colors.HexColor(text))
elif len(text) == 4 and text[0] == '#':
return self.color_converter(colors.HexColor('#' + 2*text[1] + 2*text[2] + 2*text[3]))
elif text.startswith('rgb') and '%' not in text:
t = text[3:].strip('()')
tup = [h[2:] for h in [hex(int(num)) for num in t.split(',')]]
tup = [(2 - len(h)) * '0' + h for h in tup]
col = "#%s%s%s" % tuple(tup)
return self.color_converter(colors.HexColor(col))
elif text.startswith('rgb') and '%' in text:
t = text[3:].replace('%', '').strip('()')
tup = (float(val)/100.0 for val in t.split(','))
return self.color_converter(colors.Color(*tup))
logger.warning("Can't handle color: %s" % text)
return None
def convertLineJoin(self, svgAttr):
return {"miter":0, "round":1, "bevel":2}[svgAttr]
def convertLineCap(self, svgAttr):
return {"butt":0, "round":1, "square":2}[svgAttr]
def convertDashArray(self, svgAttr):
strokeDashArray = self.convertLengthList(svgAttr)
return strokeDashArray
def convertDashOffset(self, svgAttr):
strokeDashOffset = self.convertLength(svgAttr)
return strokeDashOffset
def convertFontFamily(self, svgAttr):
if not svgAttr:
return ''
# very hackish
font_mapping = {
"sans-serif": "Helvetica",
"serif": "Times-Roman",
"times": "Times-Roman",
"monospace": "Courier",
}
font_names = [
font_mapping.get(font_name.lower(), font_name)
for font_name in self.split_attr_list(svgAttr)
]
non_exact_matches = []
for font_name in font_names:
font_name, exact = find_font(font_name)
if exact:
return font_name
elif font_name:
non_exact_matches.append(font_name)
if non_exact_matches:
return non_exact_matches[0]
else:
logger.warning("Unable to find a suitable font for 'font-family:%s'" % svgAttr)
return DEFAULT_FONT_NAME
class ElementWrapper(object):
    """
    lxml element wrapper to partially match the API from cssselect2.ElementWrapper
    so as element can be passed to rules.match().
    """
    in_html_document = False

    def __init__(self, obj):
        # obj: the wrapped lxml element.
        self.object = obj

    @property
    def id(self):
        """Value of the element's 'id' attribute, or None when absent."""
        return self.object.attrib.get('id')

    @property
    def etree_element(self):
        """The underlying lxml element."""
        return self.object

    @property
    def parent(self):
        """Wrapped parent element, or None at the tree root."""
        par = self.object.getparent()
        return ElementWrapper(par) if par is not None else None

    @property
    def classes(self):
        """List of class names from the 'class' attribute ([] when absent)."""
        cl = self.object.attrib.get('class')
        return split_whitespace(cl) if cl is not None else []

    @property
    def local_name(self):
        """Tag name without any namespace prefix."""
        return node_name(self.object)

    @property
    def namespace_url(self):
        """Namespace URI of the element's tag, or None for unqualified tags."""
        # Bug fix: the namespace string was computed but never returned,
        # so this property always evaluated to None.
        if '}' in self.object.tag:
            return self.object.tag.split('}')[0][1:]

    def iter_ancestors(self):
        """Yield wrapped ancestors from the parent up to the root."""
        element = self
        while element.parent is not None:
            element = element.parent
            yield element

    def apply_rules(self, rules):
        """Write matching CSS declarations as element attributes.

        Existing (presentation) attributes win over CSS rules, hence the
        'not in attrib' guard.
        """
        matches = rules.match(self)
        for match in matches:
            attr_dict = match[3][1]
            for attr, val in attr_dict.items():
                if attr not in self.object.attrib:
                    try:
                        self.object.attrib[attr] = val
                    except ValueError:
                        # lxml rejects some attribute names/values; skip them.
                        pass
        # Set marker on the node to not apply rules more than once
        self.object.set('__rules_applied', '1')
class NodeTracker(ElementWrapper):
    """An object wrapper keeping track of arguments to certain method calls.

    Instances wrap an object and store all arguments to one special
    method, getAttribute(name), in a list of unique elements, usedAttrs.
    """

    def __init__(self, obj):
        super(NodeTracker, self).__init__(obj)
        # Attribute names queried so far, unique, in first-use order.
        self.usedAttrs = []

    def getAttribute(self, name):
        """Return the node attribute *name* ('' if absent), recording its use."""
        used = self.usedAttrs
        if name not in used:
            used.append(name)
        # Delegate the actual lookup to the wrapped lxml node.
        return self.object.attrib.get(name, '')

    def __getattr__(self, name):
        # Anything not defined on the tracker resolves on the wrapped node.
        return getattr(self.object, name)
class CircularRefError(Exception):
    """Raised when SVG file references (xlink:href) form a circular chain."""
    pass
class ExternalSVG:
    """Lazily-rendered wrapper around an SVG file referenced by another one."""

    def __init__(self, path, renderer):
        # Parse immediately, but defer the (possibly expensive) rendering
        # until a fragment is actually requested.
        self.root_node = load_svg_file(path)
        chain = renderer._parent_chain + [renderer.source_path]
        self.renderer = SvgRenderer(path, parent_svgs=chain)
        self.rendered = False

    def get_fragment(self, fragment):
        """Return the definition registered under *fragment*, rendering once."""
        if not self.rendered:
            self.renderer.render(self.root_node)
            self.rendered = True
        return self.renderer.definitions.get(fragment)
### the main meat ###
class SvgRenderer:
    """Renderer that renders an SVG file on a ReportLab Drawing instance.

    This is the base class for walking over an SVG DOM document and
    transforming it into a ReportLab Drawing instance.
    """

    def __init__(self, path, color_converter=None, parent_svgs=None):
        self.source_path = path
        self._parent_chain = parent_svgs or []  # To detect circular refs.
        self.attrConverter = Svg2RlgAttributeConverter(color_converter=color_converter)
        self.shape_converter = Svg2RlgShapeConverter(path, self.attrConverter)
        self.handled_shapes = self.shape_converter.get_handled_shapes()
        # id -> source node, filled while rendering, used by <use>/clip-path.
        self.definitions = {}
        # fragment id -> [(use_node, group), ...] for forward references.
        self.waiting_use_nodes = defaultdict(list)
        # path -> ExternalSVG cache for files referenced from this one.
        self._external_svgs = {}

    def render(self, svg_node):
        """Walk the root <svg> node and return a ReportLab Drawing."""
        node = NodeTracker(svg_node)
        main_group = self.renderSvg(node, outermost=True)
        for xlink in self.waiting_use_nodes.keys():
            # Fix: log message said "object width ID".
            logger.debug("Ignoring unavailable object with ID '%s'." % xlink)

        view_box = self.get_box(node, default_box=True)
        # Shift so the SVG top-left corner lands on the (y-flipped) canvas
        # top-left corner.
        main_group.translate(0 - view_box.x, -view_box.height - view_box.y)

        width, height = svg_node.attrib.get("width"), svg_node.attrib.get("height")
        width, height = map(self.attrConverter.convertLength, (width, height))
        drawing = Drawing(width, height)
        drawing.add(main_group)
        return drawing

    def renderNode(self, node, parent=None):
        """Render one DOM node (and its subtree) into *parent* (a Group)."""
        n = NodeTracker(node)
        nid = n.getAttribute("id")
        ignored = False
        item = None
        name = node_name(node)

        clipping = self.get_clippath(n)
        if name == "svg":
            item = self.renderSvg(n)
            parent.add(item)
        elif name == "defs":
            # Rendered only to register definitions; not added to the output.
            item = self.renderG(n)
        elif name == 'a':
            item = self.renderA(n)
            parent.add(item)
        elif name == 'g':
            display = n.getAttribute("display")
            item = self.renderG(n, clipping=clipping)
            if display != "none":
                parent.add(item)
        elif name == "style":
            self.renderStyle(n)
        elif name == "symbol":
            item = self.renderSymbol(n)
            parent.add(item)
        elif name == "use":
            item = self.renderUse(n, clipping=clipping)
            parent.add(item)
        elif name == "clipPath":
            item = self.renderG(n)
        elif name in self.handled_shapes:
            if name == 'image':
                # We resolve the image target at renderer level because it can
                # point to another SVG file or node which has to be rendered too.
                target = self.xlink_href_target(n)
                if target is None:
                    return
                elif isinstance(target, tuple):
                    # This is SVG content needed to be rendered
                    gr = Group()
                    renderer, node = target
                    renderer.renderNode(node, parent=gr)
                    self.apply_node_attr_to_group(n, gr)
                    parent.add(gr)
                    return
                else:
                    # Attaching target to node, so we can get it back in convertImage
                    n._resolved_target = target

            item = self.shape_converter.convertShape(name, n, clipping)
            display = n.getAttribute("display")
            if item and display != "none":
                parent.add(item)
        else:
            ignored = True
            logger.debug("Ignoring node: %s" % name)

        if not ignored:
            if nid and item:
                self.definitions[nid] = node
            if nid in self.waiting_use_nodes:
                # A <use> referenced this id before it was defined; render the
                # pending uses now.
                to_render = self.waiting_use_nodes.pop(nid)
                for use_node, group in to_render:
                    self.renderUse(use_node, group=group)
            self.print_unused_attributes(node, n)

    def get_clippath(self, node):
        """
        Return the clipping Path object referenced by the node 'clip-path'
        attribute, if any.
        """
        def get_path_from_node(node):
            # NOTE(review): descends/returns on the first child only — confirm
            # whether multi-child <clipPath> elements are intentionally ignored.
            for child in node.getchildren():
                if node_name(child) == 'path':
                    group = self.shape_converter.convertShape('path', NodeTracker(child))
                    return group.contents[-1]
                else:
                    return get_path_from_node(child)

        clip_path = node.getAttribute('clip-path')
        if clip_path:
            m = re.match(r'url\(#([^\)]*)\)', clip_path)
            if m:
                ref = m.groups()[0]
                if ref in self.definitions:
                    path = get_path_from_node(self.definitions[ref])
                    if path:
                        path = ClippingPath(copy_from=path)
                        return path

    def print_unused_attributes(self, node, n):
        """Debug-log SVG attributes present on *node* that were never read."""
        if logger.level > logging.DEBUG:
            return
        all_attrs = self.attrConverter.getAllAttributes(node).keys()
        unused_attrs = [attr for attr in all_attrs if attr not in n.usedAttrs]
        if unused_attrs:
            logger.debug("Unused attrs: %s %s" % (node_name(n), unused_attrs))

    def apply_node_attr_to_group(self, node, group):
        """Fold the node's transform/x/y attributes into *group*'s transform."""
        getAttr = node.getAttribute
        transform, x, y = map(getAttr, ("transform", "x", "y"))
        if x or y:
            transform += " translate(%s, %s)" % (x or '0', y or '0')
        if transform:
            self.shape_converter.applyTransformOnGroup(transform, group)

    def xlink_href_target(self, node, group=None):
        """
        Return either:
            - a tuple (renderer, node) when the xlink:href attribute targets
              a vector file or node
            - the path to an image file for any raster image targets
            - None if any problem occurs
        """
        xlink_href = node.attrib.get('{http://www.w3.org/1999/xlink}href')
        if not xlink_href:
            return None

        # First handle any raster embedded image data
        match = re.match(r"^data:image/(jpeg|png);base64", xlink_href)
        if match:
            img_format = match.groups()[0]
            # Fix: base64.decodestring was deprecated and removed in
            # Python 3.9; decodebytes (available since 3.1) is equivalent.
            image_data = base64.decodebytes(xlink_href[(match.span(0)[1] + 1):].encode('ascii'))
            file_indicator, path = tempfile.mkstemp(suffix='.%s' % img_format)
            with open(path, 'wb') as fh:
                fh.write(image_data)
            # Close temporary file (as opened by tempfile.mkstemp)
            os.close(file_indicator)
            # this needs to be removed later, not here...
            # if exists(path): os.remove(path)
            return path

        # From here, we can assume this is a path.
        if '#' in xlink_href:
            iri, fragment = xlink_href.split('#', 1)
        else:
            iri, fragment = xlink_href, None

        if iri:
            # Only local relative paths are supported yet
            if not isinstance(self.source_path, str):
                logger.error(
                    "Unable to resolve image path '%s' as the SVG source is not a file system path." % iri
                )
                return None
            path = os.path.normpath(os.path.join(os.path.dirname(self.source_path), iri))
            if not os.access(path, os.R_OK):
                return None
            if path == self.source_path:
                # Self-referencing, ignore the IRI part
                iri = None

        if iri:
            if path.endswith('.svg'):
                if path in self._parent_chain:
                    logger.error("Circular reference detected in file.")
                    raise CircularRefError()
                if path not in self._external_svgs:
                    self._external_svgs[path] = ExternalSVG(path, self)
                ext_svg = self._external_svgs[path]
                if ext_svg.root_node is not None:
                    if fragment:
                        ext_frag = ext_svg.get_fragment(fragment)
                        if ext_frag is not None:
                            return ext_svg.renderer, ext_frag
                    else:
                        return ext_svg.renderer, ext_svg.root_node
            else:
                # A raster image path
                try:
                    # This will catch invalid images
                    PDFImage(path, 0, 0)
                except IOError:
                    logger.error("Unable to read the image %s. Skipping..." % path)
                    return None
                return path

        elif fragment:
            # A pointer to an internal definition
            if fragment in self.definitions:
                return self, self.definitions[fragment]
            else:
                # The missing definition should appear later in the file
                self.waiting_use_nodes[fragment].append((node, group))
                return DELAYED

    def renderTitle_(self, node):
        # Main SVG title attr. could be used in the PDF document info field.
        pass

    def renderDesc_(self, node):
        # Main SVG desc. attr. could be used in the PDF document info field.
        pass

    def get_box(self, svg_node, default_box=False):
        """Return the viewBox as a Box, or a width/height box when
        *default_box* is set and no viewBox attribute exists."""
        view_box = svg_node.getAttribute("viewBox")
        if view_box:
            view_box = self.attrConverter.convertLengthList(view_box)
            return Box(*view_box)
        if default_box:
            width, height = map(svg_node.getAttribute, ("width", "height"))
            width, height = map(self.attrConverter.convertLength, (width, height))
            return Box(0, 0, width, height)

    def renderSvg(self, node, outermost=False):
        """Render an <svg> element into a translated/scaled Group."""
        getAttr = node.getAttribute
        _saved_preserve_space = self.shape_converter.preserve_space
        self.shape_converter.preserve_space = getAttr("{%s}space" % XML_NS) == 'preserve'
        group = Group()
        for child in node.getchildren():
            self.renderNode(child, group)
        self.shape_converter.preserve_space = _saved_preserve_space

        # Translating
        if not outermost:
            x, y = map(getAttr, ("x", "y"))
            x, y = map(self.attrConverter.convertLength, (x, y))
            if x or y:
                group.translate(x or 0, y or 0)

        # Scaling
        view_box = self.get_box(node)
        if not view_box and outermost:
            # Apply only the 'reverse' y-scaling (PDF 0,0 is bottom left)
            group.scale(1, -1)
        elif view_box:
            x_scale, y_scale = 1, 1
            width, height = map(getAttr, ("width", "height"))
            width, height = map(self.attrConverter.convertLength, (width, height))
            if view_box.height != height:
                y_scale = height / view_box.height
            if view_box.width != width:
                x_scale = width / view_box.width
            group.scale(x_scale, y_scale * (-1 if outermost else 1))

        return group

    def renderG(self, node, clipping=None, display=1):
        """Render a <g> (or similar grouping) element; *display*=0 renders
        children without adding them (used for <symbol>)."""
        getAttr = node.getAttribute
        id, transform = map(getAttr, ("id", "transform"))
        gr = Group()
        if clipping:
            gr.add(clipping)
        for child in node.getchildren():
            item = self.renderNode(child, parent=gr)
            if item and display:
                gr.add(item)

        if transform:
            self.shape_converter.applyTransformOnGroup(transform, gr)

        return gr

    def renderStyle(self, node):
        # Install the <style> element's CSS rules on the attribute converter.
        self.attrConverter.css_rules = CSSMatcher(node.text)

    def renderSymbol(self, node):
        return self.renderG(node, display=0)

    def renderA(self, node):
        # currently nothing but a group...
        # there is no linking info stored in shapes, maybe a group should?
        return self.renderG(node)

    def renderUse(self, node, group=None, clipping=None):
        """Render a <use> element by cloning and rendering its target."""
        if group is None:
            group = Group()

        try:
            item = self.xlink_href_target(node, group=group)
        except CircularRefError:
            node.parent.object.remove(node.object)
            return group
        if item is None:
            return
        elif isinstance(item, str):
            logger.error("<use> nodes cannot reference bitmap image files")
            return
        elif item is DELAYED:
            return group
        else:
            item = item[1]  # [0] is the renderer, not used here.

        if clipping:
            group.add(clipping)
        if len(node.getchildren()) == 0:
            # Append a copy of the referenced node as the <use> child (if not already done)
            node.append(copy.deepcopy(item))
        self.renderNode(node.getchildren()[-1], parent=group)
        self.apply_node_attr_to_group(node, group)
        return group
class SvgShapeConverter:
    """An abstract SVG shape converter.

    Subclasses provide methods named 'convert<Shape>(node)' — e.g.
    convertRect, convertCircle, convertLine — each returning a shape
    object appropriate for the target format.
    """

    def __init__(self, path, attrConverter=None):
        # Source file path is kept for resolving relative references.
        self.svg_source_file = path
        # Whitespace handling flag, toggled via xml:space by the renderer.
        self.preserve_space = False
        self.attrConverter = attrConverter or Svg2RlgAttributeConverter()

    @classmethod
    def get_handled_shapes(cls):
        """Dynamically determine a list of handled shape elements based on
        convert<shape> method existence.
        """
        prefix = 'convert'
        return [attr[len(prefix):].lower()
                for attr in dir(cls) if attr.startswith(prefix)]
class Svg2RlgShapeConverter(SvgShapeConverter):
    """Converter from SVG shapes to RLG (ReportLab Graphics) shapes."""

    def convertShape(self, name, node, clipping=None):
        """Dispatch *node* to the matching convert<Name> method and return
        the shape, wrapped in a Group when a transform or clipping applies."""
        method_name = "convert%s" % name.capitalize()
        shape = getattr(self, method_name)(node)
        if not shape:
            return
        if name not in ('path', 'polyline', 'text'):
            # Only apply style where the convert method did not apply it.
            self.applyStyleOnShape(shape, node)
        transform = node.getAttribute("transform")
        if not (transform or clipping):
            return shape
        else:
            group = Group()
            if transform:
                self.applyTransformOnGroup(transform, group)
            if clipping:
                group.add(clipping)
            group.add(shape)
            return group

    def convertLine(self, node):
        """Convert an SVG <line> to an RLG Line."""
        getAttr = node.getAttribute
        x1, y1, x2, y2 = map(getAttr, ("x1", "y1", "x2", "y2"))
        x1, y1, x2, y2 = map(self.attrConverter.convertLength, (x1, y1, x2, y2))
        shape = Line(x1, y1, x2, y2)
        return shape

    def convertRect(self, node):
        """Convert an SVG <rect> (with optional corner radii) to an RLG Rect."""
        getAttr = node.getAttribute
        x, y, width, height = map(getAttr, ('x', 'y', "width", "height"))
        x, y, width, height = map(self.attrConverter.convertLength, (x, y, width, height))
        rx, ry = map(getAttr, ("rx", "ry"))
        rx, ry = map(self.attrConverter.convertLength, (rx, ry))
        shape = Rect(x, y, width, height, rx=rx, ry=ry)
        return shape

    def convertCircle(self, node):
        # not rendered if r == 0, error if r < 0.
        getAttr = node.getAttribute
        cx, cy, r = map(getAttr, ("cx", "cy", 'r'))
        cx, cy, r = map(self.attrConverter.convertLength, (cx, cy, r))
        shape = Circle(cx, cy, r)
        return shape

    def convertEllipse(self, node):
        """Convert an SVG <ellipse> to an RLG Ellipse."""
        getAttr = node.getAttribute
        cx, cy, rx, ry = map(getAttr, ("cx", "cy", "rx", "ry"))
        cx, cy, rx, ry = map(self.attrConverter.convertLength, (cx, cy, rx, ry))
        width, height = rx, ry
        shape = Ellipse(cx, cy, width, height)
        return shape

    def convertPolyline(self, node):
        """Convert an SVG <polyline>; returns a Group when a fill is needed
        (ReportLab does not fill PolyLine shapes), else a PolyLine."""
        getAttr = node.getAttribute
        points = getAttr("points")
        points = points.replace(',', ' ')
        points = points.split()
        points = list(map(self.attrConverter.convertLength, points))
        if len(points) % 2 != 0 or len(points) == 0:
            # Odd number of coordinates or no coordinates, invalid polyline
            return None

        polyline = PolyLine(points)
        self.applyStyleOnShape(polyline, node)

        has_fill = self.attrConverter.findAttr(node, 'fill') not in ('', 'none')

        if has_fill:
            # ReportLab doesn't fill polylines, so we are creating a polygon
            # polygon copy of the polyline, but without stroke.
            group = Group()
            polygon = Polygon(points)
            self.applyStyleOnShape(polygon, node)
            polygon.strokeColor = None
            group.add(polygon)
            group.add(polyline)
            return group

        return polyline

    def convertPolygon(self, node):
        """Convert an SVG <polygon> to an RLG Polygon."""
        getAttr = node.getAttribute
        points = getAttr("points")
        points = points.replace(',', ' ')
        points = points.split()
        points = list(map(self.attrConverter.convertLength, points))
        if len(points) % 2 != 0 or len(points) == 0:
            # Odd number of coordinates or no coordinates, invalid polygon
            return None
        shape = Polygon(points)

        return shape

    def clean_text(self, text, preserve_space):
        """Text cleaning as per https://www.w3.org/TR/SVG/text.html#WhiteSpace
        """
        if text is None:
            return
        if preserve_space:
            text = text.replace('\r\n', ' ').replace('\n', ' ').replace('\t', ' ')
        else:
            text = text.replace('\r\n', '').replace('\n', '').replace('\t', ' ')
            text = text.strip()
            # Collapse runs of spaces into a single space.
            while ('  ' in text):
                text = text.replace('  ', ' ')
        return text

    def convertText(self, node):
        """Convert an SVG <text> element (with <tspan> children) into a
        Group of String shapes (y-flipped to PDF coordinates)."""
        attrConv = self.attrConverter
        xml_space = node.getAttribute("{%s}space" % XML_NS)
        if xml_space:
            preserve_space = xml_space == 'preserve'
        else:
            preserve_space = self.preserve_space

        gr = Group()

        # Widths of the fragments rendered so far, used to advance x when a
        # fragment does not carry its own x coordinate.
        frag_lengths = []

        dx0, dy0 = 0, 0
        x1, y1 = 0, 0
        ff = attrConv.findAttr(node, "font-family") or DEFAULT_FONT_NAME
        ff = attrConv.convertFontFamily(ff)
        fs = attrConv.findAttr(node, "font-size") or "12"
        fs = attrConv.convertLength(fs)
        convertLength = partial(attrConv.convertLength, em_base=fs)
        x, y = map(node.getAttribute, ('x', 'y'))
        x, y = map(convertLength, (x, y))
        for c in itertools.chain([node], node.getchildren()):
            has_x, has_y = False, False
            dx, dy = 0, 0
            baseLineShift = 0
            if node_name(c) == 'text':
                text = self.clean_text(c.text, preserve_space)
                if not text:
                    continue
            elif node_name(c) == 'tspan':
                text = self.clean_text(c.text, preserve_space)
                if not text:
                    continue
                x1, y1, dx, dy = [c.attrib.get(name, '') for name in ("x", "y", "dx", "dy")]
                has_x, has_y = (x1 != '', y1 != '')
                x1, y1, dx, dy = map(convertLength, (x1, y1, dx, dy))
                dx0 = dx0 + dx
                dy0 = dy0 + dy
                baseLineShift = c.attrib.get("baseline-shift", '0')
                if baseLineShift in ("sub", "super", "baseline"):
                    baseLineShift = {"sub":-fs/2, "super":fs/2, "baseline":0}[baseLineShift]
                else:
                    # Percentage/length shifts are resolved against font size.
                    baseLineShift = convertLength(baseLineShift, fs)
            else:
                continue

            frag_lengths.append(stringWidth(text, ff, fs))
            new_x = (x1 + dx) if has_x else (x + dx0 + sum(frag_lengths[:-1]))
            new_y = (y1 + dy) if has_y else (y + dy0)
            shape = String(new_x, -(new_y - baseLineShift), text)
            self.applyStyleOnShape(shape, node)
            if node_name(c) == 'tspan':
                self.applyStyleOnShape(shape, c)

            gr.add(shape)

        gr.scale(1, -1)

        return gr

    def convertPath(self, node):
        """Convert an SVG <path> 'd' attribute into an RLG Path wrapped in a
        Group (a second, close-all-subpaths copy is added when a fill is
        required, since ReportLab does not fill open paths)."""
        d = node.getAttribute('d')
        if not d:
            return None
        normPath = normalise_svg_path(d)
        path = Path()
        points = path.points
        # Track subpaths needing to be closed later
        unclosed_subpath_pointers = []
        subpath_start = []
        lastop = ''

        for i in range(0, len(normPath), 2):
            op, nums = normPath[i:i+2]

            if op in ('m', 'M') and i > 0 and path.operators[-1] != _CLOSEPATH:
                unclosed_subpath_pointers.append(len(path.operators))

            # moveto absolute
            if op == 'M':
                path.moveTo(*nums)
                subpath_start = points[-2:]
            # lineto absolute
            elif op == 'L':
                path.lineTo(*nums)

            # moveto relative
            elif op == 'm':
                if len(points) >= 2:
                    if lastop in ('Z', 'z'):
                        starting_point = subpath_start
                    else:
                        starting_point = points[-2:]
                    xn, yn = starting_point[0] + nums[0], starting_point[1] + nums[1]
                    path.moveTo(xn, yn)
                else:
                    path.moveTo(*nums)
                subpath_start = points[-2:]
            # lineto relative
            elif op == 'l':
                xn, yn = points[-2] + nums[0], points[-1] + nums[1]
                path.lineTo(xn, yn)

            # horizontal/vertical line absolute
            elif op == 'H':
                path.lineTo(nums[0], points[-1])
            elif op == 'V':
                path.lineTo(points[-2], nums[0])

            # horizontal/vertical line relative
            elif op == 'h':
                path.lineTo(points[-2] + nums[0], points[-1])
            elif op == 'v':
                path.lineTo(points[-2], points[-1] + nums[0])

            # cubic bezier, absolute
            elif op == 'C':
                path.curveTo(*nums)
            elif op == 'S':
                # Smooth curveto: first control point is the reflection of the
                # previous second control point about the current point.
                x2, y2, xn, yn = nums
                if len(points) < 4 or lastop not in {'c', 'C', 's', 'S'}:
                    xp, yp, x0, y0 = points[-2:] * 2
                else:
                    xp, yp, x0, y0 = points[-4:]
                xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
                path.curveTo(xi, yi, x2, y2, xn, yn)

            # cubic bezier, relative
            elif op == 'c':
                xp, yp = points[-2:]
                x1, y1, x2, y2, xn, yn = nums
                path.curveTo(xp + x1, yp + y1, xp + x2, yp + y2, xp + xn, yp + yn)
            elif op == 's':
                x2, y2, xn, yn = nums
                if len(points) < 4 or lastop not in {'c', 'C', 's', 'S'}:
                    xp, yp, x0, y0 = points[-2:] * 2
                else:
                    xp, yp, x0, y0 = points[-4:]
                xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
                path.curveTo(xi, yi, x0 + x2, y0 + y2, x0 + xn, y0 + yn)

            # quadratic bezier, absolute
            elif op == 'Q':
                # RLG has no quadratic primitive; promote to cubic.
                x0, y0 = points[-2:]
                x1, y1, xn, yn = nums
                (x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
                    convert_quadratic_to_cubic_path((x0, y0), (x1, y1), (xn, yn))
                path.curveTo(x1, y1, x2, y2, xn, yn)
            elif op == 'T':
                # NOTE(review): per the SVG spec the reflected control point
                # should only be used when the previous op was Q/q/T/t; here it
                # is reflected whenever 4+ points exist — confirm intent.
                if len(points) < 4:
                    xp, yp, x0, y0 = points[-2:] * 2
                else:
                    xp, yp, x0, y0 = points[-4:]
                xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
                xn, yn = nums
                (x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
                    convert_quadratic_to_cubic_path((x0, y0), (xi, yi), (xn, yn))
                path.curveTo(x1, y1, x2, y2, xn, yn)

            # quadratic bezier, relative
            elif op == 'q':
                x0, y0 = points[-2:]
                x1, y1, xn, yn = nums
                x1, y1, xn, yn = x0 + x1, y0 + y1, x0 + xn, y0 + yn
                (x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
                    convert_quadratic_to_cubic_path((x0, y0), (x1, y1), (xn, yn))
                path.curveTo(x1, y1, x2, y2, xn, yn)
            elif op == 't':
                # See the NOTE(review) on 'T' above; same reflection caveat.
                if len(points) < 4:
                    xp, yp, x0, y0 = points[-2:] * 2
                else:
                    xp, yp, x0, y0 = points[-4:]
                x0, y0 = points[-2:]
                xn, yn = nums
                xn, yn = x0 + xn, y0 + yn
                xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
                (x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
                    convert_quadratic_to_cubic_path((x0, y0), (xi, yi), (xn, yn))
                path.curveTo(x1, y1, x2, y2, xn, yn)

            # elliptical arc
            elif op in ('A', 'a'):
                rx, ry, phi, fA, fS, x2, y2 = nums
                x1, y1 = points[-2:]
                if op == 'a':
                    x2 += x1
                    y2 += y1
                if abs(rx) <= 1e-10 or abs(ry) <= 1e-10:
                    # Degenerate radii: draw a straight line instead.
                    path.lineTo(x2, y2)
                else:
                    bp = bezier_arc_from_end_points(x1, y1, rx, ry, phi, fA, fS, x2, y2)
                    for _, _, x1, y1, x2, y2, xn, yn in bp:
                        path.curveTo(x1, y1, x2, y2, xn, yn)

            # close path
            elif op in ('Z', 'z'):
                path.closePath()

            else:
                logger.debug("Suspicious path operator: %s" % op)
            lastop = op

        gr = Group()
        self.applyStyleOnShape(path, node)

        if path.operators[-1] != _CLOSEPATH:
            unclosed_subpath_pointers.append(len(path.operators))

        if unclosed_subpath_pointers and path.fillColor is not None:
            # ReportLab doesn't fill unclosed paths, so we are creating a copy
            # of the path with all subpaths closed, but without stroke.
            # https://bitbucket.org/rptlab/reportlab/issues/99/
            closed_path = NoStrokePath(copy_from=path)
            for pointer in reversed(unclosed_subpath_pointers):
                closed_path.operators.insert(pointer, _CLOSEPATH)
            gr.add(closed_path)
            path.fillColor = None

        gr.add(path)
        return gr

    def convertImage(self, node):
        """Convert an <image> node (target already resolved by the renderer
        into node._resolved_target) into a y-flipped RLG Group."""
        if not haveImages:
            logger.warning(
                "Unable to handle embedded images. Maybe the pillow library is missing?"
            )
            return None

        getAttr = node.getAttribute
        x, y, width, height = map(getAttr, ('x', 'y', "width", "height"))
        x, y, width, height = map(self.attrConverter.convertLength, (x, y, width, height))
        image = node._resolved_target
        image = Image(int(x), int(y + height), int(width), int(height), image)

        group = Group(image)
        group.translate(0, (y + height) * 2)
        group.scale(1, -1)
        return group

    def applyTransformOnGroup(self, transform, group):
        """Apply an SVG transformation to a RL Group shape.

        The transformation is the value of an SVG transform attribute
        like transform="scale(1, -1) translate(10, 30)".

        rotate(<angle> [<cx> <cy>]) is equivalent to:
          translate(<cx> <cy>) rotate(<angle>) translate(-<cx> -<cy>)
        """
        tr = self.attrConverter.convertTransform(transform)
        for op, values in tr:
            if op == "scale":
                if not isinstance(values, tuple):
                    # Single-value scale applies uniformly to both axes.
                    values = (values, values)
                group.scale(*values)
            elif op == "translate":
                if isinstance(values, (int, float)):
                    # From the SVG spec: If <ty> is not provided, it is assumed to be zero.
                    values = values, 0
                group.translate(*values)
            elif op == "rotate":
                if not isinstance(values, tuple) or len(values) == 1:
                    group.rotate(values)
                elif len(values) == 3:
                    angle, cx, cy = values
                    group.translate(cx, cy)
                    group.rotate(angle)
                    group.translate(-cx, -cy)
            elif op == "skewX":
                group.skew(values, 0)
            elif op == "skewY":
                group.skew(0, values)
            elif op == "matrix":
                group.transform = values
            else:
                logger.debug("Ignoring transform: %s %s" % (op, values))

    def applyStyleOnShape(self, shape, node, only_explicit=False):
        """
        Apply styles from an SVG element to an RLG shape.
        If only_explicit is True, only attributes really present are applied.
        """
        # RLG-specific: all RLG shapes
        "Apply style attributes of a sequence of nodes to an RL shape."

        # tuple format: (svgAttr, rlgAttr, converter, default)
        mappingN = (
            ("fill", "fillColor", "convertColor", "black"),
            ("fill-opacity", "fillOpacity", "convertOpacity", 1),
            ("fill-rule", "_fillRule", "convertFillRule", "nonzero"),
            ("stroke", "strokeColor", "convertColor", "none"),
            ("stroke-width", "strokeWidth", "convertLength", "1"),
            ("stroke-opacity", "strokeOpacity", "convertOpacity", 1),
            ("stroke-linejoin", "strokeLineJoin", "convertLineJoin", "0"),
            ("stroke-linecap", "strokeLineCap", "convertLineCap", "0"),
            ("stroke-dasharray", "strokeDashArray", "convertDashArray", "none"),
        )
        mappingF = (
            ("font-family", "fontName", "convertFontFamily", DEFAULT_FONT_NAME),
            ("font-size", "fontSize", "convertLength", "12"),
            ("text-anchor", "textAnchor", "id", "start"),
        )

        if shape.__class__ == Group:
            # Recursively apply style on Group subelements
            for subshape in shape.contents:
                self.applyStyleOnShape(subshape, node, only_explicit=only_explicit)
            return

        ac = self.attrConverter
        for mapping in (mappingN, mappingF):
            # Font attributes only make sense on text (String) shapes.
            if shape.__class__ != String and mapping == mappingF:
                continue
            for (svgAttrName, rlgAttr, func, default) in mapping:
                svgAttrValue = ac.findAttr(node, svgAttrName)
                if svgAttrValue == '':
                    if only_explicit:
                        continue
                    else:
                        svgAttrValue = default
                if svgAttrValue == "currentColor":
                    svgAttrValue = ac.findAttr(node.getparent(), "color") or default
                try:
                    meth = getattr(ac, func)
                    setattr(shape, rlgAttr, meth(svgAttrValue))
                except (AttributeError, KeyError, ValueError):
                    # Unsupported values are skipped; RLG defaults apply.
                    pass
        if getattr(shape, 'fillOpacity', None) is not None and shape.fillColor:
            # Propagate fill-opacity onto the colour's alpha channel.
            shape.fillColor.alpha = shape.fillOpacity
def load_svg_file(path):
    """Parse the SVG file at *path* and return its root element.

    Returns None (after logging an error) when parsing fails.
    """
    # recover=True lets lxml salvage as much as possible from broken files.
    parser = etree.XMLParser(remove_comments=True, recover=True)
    try:
        root = etree.parse(path, parser=parser).getroot()
    except Exception as exc:
        logger.error("Failed to load input file! (%s)" % exc)
        return None
    return root
def node_name(node):
    """Return lxml node name without the namespace prefix.

    Returns None for nodes without a usable string tag (e.g. comments).
    """
    try:
        return node.tag.rpartition('}')[2]
    except AttributeError:
        pass
def monkeypatch_reportlab():
    """
    https://bitbucket.org/rptlab/reportlab/issues/95/
    ReportLab always use 'Even-Odd' filling mode for paths, this patch forces
    RL to honor the path fill rule mode (possibly 'Non-Zero Winding') instead.
    """
    from reportlab.pdfgen.canvas import Canvas
    from reportlab.graphics import shapes

    original_renderPath = shapes._renderPath

    def patchedRenderPath(path, drawFuncs, **kwargs):
        # Patched method to transfer fillRule from Path to PDFPathObject
        # Get back from bound method to instance
        try:
            drawFuncs[0].__self__.fillMode = path._fillRule
        except AttributeError:
            pass
        return original_renderPath(path, drawFuncs, **kwargs)

    shapes._renderPath = patchedRenderPath

    original_drawPath = Canvas.drawPath

    def patchedDrawPath(self, path, **kwargs):
        # Temporarily switch the canvas fill mode to the path's own mode.
        current = self._fillMode
        if hasattr(path, 'fillMode'):
            self._fillMode = path.fillMode
        else:
            self._fillMode = FILL_NON_ZERO
        try:
            original_drawPath(self, path, **kwargs)
        finally:
            # Fix: restore the previous fill mode even if drawing raised,
            # so one failed path cannot leave the canvas in the wrong mode.
            self._fillMode = current

    Canvas.drawPath = patchedDrawPath


monkeypatch_reportlab()
|
deeplook/svglib | svglib/svglib.py | monkeypatch_reportlab | python | def monkeypatch_reportlab():
from reportlab.pdfgen.canvas import Canvas
from reportlab.graphics import shapes
original_renderPath = shapes._renderPath
def patchedRenderPath(path, drawFuncs, **kwargs):
# Patched method to transfer fillRule from Path to PDFPathObject
# Get back from bound method to instance
try:
drawFuncs[0].__self__.fillMode = path._fillRule
except AttributeError:
pass
return original_renderPath(path, drawFuncs, **kwargs)
shapes._renderPath = patchedRenderPath
original_drawPath = Canvas.drawPath
def patchedDrawPath(self, path, **kwargs):
current = self._fillMode
if hasattr(path, 'fillMode'):
self._fillMode = path.fillMode
else:
self._fillMode = FILL_NON_ZERO
original_drawPath(self, path, **kwargs)
self._fillMode = current
Canvas.drawPath = patchedDrawPath | https://bitbucket.org/rptlab/reportlab/issues/95/
ReportLab always use 'Even-Odd' filling mode for paths, this patch forces
RL to honor the path fill rule mode (possibly 'Non-Zero Winding') instead. | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L1370-L1399 | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""A library for reading and converting SVG.
This is a converter from SVG to RLG (ReportLab Graphics) drawings.
It converts mainly basic shapes, paths and simple text. The intended
usage is either as module within other projects:
from svglib.svglib import svg2rlg
drawing = svg2rlg("foo.svg")
or from the command-line where it is usable as an SVG to PDF converting
tool named sv2pdf (which should also handle SVG files compressed with
gzip and extension .svgz).
"""
import copy
import gzip
import itertools
import logging
import os
import re
import base64
import tempfile
import shlex
import shutil
import subprocess
import sys
from collections import defaultdict, namedtuple
from functools import partial
from reportlab.pdfbase.pdfmetrics import registerFont, stringWidth
from reportlab.pdfbase.ttfonts import TTFError, TTFont
from reportlab.pdfgen.canvas import FILL_EVEN_ODD, FILL_NON_ZERO
from reportlab.pdfgen.pdfimages import PDFImage
from reportlab.graphics.shapes import (
_CLOSEPATH, Circle, Drawing, Ellipse, Group, Image, Line, Path, PolyLine,
Polygon, Rect, String,
)
from reportlab.lib import colors
from reportlab.lib.units import pica, toLength
from reportlab.lib.utils import haveImages
from lxml import etree
import cssselect2
import tinycss2
from .utils import (
bezier_arc_from_end_points, convert_quadratic_to_cubic_path,
normalise_svg_path,
)
__version__ = '0.9.0'
__license__ = 'LGPL 3'
__author__ = 'Dinu Gherman'
__date__ = '2018-12-08'
XML_NS = 'http://www.w3.org/XML/1998/namespace'
# A sentinel to identify a situation where a node reference a fragment not yet defined.
DELAYED = object()
STANDARD_FONT_NAMES = (
'Times-Roman', 'Times-Italic', 'Times-Bold', 'Times-BoldItalic',
'Helvetica', 'Helvetica-Oblique', 'Helvetica-Bold', 'Helvetica-BoldOblique',
'Courier', 'Courier-Oblique', 'Courier-Bold', 'Courier-BoldOblique',
'Symbol', 'ZapfDingbats',
)
DEFAULT_FONT_NAME = "Helvetica"
_registered_fonts = {}
logger = logging.getLogger(__name__)
Box = namedtuple('Box', ['x', 'y', 'width', 'height'])
split_whitespace = re.compile(r'[^ \t\r\n\f]+').findall
def find_font(font_name):
    """Return the font and a Boolean indicating if the match is exact."""
    # The base-14 PDF fonts never need registration.
    if font_name in STANDARD_FONT_NAMES:
        return font_name, True
    if font_name in _registered_fonts:
        return font_name, _registered_fonts[font_name]
    NOT_FOUND = (None, False)
    try:
        # First attempt: register a same-named TrueType file, relying on
        # ReportLab's own font search path.
        registerFont(TTFont(font_name, '%s.ttf' % font_name))
    except TTFError:
        pass
    else:
        _registered_fonts[font_name] = True
        return font_name, True
    # Second attempt: ask Fontconfig (fc-match) for a candidate file.
    try:
        pipe = subprocess.Popen(
            ['fc-match', '-s', '--format=%{file}\\n', font_name],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        output = pipe.communicate()[0].decode(sys.getfilesystemencoding())
        font_path = output.split('\n')[0]
    except OSError:
        # fc-match binary not available.
        return NOT_FOUND
    try:
        registerFont(TTFont(font_name, font_path))
    except TTFError:
        return NOT_FOUND
    # Fontconfig may return a default font totally unrelated with font_name
    exact = font_name.lower() in os.path.basename(font_path).lower()
    _registered_fonts[font_name] = exact
    return font_name, exact
class NoStrokePath(Path):
    """
    A Path variant that always reports disabled stroke properties,
    whatever values have actually been assigned to it.
    """
    def __init__(self, *args, **kwargs):
        source = kwargs.pop('copy_from', None)
        Path.__init__(self, *args, **kwargs)  # we're old-style class on PY2
        if source:
            # Clone all state (points, operators, style) from the given path.
            self.__dict__.update(copy.deepcopy(source.__dict__))

    def getProperties(self, *args, **kwargs):
        # __getattribute__ wouldn't suit, as RL is directly accessing self.__dict__
        properties = Path.getProperties(self, *args, **kwargs)
        for key, neutral in (('strokeWidth', 0), ('strokeColor', None)):
            if key in properties:
                properties[key] = neutral
        return properties
class ClippingPath(Path):
    """A Path flagged as a clipping path; its fill color is never reported."""
    def __init__(self, *args, **kwargs):
        source = kwargs.pop('copy_from', None)
        Path.__init__(self, *args, **kwargs)
        if source:
            # Clone all state from the path being turned into a clip path.
            self.__dict__.update(copy.deepcopy(source.__dict__))
        self.isClipPath = 1

    def getProperties(self, *args, **kwargs):
        properties = Path.getProperties(self, *args, **kwargs)
        if 'fillColor' in properties:
            properties['fillColor'] = None
        return properties
class CSSMatcher(cssselect2.Matcher):
    """Parse the content of a <style> element and index its rules.

    Each compiled selector is registered with a payload of
    (selector_string, {property: value}) so matched declarations can later
    be applied to element attributes.
    """
    def __init__(self, style_content):
        super(CSSMatcher, self).__init__()
        self.rules = tinycss2.parse_stylesheet(
            style_content, skip_comments=True, skip_whitespace=True
        )
        for rule in self.rules:
            if not rule.prelude:
                continue
            selectors = cssselect2.compile_selector_list(rule.prelude)
            selector_string = tinycss2.serialize(rule.prelude)
            # Split each declaration on the *first* colon only, so that
            # values containing colons (e.g. url(data:...) or url(http://...))
            # are kept intact instead of being truncated.
            content_dict = {}
            for declaration in tinycss2.serialize(rule.content).split(';'):
                if ':' not in declaration:
                    continue
                prop, value = declaration.split(':', 1)
                content_dict[prop.strip()] = value.strip()
            payload = (selector_string, content_dict)
            for selector in selectors:
                self.add_selector(selector, payload)
# Attribute converters (from SVG to RLG)
class AttributeConverter(object):
    "An abstract class to locate and convert attributes in a DOM instance."

    def __init__(self):
        # Optional CSSMatcher built from a <style> element; applied lazily.
        self.css_rules = None

    def parseMultiAttributes(self, line):
        """Try parsing compound attribute string.

        Return a dictionary with single attributes in 'line'.
        """
        attrs = [a.strip() for a in line.split(';')]
        new_attrs = {}
        for a in attrs:
            if not a:
                continue
            # Split on the first colon only, so values that themselves
            # contain colons (e.g. URLs) are preserved intact instead of
            # raising ValueError on unpacking.
            k, v = a.split(':', 1)
            new_attrs[k.strip()] = v.strip()
        return new_attrs

    def findAttr(self, svgNode, name):
        """Search an attribute with some name in some node or above.

        First the node is searched, then its style attribute, then
        the search continues in the node's parent node. If no such
        attribute is found, '' is returned.
        """
        # This needs also to lookup values like "url(#SomeName)"...
        if self.css_rules is not None and not svgNode.attrib.get('__rules_applied', False):
            # Apply matching CSS declarations once per node; the node is
            # marked with '__rules_applied' afterwards.
            if isinstance(svgNode, NodeTracker):
                svgNode.apply_rules(self.css_rules)
            else:
                ElementWrapper(svgNode).apply_rules(self.css_rules)
        attr_value = svgNode.attrib.get(name, '').strip()
        if attr_value and attr_value != "inherit":
            return attr_value
        elif svgNode.attrib.get("style"):
            attrs = self.parseMultiAttributes(svgNode.attrib.get("style"))
            if name in attrs:
                return attrs[name]
        if svgNode.getparent() is not None:
            return self.findAttr(svgNode.getparent(), name)
        return ''

    def getAllAttributes(self, svgNode):
        "Return a dictionary of all attributes of svgNode or those inherited by it."
        attrs = {}
        # Attributes of an enclosing <g> element are inherited.
        if node_name(svgNode.getparent()) == 'g':
            attrs.update(self.getAllAttributes(svgNode.getparent()))
        style = svgNode.attrib.get("style")
        if style:
            attrs.update(self.parseMultiAttributes(style))
        for key, value in svgNode.attrib.items():
            if key != "style":
                attrs[key] = value
        return attrs

    def id(self, svgAttr):
        "Return attribute as is."
        return svgAttr

    def convertTransform(self, svgAttr):
        """Parse transform attribute string.

        E.g. "scale(2) translate(10,20)"
             -> [("scale", 2), ("translate", (10,20))]
        """
        line = svgAttr.strip()
        ops = line[:]
        brackets = []
        indices = []
        # Record the positions of every opening/closing parenthesis.
        for i, lin in enumerate(line):
            if lin in "()":
                brackets.append(i)
        for i in range(0, len(brackets), 2):
            bi, bj = brackets[i], brackets[i+1]
            subline = line[bi+1:bj]
            subline = subline.strip()
            subline = subline.replace(',', ' ')
            subline = re.sub("[ ]+", ',', subline)
            try:
                if ',' in subline:
                    # Several numbers -> tuple of floats.
                    indices.append(tuple(float(num) for num in subline.split(',')))
                else:
                    indices.append(float(subline))
            except ValueError:
                continue
            # Blank out the parenthesized part, leaving only operator names.
            ops = ops[:bi] + ' '*(bj-bi+1) + ops[bj+1:]
        ops = ops.replace(',', ' ').split()
        if len(ops) != len(indices):
            logger.warning("Unable to parse transform expression '%s'" % svgAttr)
            return []
        result = []
        for i, op in enumerate(ops):
            result.append((op, indices[i]))
        return result
class Svg2RlgAttributeConverter(AttributeConverter):
    "A concrete SVG to RLG attribute converter."
    def __init__(self, color_converter=None):
        super(Svg2RlgAttributeConverter, self).__init__()
        # Optional hook applied to every converted color (e.g. for CMYK output).
        self.color_converter = color_converter or self.identity_color_converter
    @staticmethod
    def identity_color_converter(c):
        # Default color hook: pass colors through unchanged.
        return c
    @staticmethod
    def split_attr_list(attr):
        # Split an attribute on commas/whitespace, honoring quoting (shlex).
        return shlex.split(attr.strip().replace(',', ' '))
    def convertLength(self, svgAttr, percentOf=100, em_base=12):
        "Convert length to points."
        text = svgAttr
        if not text:
            return 0.0
        if ' ' in text.replace(',', ' ').strip():
            # Multiple values given; only the first one is used.
            logger.debug("Only getting first value of %s" % text)
            text = text.replace(',', ' ').split()[0]
        if text.endswith('%'):
            # Percentages are resolved against the caller-provided reference.
            logger.debug("Fiddling length unit: %")
            return float(text[:-1]) / 100 * percentOf
        elif text.endswith("pc"):
            return float(text[:-2]) * pica
        elif text.endswith("pt"):
            # SVG pt is treated as 1.25 px (CSS 96 dpi convention).
            return float(text[:-2]) * 1.25
        elif text.endswith("em"):
            # em is relative to the current font size (em_base).
            return float(text[:-2]) * em_base
        elif text.endswith("px"):
            return float(text[:-2])
        if "ex" in text:
            logger.warning("Ignoring unit ex")
            text = text.replace("ex", '')
        text = text.strip()
        length = toLength(text)  # this does the default measurements such as mm and cm
        return length
    def convertLengthList(self, svgAttr):
        """Convert a list of lengths."""
        return [self.convertLength(a) for a in self.split_attr_list(svgAttr)]
    def convertOpacity(self, svgAttr):
        # Opacity is a plain float in [0, 1].
        return float(svgAttr)
    def convertFillRule(self, svgAttr):
        # Map the SVG fill-rule keyword onto ReportLab's constants.
        return {
            'nonzero': FILL_NON_ZERO,
            'evenodd': FILL_EVEN_ODD,
        }.get(svgAttr, '')
    def convertColor(self, svgAttr):
        "Convert string to a RL color object."
        # fix it: most likely all "web colors" are allowed
        predefined = "aqua black blue fuchsia gray green lime maroon navy "
        predefined = predefined + "olive orange purple red silver teal white yellow "
        predefined = predefined + "lawngreen indianred aquamarine lightgreen brown"
        # This needs also to lookup values like "url(#SomeName)"...
        text = svgAttr
        if not text or text == "none":
            return None
        if text in predefined.split():
            # Named color known to reportlab.lib.colors.
            return self.color_converter(getattr(colors, text))
        elif text == "currentColor":
            # Left as a marker; resolved later against the inherited 'color'.
            return "currentColor"
        elif len(text) == 7 and text[0] == '#':
            # Six-digit hex notation, e.g. #rrggbb.
            return self.color_converter(colors.HexColor(text))
        elif len(text) == 4 and text[0] == '#':
            # Three-digit hex shorthand #rgb -> #rrggbb.
            return self.color_converter(colors.HexColor('#' + 2*text[1] + 2*text[2] + 2*text[3]))
        elif text.startswith('rgb') and '%' not in text:
            # rgb(r, g, b) with 0-255 integer components.
            t = text[3:].strip('()')
            tup = [h[2:] for h in [hex(int(num)) for num in t.split(',')]]
            tup = [(2 - len(h)) * '0' + h for h in tup]
            col = "#%s%s%s" % tuple(tup)
            return self.color_converter(colors.HexColor(col))
        elif text.startswith('rgb') and '%' in text:
            # rgb(r%, g%, b%) with percentage components.
            t = text[3:].replace('%', '').strip('()')
            tup = (float(val)/100.0 for val in t.split(','))
            return self.color_converter(colors.Color(*tup))
        logger.warning("Can't handle color: %s" % text)
        return None
    def convertLineJoin(self, svgAttr):
        # Map SVG join keywords to ReportLab's numeric codes.
        return {"miter":0, "round":1, "bevel":2}[svgAttr]
    def convertLineCap(self, svgAttr):
        # Map SVG cap keywords to ReportLab's numeric codes.
        return {"butt":0, "round":1, "square":2}[svgAttr]
    def convertDashArray(self, svgAttr):
        strokeDashArray = self.convertLengthList(svgAttr)
        return strokeDashArray
    def convertDashOffset(self, svgAttr):
        strokeDashOffset = self.convertLength(svgAttr)
        return strokeDashOffset
    def convertFontFamily(self, svgAttr):
        if not svgAttr:
            return ''
        # very hackish
        font_mapping = {
            "sans-serif": "Helvetica",
            "serif": "Times-Roman",
            "times": "Times-Roman",
            "monospace": "Courier",
        }
        font_names = [
            font_mapping.get(font_name.lower(), font_name)
            for font_name in self.split_attr_list(svgAttr)
        ]
        # Try each candidate in order; an exact match wins immediately,
        # otherwise fall back to the first approximate match.
        non_exact_matches = []
        for font_name in font_names:
            font_name, exact = find_font(font_name)
            if exact:
                return font_name
            elif font_name:
                non_exact_matches.append(font_name)
        if non_exact_matches:
            return non_exact_matches[0]
        else:
            logger.warning("Unable to find a suitable font for 'font-family:%s'" % svgAttr)
            return DEFAULT_FONT_NAME
class ElementWrapper(object):
    """
    lxml element wrapper to partially match the API from cssselect2.ElementWrapper
    so as element can be passed to rules.match().
    """
    in_html_document = False

    def __init__(self, obj):
        self.object = obj

    @property
    def id(self):
        return self.object.attrib.get('id')

    @property
    def etree_element(self):
        return self.object

    @property
    def parent(self):
        par = self.object.getparent()
        return ElementWrapper(par) if par is not None else None

    @property
    def classes(self):
        cl = self.object.attrib.get('class')
        return split_whitespace(cl) if cl is not None else []

    @property
    def local_name(self):
        return node_name(self.object)

    @property
    def namespace_url(self):
        # Extract the URI from a Clark-notation tag like '{uri}name'.
        # Bug fix: the value was previously computed but never returned.
        if '}' in self.object.tag:
            return self.object.tag.split('}')[0][1:]
        return None

    def iter_ancestors(self):
        element = self
        while element.parent is not None:
            element = element.parent
            yield element

    def apply_rules(self, rules):
        """Apply matched CSS declarations as attributes on the wrapped node.

        Explicit attributes take precedence over CSS-derived ones.
        """
        matches = rules.match(self)
        for match in matches:
            attr_dict = match[3][1]
            for attr, val in attr_dict.items():
                if attr not in self.object.attrib:
                    try:
                        self.object.attrib[attr] = val
                    except ValueError:
                        # lxml rejects invalid attribute names; skip them.
                        pass
        # Set marker on the node to not apply rules more than once
        self.object.set('__rules_applied', '1')
class NodeTracker(ElementWrapper):
    """An object wrapper keeping track of arguments to certain method calls.

    Instances wrap an object and store all arguments to one special
    method, getAttribute(name), in a list of unique elements, usedAttrs.
    """
    def __init__(self, obj):
        super(NodeTracker, self).__init__(obj)
        # History of attribute names requested via getAttribute().
        self.usedAttrs = []

    def getAttribute(self, name):
        # Remember each requested name exactly once, then delegate the
        # actual lookup to the wrapped node ('' when absent).
        if name not in self.usedAttrs:
            self.usedAttrs.append(name)
        return self.object.attrib.get(name, '')

    def __getattr__(self, name):
        # Anything not defined here is looked up on the wrapped node.
        return getattr(self.object, name)
class CircularRefError(Exception):
    # Raised when an SVG file (transitively) references itself via xlink:href.
    pass
class ExternalSVG:
    """Lazily-rendered wrapper for an external SVG file referenced by xlink."""
    def __init__(self, path, renderer):
        self.root_node = load_svg_file(path)
        # Track the referencing chain so circular references can be detected.
        self.renderer = SvgRenderer(
            path, parent_svgs=renderer._parent_chain + [renderer.source_path]
        )
        self.rendered = False

    def get_fragment(self, fragment):
        """Render the file on first access, then look up the fragment by id."""
        if not self.rendered:
            self.renderer.render(self.root_node)
            self.rendered = True
        return self.renderer.definitions.get(fragment)
### the main meat ###
class SvgRenderer:
    """Renderer that renders an SVG file on a ReportLab Drawing instance.

    This is the base class for walking over an SVG DOM document and
    transforming it into a ReportLab Drawing instance.
    """

    def __init__(self, path, color_converter=None, parent_svgs=None):
        self.source_path = path
        self._parent_chain = parent_svgs or []  # To detect circular refs.
        self.attrConverter = Svg2RlgAttributeConverter(color_converter=color_converter)
        self.shape_converter = Svg2RlgShapeConverter(path, self.attrConverter)
        self.handled_shapes = self.shape_converter.get_handled_shapes()
        # id -> node, for fragments referenced via url(#id) / xlink:href.
        self.definitions = {}
        # Fragment ids referenced by <use> before their definition appeared.
        self.waiting_use_nodes = defaultdict(list)
        # Cache of ExternalSVG instances, keyed by resolved file path.
        self._external_svgs = {}

    def render(self, svg_node):
        """Walk the whole document and return the resulting Drawing."""
        node = NodeTracker(svg_node)
        main_group = self.renderSvg(node, outermost=True)
        for xlink in self.waiting_use_nodes.keys():
            # Typo fix in message: "width ID" -> "with ID".
            logger.debug("Ignoring unavailable object with ID '%s'." % xlink)

        # Move the origin to account for the y-flip and any viewBox offset.
        view_box = self.get_box(node, default_box=True)
        main_group.translate(0 - view_box.x, -view_box.height - view_box.y)

        width, height = svg_node.attrib.get("width"), svg_node.attrib.get("height")
        width, height = map(self.attrConverter.convertLength, (width, height))
        drawing = Drawing(width, height)
        drawing.add(main_group)
        return drawing

    def renderNode(self, node, parent=None):
        """Dispatch one DOM node to its render/convert method and add the
        resulting shape (if any) to the 'parent' Group."""
        n = NodeTracker(node)
        nid = n.getAttribute("id")
        ignored = False
        item = None
        name = node_name(node)

        clipping = self.get_clippath(n)
        if name == "svg":
            item = self.renderSvg(n)
            parent.add(item)
        elif name == "defs":
            item = self.renderG(n)
        elif name == 'a':
            item = self.renderA(n)
            parent.add(item)
        elif name == 'g':
            display = n.getAttribute("display")
            item = self.renderG(n, clipping=clipping)
            if display != "none":
                parent.add(item)
        elif name == "style":
            self.renderStyle(n)
        elif name == "symbol":
            item = self.renderSymbol(n)
            parent.add(item)
        elif name == "use":
            item = self.renderUse(n, clipping=clipping)
            parent.add(item)
        elif name == "clipPath":
            item = self.renderG(n)
        elif name in self.handled_shapes:
            if name == 'image':
                # We resolve the image target at renderer level because it can point
                # to another SVG file or node which has to be rendered too.
                target = self.xlink_href_target(n)
                if target is None:
                    return
                elif isinstance(target, tuple):
                    # This is SVG content needed to be rendered
                    gr = Group()
                    renderer, node = target
                    renderer.renderNode(node, parent=gr)
                    self.apply_node_attr_to_group(n, gr)
                    parent.add(gr)
                    return
                else:
                    # Attaching target to node, so we can get it back in convertImage
                    n._resolved_target = target

            item = self.shape_converter.convertShape(name, n, clipping)
            display = n.getAttribute("display")
            if item and display != "none":
                parent.add(item)
        else:
            ignored = True
            logger.debug("Ignoring node: %s" % name)

        if not ignored:
            if nid and item:
                self.definitions[nid] = node
            # Render any <use> nodes that were waiting for this definition.
            if nid in self.waiting_use_nodes.keys():
                to_render = self.waiting_use_nodes.pop(nid)
                for use_node, group in to_render:
                    self.renderUse(use_node, group=group)
            self.print_unused_attributes(node, n)

    def get_clippath(self, node):
        """
        Return the clipping Path object referenced by the node 'clip-path'
        attribute, if any.
        """
        def get_path_from_node(node):
            # Depth-first search for the first <path> descendant.
            for child in node.getchildren():
                if node_name(child) == 'path':
                    group = self.shape_converter.convertShape('path', NodeTracker(child))
                    return group.contents[-1]
                else:
                    return get_path_from_node(child)

        clip_path = node.getAttribute('clip-path')
        if clip_path:
            m = re.match(r'url\(#([^\)]*)\)', clip_path)
            if m:
                ref = m.groups()[0]
                if ref in self.definitions:
                    path = get_path_from_node(self.definitions[ref])
                    if path:
                        path = ClippingPath(copy_from=path)
                        return path

    def print_unused_attributes(self, node, n):
        """Log any attributes present on the node but never consumed."""
        if logger.level > logging.DEBUG:
            return
        all_attrs = self.attrConverter.getAllAttributes(node).keys()
        unused_attrs = [attr for attr in all_attrs if attr not in n.usedAttrs]
        if unused_attrs:
            logger.debug("Unused attrs: %s %s" % (node_name(n), unused_attrs))

    def apply_node_attr_to_group(self, node, group):
        """Transfer the node's transform/x/y attributes onto an RLG Group."""
        getAttr = node.getAttribute
        transform, x, y = map(getAttr, ("transform", "x", "y"))
        if x or y:
            # x/y attributes are equivalent to an extra translation.
            transform += " translate(%s, %s)" % (x or '0', y or '0')
        if transform:
            self.shape_converter.applyTransformOnGroup(transform, group)

    def xlink_href_target(self, node, group=None):
        """
        Return either:
            - a tuple (renderer, node) when the the xlink:href attribute targets
              a vector file or node
            - the path to an image file for any raster image targets
            - None if any problem occurs
        """
        xlink_href = node.attrib.get('{http://www.w3.org/1999/xlink}href')
        if not xlink_href:
            return None

        # First handle any raster embedded image data
        match = re.match(r"^data:image/(jpeg|png);base64", xlink_href)
        if match:
            img_format = match.groups()[0]
            # base64.decodestring was removed in Python 3.9; decodebytes is
            # the supported spelling (available since Python 3.1).
            image_data = base64.decodebytes(xlink_href[(match.span(0)[1] + 1):].encode('ascii'))
            file_indicator, path = tempfile.mkstemp(suffix='.%s' % img_format)
            with open(path, 'wb') as fh:
                fh.write(image_data)
            # Close temporary file (as opened by tempfile.mkstemp)
            os.close(file_indicator)
            # this needs to be removed later, not here...
            # if exists(path): os.remove(path)
            return path

        # From here, we can assume this is a path.
        if '#' in xlink_href:
            iri, fragment = xlink_href.split('#', 1)
        else:
            iri, fragment = xlink_href, None

        if iri:
            # Only local relative paths are supported yet
            if not isinstance(self.source_path, str):
                logger.error(
                    "Unable to resolve image path '%s' as the SVG source is not a file system path." % iri
                )
                return None
            path = os.path.normpath(os.path.join(os.path.dirname(self.source_path), iri))
            if not os.access(path, os.R_OK):
                return None
            if path == self.source_path:
                # Self-referencing, ignore the IRI part
                iri = None

        if iri:
            if path.endswith('.svg'):
                if path in self._parent_chain:
                    logger.error("Circular reference detected in file.")
                    raise CircularRefError()
                if path not in self._external_svgs:
                    self._external_svgs[path] = ExternalSVG(path, self)
                ext_svg = self._external_svgs[path]
                if ext_svg.root_node is not None:
                    if fragment:
                        ext_frag = ext_svg.get_fragment(fragment)
                        if ext_frag is not None:
                            return ext_svg.renderer, ext_frag
                    else:
                        return ext_svg.renderer, ext_svg.root_node
            else:
                # A raster image path
                try:
                    # This will catch invalid images
                    PDFImage(path, 0, 0)
                except IOError:
                    logger.error("Unable to read the image %s. Skipping..." % path)
                    return None
                return path

        elif fragment:
            # A pointer to an internal definition
            if fragment in self.definitions:
                return self, self.definitions[fragment]
            else:
                # The missing definition should appear later in the file
                self.waiting_use_nodes[fragment].append((node, group))
                return DELAYED

    def renderTitle_(self, node):
        # Main SVG title attr. could be used in the PDF document info field.
        pass

    def renderDesc_(self, node):
        # Main SVG desc. attr. could be used in the PDF document info field.
        pass

    def get_box(self, svg_node, default_box=False):
        """Return the viewBox as a Box, or (optionally) a default box built
        from the width/height attributes; None when neither is available."""
        view_box = svg_node.getAttribute("viewBox")
        if view_box:
            view_box = self.attrConverter.convertLengthList(view_box)
            return Box(*view_box)
        if default_box:
            width, height = map(svg_node.getAttribute, ("width", "height"))
            width, height = map(self.attrConverter.convertLength, (width, height))
            return Box(0, 0, width, height)

    def renderSvg(self, node, outermost=False):
        """Render an <svg> element (root or nested) into a Group."""
        getAttr = node.getAttribute
        _saved_preserve_space = self.shape_converter.preserve_space
        self.shape_converter.preserve_space = getAttr("{%s}space" % XML_NS) == 'preserve'
        group = Group()
        for child in node.getchildren():
            self.renderNode(child, group)
        self.shape_converter.preserve_space = _saved_preserve_space

        # Translating
        if not outermost:
            x, y = map(getAttr, ("x", "y"))
            x, y = map(self.attrConverter.convertLength, (x, y))
            if x or y:
                group.translate(x or 0, y or 0)

        # Scaling
        view_box = self.get_box(node)
        if not view_box and outermost:
            # Apply only the 'reverse' y-scaling (PDF 0,0 is bottom left)
            group.scale(1, -1)
        elif view_box:
            x_scale, y_scale = 1, 1
            width, height = map(getAttr, ("width", "height"))
            width, height = map(self.attrConverter.convertLength, (width, height))
            if view_box.height != height:
                y_scale = height / view_box.height
            if view_box.width != width:
                x_scale = width / view_box.width
            group.scale(x_scale, y_scale * (-1 if outermost else 1))

        return group

    def renderG(self, node, clipping=None, display=1):
        """Render a <g> (or defs/clipPath) element into a Group."""
        getAttr = node.getAttribute
        id, transform = map(getAttr, ("id", "transform"))
        gr = Group()
        if clipping:
            gr.add(clipping)
        for child in node.getchildren():
            item = self.renderNode(child, parent=gr)
            if item and display:
                gr.add(item)
        if transform:
            self.shape_converter.applyTransformOnGroup(transform, gr)
        return gr

    def renderStyle(self, node):
        # Build the CSS matcher used by subsequent attribute lookups.
        self.attrConverter.css_rules = CSSMatcher(node.text)

    def renderSymbol(self, node):
        # A symbol is rendered but not displayed until referenced by <use>.
        return self.renderG(node, display=0)

    def renderA(self, node):
        # currently nothing but a group...
        # there is no linking info stored in shapes, maybe a group should?
        return self.renderG(node)

    def renderUse(self, node, group=None, clipping=None):
        """Render a <use> element by cloning its referenced node."""
        if group is None:
            group = Group()

        try:
            item = self.xlink_href_target(node, group=group)
        except CircularRefError:
            # Remove the offending node so rendering can continue.
            node.parent.object.remove(node.object)
            return group
        if item is None:
            return
        elif isinstance(item, str):
            logger.error("<use> nodes cannot reference bitmap image files")
            return
        elif item is DELAYED:
            return group
        else:
            item = item[1]  # [0] is the renderer, not used here.

        if clipping:
            group.add(clipping)
        if len(node.getchildren()) == 0:
            # Append a copy of the referenced node as the <use> child (if not already done)
            node.append(copy.deepcopy(item))
        self.renderNode(node.getchildren()[-1], parent=group)
        self.apply_node_attr_to_group(node, group)
        return group
class SvgShapeConverter:
    """An abstract SVG shape converter.

    Implement subclasses with methods named 'renderX(node)', where
    'X' should be the capitalised name of an SVG node element for
    shapes, like 'Rect', 'Circle', 'Line', etc.

    Each of these methods should return a shape object appropriate
    for the target format.
    """
    def __init__(self, path, attrConverter=None):
        self.attrConverter = attrConverter or Svg2RlgAttributeConverter()
        self.svg_source_file = path
        # Whether xml:space="preserve" is in effect for text nodes.
        self.preserve_space = False

    @classmethod
    def get_handled_shapes(cls):
        """Dynamically determine a list of handled shape elements based on
        convert<shape> method existence.
        """
        prefix = 'convert'
        return [
            attr_name[len(prefix):].lower()
            for attr_name in dir(cls)
            if attr_name.startswith(prefix)
        ]
class Svg2RlgShapeConverter(SvgShapeConverter):
"""Converter from SVG shapes to RLG (ReportLab Graphics) shapes."""
def convertShape(self, name, node, clipping=None):
method_name = "convert%s" % name.capitalize()
shape = getattr(self, method_name)(node)
if not shape:
return
if name not in ('path', 'polyline', 'text'):
# Only apply style where the convert method did not apply it.
self.applyStyleOnShape(shape, node)
transform = node.getAttribute("transform")
if not (transform or clipping):
return shape
else:
group = Group()
if transform:
self.applyTransformOnGroup(transform, group)
if clipping:
group.add(clipping)
group.add(shape)
return group
def convertLine(self, node):
getAttr = node.getAttribute
x1, y1, x2, y2 = map(getAttr, ("x1", "y1", "x2", "y2"))
x1, y1, x2, y2 = map(self.attrConverter.convertLength, (x1, y1, x2, y2))
shape = Line(x1, y1, x2, y2)
return shape
def convertRect(self, node):
getAttr = node.getAttribute
x, y, width, height = map(getAttr, ('x', 'y', "width", "height"))
x, y, width, height = map(self.attrConverter.convertLength, (x, y, width, height))
rx, ry = map(getAttr, ("rx", "ry"))
rx, ry = map(self.attrConverter.convertLength, (rx, ry))
shape = Rect(x, y, width, height, rx=rx, ry=ry)
return shape
def convertCircle(self, node):
# not rendered if r == 0, error if r < 0.
getAttr = node.getAttribute
cx, cy, r = map(getAttr, ("cx", "cy", 'r'))
cx, cy, r = map(self.attrConverter.convertLength, (cx, cy, r))
shape = Circle(cx, cy, r)
return shape
def convertEllipse(self, node):
getAttr = node.getAttribute
cx, cy, rx, ry = map(getAttr, ("cx", "cy", "rx", "ry"))
cx, cy, rx, ry = map(self.attrConverter.convertLength, (cx, cy, rx, ry))
width, height = rx, ry
shape = Ellipse(cx, cy, width, height)
return shape
def convertPolyline(self, node):
getAttr = node.getAttribute
points = getAttr("points")
points = points.replace(',', ' ')
points = points.split()
points = list(map(self.attrConverter.convertLength, points))
if len(points) % 2 != 0 or len(points) == 0:
# Odd number of coordinates or no coordinates, invalid polyline
return None
polyline = PolyLine(points)
self.applyStyleOnShape(polyline, node)
has_fill = self.attrConverter.findAttr(node, 'fill') not in ('', 'none')
if has_fill:
# ReportLab doesn't fill polylines, so we are creating a polygon
# polygon copy of the polyline, but without stroke.
group = Group()
polygon = Polygon(points)
self.applyStyleOnShape(polygon, node)
polygon.strokeColor = None
group.add(polygon)
group.add(polyline)
return group
return polyline
def convertPolygon(self, node):
getAttr = node.getAttribute
points = getAttr("points")
points = points.replace(',', ' ')
points = points.split()
points = list(map(self.attrConverter.convertLength, points))
if len(points) % 2 != 0 or len(points) == 0:
# Odd number of coordinates or no coordinates, invalid polygon
return None
shape = Polygon(points)
return shape
def clean_text(self, text, preserve_space):
"""Text cleaning as per https://www.w3.org/TR/SVG/text.html#WhiteSpace
"""
if text is None:
return
if preserve_space:
text = text.replace('\r\n', ' ').replace('\n', ' ').replace('\t', ' ')
else:
text = text.replace('\r\n', '').replace('\n', '').replace('\t', ' ')
text = text.strip()
while (' ' in text):
text = text.replace(' ', ' ')
return text
    def convertText(self, node):
        """Convert an SVG <text> node (and its <tspan> children) to a Group
        of String shapes. The group is y-flipped at the end because the
        positions are computed in SVG coordinates (y axis pointing down).
        """
        attrConv = self.attrConverter
        xml_space = node.getAttribute("{%s}space" % XML_NS)
        if xml_space:
            preserve_space = xml_space == 'preserve'
        else:
            preserve_space = self.preserve_space
        gr = Group()
        # Widths of the fragments rendered so far (to advance x between them).
        frag_lengths = []
        # Accumulated dx/dy offsets across successive tspans.
        dx0, dy0 = 0, 0
        x1, y1 = 0, 0
        ff = attrConv.findAttr(node, "font-family") or DEFAULT_FONT_NAME
        ff = attrConv.convertFontFamily(ff)
        fs = attrConv.findAttr(node, "font-size") or "12"
        fs = attrConv.convertLength(fs)
        # em-relative lengths are resolved against the current font size.
        convertLength = partial(attrConv.convertLength, em_base=fs)
        x, y = map(node.getAttribute, ('x', 'y'))
        x, y = map(convertLength, (x, y))
        # Iterate over the direct text of the node, then each child tspan.
        for c in itertools.chain([node], node.getchildren()):
            has_x, has_y = False, False
            dx, dy = 0, 0
            baseLineShift = 0
            if node_name(c) == 'text':
                text = self.clean_text(c.text, preserve_space)
                if not text:
                    continue
            elif node_name(c) == 'tspan':
                text = self.clean_text(c.text, preserve_space)
                if not text:
                    continue
                x1, y1, dx, dy = [c.attrib.get(name, '') for name in ("x", "y", "dx", "dy")]
                has_x, has_y = (x1 != '', y1 != '')
                x1, y1, dx, dy = map(convertLength, (x1, y1, dx, dy))
                dx0 = dx0 + dx
                dy0 = dy0 + dy
                baseLineShift = c.attrib.get("baseline-shift", '0')
                if baseLineShift in ("sub", "super", "baseline"):
                    # Keyword shifts are half the font size up or down.
                    baseLineShift = {"sub":-fs/2, "super":fs/2, "baseline":0}[baseLineShift]
                else:
                    baseLineShift = convertLength(baseLineShift, fs)
            else:
                continue
            frag_lengths.append(stringWidth(text, ff, fs))
            # Without an explicit x/y on the tspan, advance past the widths
            # of all previously rendered fragments.
            new_x = (x1 + dx) if has_x else (x + dx0 + sum(frag_lengths[:-1]))
            new_y = (y1 + dy) if has_y else (y + dy0)
            shape = String(new_x, -(new_y - baseLineShift), text)
            self.applyStyleOnShape(shape, node)
            if node_name(c) == 'tspan':
                self.applyStyleOnShape(shape, c)
            gr.add(shape)
        gr.scale(1, -1)
        return gr
    def convertPath(self, node):
        """Convert an SVG <path> 'd' attribute into an RLG Path (wrapped in a
        Group). Filled paths with unclosed subpaths get a stroke-less,
        closed companion path because ReportLab does not fill open paths.
        """
        d = node.getAttribute('d')
        if not d:
            return None
        normPath = normalise_svg_path(d)
        path = Path()
        points = path.points
        # Track subpaths needing to be closed later
        unclosed_subpath_pointers = []
        subpath_start = []
        lastop = ''
        # normPath alternates operator, argument-list pairs.
        for i in range(0, len(normPath), 2):
            op, nums = normPath[i:i+2]
            # A moveto starting a new subpath leaves the previous one open.
            if op in ('m', 'M') and i > 0 and path.operators[-1] != _CLOSEPATH:
                unclosed_subpath_pointers.append(len(path.operators))
            # moveto absolute
            if op == 'M':
                path.moveTo(*nums)
                subpath_start = points[-2:]
            # lineto absolute
            elif op == 'L':
                path.lineTo(*nums)
            # moveto relative
            elif op == 'm':
                if len(points) >= 2:
                    # After a closepath, a relative moveto is relative to the
                    # start of the just-closed subpath.
                    if lastop in ('Z', 'z'):
                        starting_point = subpath_start
                    else:
                        starting_point = points[-2:]
                    xn, yn = starting_point[0] + nums[0], starting_point[1] + nums[1]
                    path.moveTo(xn, yn)
                else:
                    path.moveTo(*nums)
                subpath_start = points[-2:]
            # lineto relative
            elif op == 'l':
                xn, yn = points[-2] + nums[0], points[-1] + nums[1]
                path.lineTo(xn, yn)
            # horizontal/vertical line absolute
            elif op == 'H':
                path.lineTo(nums[0], points[-1])
            elif op == 'V':
                path.lineTo(points[-2], nums[0])
            # horizontal/vertical line relative
            elif op == 'h':
                path.lineTo(points[-2] + nums[0], points[-1])
            elif op == 'v':
                path.lineTo(points[-2], points[-1] + nums[0])
            # cubic bezier, absolute
            elif op == 'C':
                path.curveTo(*nums)
            elif op == 'S':
                # Smooth curveto: first control point is the reflection of
                # the previous control point, when one exists.
                x2, y2, xn, yn = nums
                if len(points) < 4 or lastop not in {'c', 'C', 's', 'S'}:
                    xp, yp, x0, y0 = points[-2:] * 2
                else:
                    xp, yp, x0, y0 = points[-4:]
                xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
                path.curveTo(xi, yi, x2, y2, xn, yn)
            # cubic bezier, relative
            elif op == 'c':
                xp, yp = points[-2:]
                x1, y1, x2, y2, xn, yn = nums
                path.curveTo(xp + x1, yp + y1, xp + x2, yp + y2, xp + xn, yp + yn)
            elif op == 's':
                x2, y2, xn, yn = nums
                if len(points) < 4 or lastop not in {'c', 'C', 's', 'S'}:
                    xp, yp, x0, y0 = points[-2:] * 2
                else:
                    xp, yp, x0, y0 = points[-4:]
                xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
                path.curveTo(xi, yi, x0 + x2, y0 + y2, x0 + xn, y0 + yn)
            # quadratic bezier, absolute
            elif op == 'Q':
                # Quadratic segments are promoted to cubic ones for RLG.
                x0, y0 = points[-2:]
                x1, y1, xn, yn = nums
                (x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
                    convert_quadratic_to_cubic_path((x0, y0), (x1, y1), (xn, yn))
                path.curveTo(x1, y1, x2, y2, xn, yn)
            elif op == 'T':
                # Smooth quadratic: control point mirrors the previous one.
                if len(points) < 4:
                    xp, yp, x0, y0 = points[-2:] * 2
                else:
                    xp, yp, x0, y0 = points[-4:]
                xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
                xn, yn = nums
                (x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
                    convert_quadratic_to_cubic_path((x0, y0), (xi, yi), (xn, yn))
                path.curveTo(x1, y1, x2, y2, xn, yn)
            # quadratic bezier, relative
            elif op == 'q':
                x0, y0 = points[-2:]
                x1, y1, xn, yn = nums
                x1, y1, xn, yn = x0 + x1, y0 + y1, x0 + xn, y0 + yn
                (x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
                    convert_quadratic_to_cubic_path((x0, y0), (x1, y1), (xn, yn))
                path.curveTo(x1, y1, x2, y2, xn, yn)
            elif op == 't':
                if len(points) < 4:
                    xp, yp, x0, y0 = points[-2:] * 2
                else:
                    xp, yp, x0, y0 = points[-4:]
                x0, y0 = points[-2:]
                xn, yn = nums
                xn, yn = x0 + xn, y0 + yn
                xi, yi = x0 + (x0 - xp), y0 + (y0 - yp)
                (x0, y0), (x1, y1), (x2, y2), (xn, yn) = \
                    convert_quadratic_to_cubic_path((x0, y0), (xi, yi), (xn, yn))
                path.curveTo(x1, y1, x2, y2, xn, yn)
            # elliptical arc
            elif op in ('A', 'a'):
                rx, ry, phi, fA, fS, x2, y2 = nums
                x1, y1 = points[-2:]
                if op == 'a':
                    x2 += x1
                    y2 += y1
                if abs(rx) <= 1e-10 or abs(ry) <= 1e-10:
                    # Degenerate radii: the spec says draw a straight line.
                    path.lineTo(x2, y2)
                else:
                    # Approximate the arc with a sequence of cubic beziers.
                    bp = bezier_arc_from_end_points(x1, y1, rx, ry, phi, fA, fS, x2, y2)
                    for _, _, x1, y1, x2, y2, xn, yn in bp:
                        path.curveTo(x1, y1, x2, y2, xn, yn)
            # close path
            elif op in ('Z', 'z'):
                path.closePath()
            else:
                logger.debug("Suspicious path operator: %s" % op)
            lastop = op
        gr = Group()
        self.applyStyleOnShape(path, node)
        if path.operators[-1] != _CLOSEPATH:
            unclosed_subpath_pointers.append(len(path.operators))
        if unclosed_subpath_pointers and path.fillColor is not None:
            # ReportLab doesn't fill unclosed paths, so we are creating a copy
            # of the path with all subpaths closed, but without stroke.
            # https://bitbucket.org/rptlab/reportlab/issues/99/
            closed_path = NoStrokePath(copy_from=path)
            for pointer in reversed(unclosed_subpath_pointers):
                closed_path.operators.insert(pointer, _CLOSEPATH)
            gr.add(closed_path)
            path.fillColor = None
        gr.add(path)
        return gr
def convertImage(self, node):
if not haveImages:
logger.warning(
"Unable to handle embedded images. Maybe the pillow library is missing?"
)
return None
getAttr = node.getAttribute
x, y, width, height = map(getAttr, ('x', 'y', "width", "height"))
x, y, width, height = map(self.attrConverter.convertLength, (x, y, width, height))
image = node._resolved_target
image = Image(int(x), int(y + height), int(width), int(height), image)
group = Group(image)
group.translate(0, (y + height) * 2)
group.scale(1, -1)
return group
def applyTransformOnGroup(self, transform, group):
    """Apply an SVG transformation to a RL Group shape.

    The transformation is the value of an SVG transform attribute
    like transform="scale(1, -1) translate(10, 30)"; the operations are
    applied in the order in which they appear.  rotate(<angle> <cx> <cy>)
    is equivalent to:
    translate(<cx> <cy>) rotate(<angle>) translate(-<cx> -<cy>)
    """
    for name, args in self.attrConverter.convertTransform(transform):
        if name == "scale":
            # A single factor means uniform scaling in x and y.
            if not isinstance(args, tuple):
                args = (args, args)
            group.scale(*args)
        elif name == "translate":
            if isinstance(args, (int, float)):
                # From the SVG spec: If <ty> is not provided, it is assumed to be zero.
                args = args, 0
            group.translate(*args)
        elif name == "rotate":
            if not isinstance(args, tuple) or len(args) == 1:
                group.rotate(args)
            elif len(args) == 3:
                angle, cx, cy = args
                group.translate(cx, cy)
                group.rotate(angle)
                group.translate(-cx, -cy)
        elif name == "skewX":
            group.skew(args, 0)
        elif name == "skewY":
            group.skew(0, args)
        elif name == "matrix":
            group.transform = args
        else:
            logger.debug("Ignoring transform: %s %s" % (name, args))
def applyStyleOnShape(self, shape, node, only_explicit=False):
    """
    Apply styles from an SVG element to an RLG shape.

    Fill/stroke attributes are applied to every shape; font attributes
    only to String shapes.  Group shapes are handled by recursing into
    their contents.  If only_explicit is True, only attributes really
    present are applied (no defaults are substituted).
    """
    # RLG-specific: all RLG shapes
    # tuple format: (svgAttr, rlgAttr, converter, default)
    mappingN = (
        ("fill", "fillColor", "convertColor", "black"),
        ("fill-opacity", "fillOpacity", "convertOpacity", 1),
        ("fill-rule", "_fillRule", "convertFillRule", "nonzero"),
        ("stroke", "strokeColor", "convertColor", "none"),
        ("stroke-width", "strokeWidth", "convertLength", "1"),
        ("stroke-opacity", "strokeOpacity", "convertOpacity", 1),
        ("stroke-linejoin", "strokeLineJoin", "convertLineJoin", "0"),
        ("stroke-linecap", "strokeLineCap", "convertLineCap", "0"),
        ("stroke-dasharray", "strokeDashArray", "convertDashArray", "none"),
    )
    mappingF = (
        ("font-family", "fontName", "convertFontFamily", DEFAULT_FONT_NAME),
        ("font-size", "fontSize", "convertLength", "12"),
        ("text-anchor", "textAnchor", "id", "start"),
    )
    if shape.__class__ == Group:
        # Recursively apply style on Group subelements
        for subshape in shape.contents:
            self.applyStyleOnShape(subshape, node, only_explicit=only_explicit)
        return
    ac = self.attrConverter
    for mapping in (mappingN, mappingF):
        # Font attributes only make sense on String shapes.
        if shape.__class__ != String and mapping == mappingF:
            continue
        for (svgAttrName, rlgAttr, func, default) in mapping:
            svgAttrValue = ac.findAttr(node, svgAttrName)
            if svgAttrValue == '':
                if only_explicit:
                    continue
                else:
                    svgAttrValue = default
            if svgAttrValue == "currentColor":
                # 'currentColor' resolves to the parent's 'color' attribute.
                svgAttrValue = ac.findAttr(node.getparent(), "color") or default
            try:
                meth = getattr(ac, func)
                setattr(shape, rlgAttr, meth(svgAttrValue))
            except (AttributeError, KeyError, ValueError):
                # Unknown converter or unconvertible value: keep what the
                # shape already has.
                pass
    if getattr(shape, 'fillOpacity', None) is not None and shape.fillColor:
        # Propagate fill-opacity into the fill color's alpha channel.
        shape.fillColor.alpha = shape.fillOpacity
def svg2rlg(path, **kwargs):
    """Convert an SVG file to an RLG Drawing object.

    A ``.svgz`` input is transparently gunzipped to a sibling ``.svg``
    file first; that temporary file is deleted again after rendering.
    """
    is_compressed = (
        isinstance(path, str) and os.path.splitext(path)[1].lower() == ".svgz"
    )
    if is_compressed:
        uncompressed_path = path[:-1]
        with gzip.open(path, 'rb') as f_in, open(uncompressed_path, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
        path = uncompressed_path
    root = load_svg_file(path)
    if root is None:
        return None
    drawing = SvgRenderer(path, **kwargs).render(root)
    if is_compressed:
        os.remove(path)
    return drawing
def load_svg_file(path):
    """Parse the file at *path* and return its root element, or None.

    Comments are stripped and the parser runs in recovery mode; any
    parsing/IO failure is logged and None is returned.
    """
    xml_parser = etree.XMLParser(remove_comments=True, recover=True)
    try:
        root = etree.parse(path, parser=xml_parser).getroot()
    except Exception as exc:
        logger.error("Failed to load input file! (%s)" % exc)
        return None
    return root
def node_name(node):
    """Return the node's tag name with any namespace prefix stripped.

    Returns None when the tag attribute is missing or not string-like
    (the AttributeError is swallowed).
    """
    try:
        return node.tag.rpartition('}')[2]
    except AttributeError:
        return None
# Module-level side effect: monkeypatch_reportlab() (defined elsewhere in
# this module) is executed once at import time.
monkeypatch_reportlab()
|
deeplook/svglib | svglib/svglib.py | AttributeConverter.parseMultiAttributes | python | def parseMultiAttributes(self, line):
attrs = line.split(';')
attrs = [a.strip() for a in attrs]
attrs = filter(lambda a:len(a)>0, attrs)
new_attrs = {}
for a in attrs:
k, v = a.split(':')
k, v = [s.strip() for s in (k, v)]
new_attrs[k] = v
return new_attrs | Try parsing compound attribute string.
Return a dictionary with single attributes in 'line'. | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L180-L196 | null | class AttributeConverter(object):
"An abstract class to locate and convert attributes in a DOM instance."
def __init__(self):
    # CSS rules to be applied to nodes; findAttr() skips CSS handling
    # entirely while this is None.
    self.css_rules = None
def findAttr(self, svgNode, name):
"""Search an attribute with some name in some node or above.
First the node is searched, then its style attribute, then
the search continues in the node's parent node. If no such
attribute is found, '' is returned.
"""
# This needs also to lookup values like "url(#SomeName)"...
if self.css_rules is not None and not svgNode.attrib.get('__rules_applied', False):
if isinstance(svgNode, NodeTracker):
svgNode.apply_rules(self.css_rules)
else:
ElementWrapper(svgNode).apply_rules(self.css_rules)
attr_value = svgNode.attrib.get(name, '').strip()
if attr_value and attr_value != "inherit":
return attr_value
elif svgNode.attrib.get("style"):
dict = self.parseMultiAttributes(svgNode.attrib.get("style"))
if name in dict:
return dict[name]
if svgNode.getparent() is not None:
return self.findAttr(svgNode.getparent(), name)
return ''
def getAllAttributes(self, svgNode):
    """Return a dict of all attributes of svgNode or those inherited by it.

    Attributes inherited from an enclosing <g> element are collected
    first, then values from the node's 'style' attribute, and finally
    the node's own attributes, which therefore take precedence.
    """
    # 'attrs' instead of the original 'dict', which shadowed the builtin.
    attrs = {}
    if node_name(svgNode.getparent()) == 'g':
        attrs.update(self.getAllAttributes(svgNode.getparent()))
    style = svgNode.attrib.get("style")
    if style:
        attrs.update(self.parseMultiAttributes(style))
    for key, value in svgNode.attrib.items():
        if key != "style":
            attrs[key] = value
    return attrs
def id(self, svgAttr):
    # Identity converter, referenced by name from attribute mapping tables.
    "Return attribute as is (identity conversion)."
    return svgAttr
def convertTransform(self, svgAttr):
    """Parse transform attribute string.

    E.g. "scale(2) translate(10,20)"
         -> [("scale", 2), ("translate", (10,20))]

    Returns [] (after logging a warning) when the expression cannot be
    parsed consistently.
    """
    line = svgAttr.strip()
    ops = line[:]
    brackets = []
    indices = []
    # Collect positions of all parentheses; they are assumed to come in
    # flat, non-nested open/close pairs.
    for i, lin in enumerate(line):
        if lin in "()":
            brackets.append(i)
    for i in range(0, len(brackets), 2):
        bi, bj = brackets[i], brackets[i+1]
        subline = line[bi+1:bj]
        subline = subline.strip()
        # Normalise argument separators: any mix of commas and blanks
        # becomes exactly one comma.
        subline = subline.replace(',', ' ')
        subline = re.sub("[ ]+", ',', subline)
        try:
            if ',' in subline:
                indices.append(tuple(float(num) for num in subline.split(',')))
            else:
                indices.append(float(subline))
        except ValueError:
            # Unparsable arguments: this operator's text stays in 'ops',
            # so the length check below fails and [] is returned.
            continue
        # Blank out the processed "(...)" span so that only operator
        # names remain in 'ops'.
        ops = ops[:bi] + ' '*(bj-bi+1) + ops[bj+1:]
    ops = ops.replace(',', ' ').split()
    if len(ops) != len(indices):
        logger.warning("Unable to parse transform expression '%s'" % svgAttr)
        return []
    result = []
    for i, op in enumerate(ops):
        result.append((op, indices[i]))
    return result
|
deeplook/svglib | svglib/svglib.py | AttributeConverter.findAttr | python | def findAttr(self, svgNode, name):
# This needs also to lookup values like "url(#SomeName)"...
if self.css_rules is not None and not svgNode.attrib.get('__rules_applied', False):
if isinstance(svgNode, NodeTracker):
svgNode.apply_rules(self.css_rules)
else:
ElementWrapper(svgNode).apply_rules(self.css_rules)
attr_value = svgNode.attrib.get(name, '').strip()
if attr_value and attr_value != "inherit":
return attr_value
elif svgNode.attrib.get("style"):
dict = self.parseMultiAttributes(svgNode.attrib.get("style"))
if name in dict:
return dict[name]
if svgNode.getparent() is not None:
return self.findAttr(svgNode.getparent(), name)
return '' | Search an attribute with some name in some node or above.
First the node is searched, then its style attribute, then
the search continues in the node's parent node. If no such
attribute is found, '' is returned. | train | https://github.com/deeplook/svglib/blob/859f9f461f1041018af3e6f507bb4c0616b04fbb/svglib/svglib.py#L198-L223 | [
"def parseMultiAttributes(self, line):\n \"\"\"Try parsing compound attribute string.\n\n Return a dictionary with single attributes in 'line'.\n \"\"\"\n\n attrs = line.split(';')\n attrs = [a.strip() for a in attrs]\n attrs = filter(lambda a:len(a)>0, attrs)\n\n new_attrs = {}\n for a in attrs:\n k, v = a.split(':')\n k, v = [s.strip() for s in (k, v)]\n new_attrs[k] = v\n\n return new_attrs\n",
"def findAttr(self, svgNode, name):\n \"\"\"Search an attribute with some name in some node or above.\n\n First the node is searched, then its style attribute, then\n the search continues in the node's parent node. If no such\n attribute is found, '' is returned.\n \"\"\"\n\n # This needs also to lookup values like \"url(#SomeName)\"...\n\n if self.css_rules is not None and not svgNode.attrib.get('__rules_applied', False):\n if isinstance(svgNode, NodeTracker):\n svgNode.apply_rules(self.css_rules)\n else:\n ElementWrapper(svgNode).apply_rules(self.css_rules)\n attr_value = svgNode.attrib.get(name, '').strip()\n\n if attr_value and attr_value != \"inherit\":\n return attr_value\n elif svgNode.attrib.get(\"style\"):\n dict = self.parseMultiAttributes(svgNode.attrib.get(\"style\"))\n if name in dict:\n return dict[name]\n if svgNode.getparent() is not None:\n return self.findAttr(svgNode.getparent(), name)\n return ''\n"
] | class AttributeConverter(object):
"An abstract class to locate and convert attributes in a DOM instance."
def __init__(self):
    # Container for CSS rules; starts out empty (None).  Presumably
    # populated by callers that attach a stylesheet — set outside this
    # class; verify against callers.
    self.css_rules = None
def parseMultiAttributes(self, line):
"""Try parsing compound attribute string.
Return a dictionary with single attributes in 'line'.
"""
attrs = line.split(';')
attrs = [a.strip() for a in attrs]
attrs = filter(lambda a:len(a)>0, attrs)
new_attrs = {}
for a in attrs:
k, v = a.split(':')
k, v = [s.strip() for s in (k, v)]
new_attrs[k] = v
return new_attrs
def getAllAttributes(self, svgNode):
    """Return a dict of all attributes of svgNode or those inherited by it.

    Attributes inherited from an enclosing <g> element are collected
    first, then values from the node's 'style' attribute, and finally
    the node's own attributes, which therefore take precedence.
    """
    # 'attrs' instead of the original 'dict', which shadowed the builtin.
    attrs = {}
    if node_name(svgNode.getparent()) == 'g':
        attrs.update(self.getAllAttributes(svgNode.getparent()))
    style = svgNode.attrib.get("style")
    if style:
        attrs.update(self.parseMultiAttributes(style))
    for key, value in svgNode.attrib.items():
        if key != "style":
            attrs[key] = value
    return attrs
def id(self, svgAttr):
    # Identity converter, referenced by name from attribute mapping tables.
    "Return attribute as is (identity conversion)."
    return svgAttr
def convertTransform(self, svgAttr):
    """Parse transform attribute string.

    E.g. "scale(2) translate(10,20)"
         -> [("scale", 2), ("translate", (10,20))]

    Returns [] (after logging a warning) when the expression cannot be
    parsed consistently.
    """
    line = svgAttr.strip()
    ops = line[:]
    brackets = []
    indices = []
    # Collect positions of all parentheses; they are assumed to come in
    # flat, non-nested open/close pairs.
    for i, lin in enumerate(line):
        if lin in "()":
            brackets.append(i)
    for i in range(0, len(brackets), 2):
        bi, bj = brackets[i], brackets[i+1]
        subline = line[bi+1:bj]
        subline = subline.strip()
        # Normalise argument separators: any mix of commas and blanks
        # becomes exactly one comma.
        subline = subline.replace(',', ' ')
        subline = re.sub("[ ]+", ',', subline)
        try:
            if ',' in subline:
                indices.append(tuple(float(num) for num in subline.split(',')))
            else:
                indices.append(float(subline))
        except ValueError:
            # Unparsable arguments: this operator's text stays in 'ops',
            # so the length check below fails and [] is returned.
            continue
        # Blank out the processed "(...)" span so that only operator
        # names remain in 'ops'.
        ops = ops[:bi] + ' '*(bj-bi+1) + ops[bj+1:]
    ops = ops.replace(',', ' ').split()
    if len(ops) != len(indices):
        logger.warning("Unable to parse transform expression '%s'" % svgAttr)
        return []
    result = []
    for i, op in enumerate(ops):
        result.append((op, indices[i]))
    return result
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.